1 /* Copyright (C) 1995, 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
49 #include "guardians.h"
66 #define var_start(x, y) va_start(x, y)
69 #define var_start(x, y) va_start(x)
73 /* {heap tuning parameters}
75 * These are parameters for controlling memory allocation. The heap
76 * is the area out of which scm_cons, and object headers are allocated.
78 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
79 * 64 bit machine. The units of the _SIZE parameters are bytes.
80 * Cons pairs and object headers occupy one heap cell.
82 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
83 * allocated initially the heap will grow by half its current size
84 * each subsequent time more heap is needed.
86 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
87 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
88 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
89 * is in scm_init_storage() and alloc_some_heap() in sys.c
91 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
92 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
94 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
97 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
100 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
101 * reclaimed by a GC triggered by must_malloc. If less than this is
102 * reclaimed, the trigger threshold is raised. [I don't know what a
103 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
104 * work around an oscillation that caused almost constant GC.]
107 #define SCM_INIT_HEAP_SIZE (32768L*sizeof(scm_cell))
108 #define SCM_MIN_HEAP_SEG_SIZE (2048L*sizeof(scm_cell))
110 # define SCM_HEAP_SEG_SIZE 32768L
113 # define SCM_HEAP_SEG_SIZE (7000L*sizeof(scm_cell))
115 # define SCM_HEAP_SEG_SIZE (16384L*sizeof(scm_cell))
118 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size*2)
119 #define SCM_INIT_MALLOC_LIMIT 100000
120 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
122 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
123 bounds for allocated storage */
126 /*in 386 protected mode we must only adjust the offset */
127 # define CELL_UP(p) MK_FP(FP_SEG(p), ~7&(FP_OFF(p)+7))
128 # define CELL_DN(p) MK_FP(FP_SEG(p), ~7&FP_OFF(p))
131 # define CELL_UP(p) (SCM_CELLPTR)(~1L & ((long)(p)+1L))
132 # define CELL_DN(p) (SCM_CELLPTR)(~1L & (long)(p))
134 # define CELL_UP(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & ((long)(p)+sizeof(scm_cell)-1L))
135 # define CELL_DN(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & (long)(p))
142 * is the head of freelist of cons pairs.
144 SCM scm_freelist
= SCM_EOL
;
147 * is the number of bytes of must_malloc allocation needed to trigger gc.
149 unsigned long scm_mtrigger
;
153 * If set, don't expand the heap. Set only during gc, during which no allocation
154 * is supposed to take place anyway.
156 int scm_gc_heap_lock
= 0;
159 * Don't pause for collection if this is set -- just
163 int scm_block_gc
= 1;
165 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
166 * collection (GC) more space is allocated for the heap.
168 #define MIN_GC_YIELD (scm_heap_size/4)
170 /* During collection, this accumulates objects holding
173 SCM scm_weak_vectors
;
175 /* GC Statistics Keeping
177 unsigned long scm_cells_allocated
= 0;
178 long scm_mallocated
= 0;
179 unsigned long scm_gc_cells_collected
;
180 unsigned long scm_gc_malloc_collected
;
181 unsigned long scm_gc_ports_collected
;
182 unsigned long scm_gc_rt
;
183 unsigned long scm_gc_time_taken
= 0;
185 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
186 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
187 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
188 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
189 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
190 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
193 struct scm_heap_seg_data
195 /* lower and upper bounds of the segment */
196 SCM_CELLPTR bounds
[2];
198 /* address of the head-of-freelist pointer for this segment's cells.
199 All segments usually point to the same one, scm_freelist. */
202 /* number of SCM words per object in this segment */
205 /* If SEG_DATA->valid is non-zero, the conservative marking
206 functions will apply SEG_DATA->valid to the purported pointer and
207 SEG_DATA, and mark the object iff the function returns non-zero.
208 At the moment, I don't think anyone uses this. */
215 static void scm_mark_weak_vector_spines
SCM_P ((void));
216 static scm_sizet init_heap_seg
SCM_P ((SCM_CELLPTR
, scm_sizet
, int, SCM
*));
217 static void alloc_some_heap
SCM_P ((int, SCM
*));
221 /* Debugging functions. */
223 #ifdef DEBUG_FREELIST
225 /* Return the number of the heap segment containing CELL. */
231 for (i
= 0; i
< scm_n_heap_segs
; i
++)
232 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], (SCM_CELLPTR
) cell
)
233 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], (SCM_CELLPTR
) cell
))
235 fprintf (stderr
, "which_seg: can't find segment containing cell %lx\n",
241 SCM_PROC (s_map_free_list
, "map-free-list", 0, 0, 0, scm_map_free_list
);
245 int last_seg
= -1, count
= 0;
248 fprintf (stderr
, "%d segments total\n", scm_n_heap_segs
);
249 for (f
= scm_freelist
; SCM_NIMP (f
); f
= SCM_CDR (f
))
251 int this_seg
= which_seg (f
);
253 if (this_seg
!= last_seg
)
256 fprintf (stderr
, " %5d cells in segment %d\n", count
, last_seg
);
263 fprintf (stderr
, " %5d cells in segment %d\n", count
, last_seg
);
267 return SCM_UNSPECIFIED
;
271 /* Number of calls to SCM_NEWCELL since startup. */
272 static unsigned long scm_newcell_count
;
274 /* Search freelist for anything that isn't marked as a free cell.
275 Abort if we find something. */
277 scm_check_freelist ()
282 for (f
= scm_freelist
; SCM_NIMP (f
); f
= SCM_CDR (f
), i
++)
283 if (SCM_CAR (f
) != (SCM
) scm_tc_free_cell
)
285 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %d'th elt\n",
286 scm_newcell_count
, i
);
292 static int scm_debug_check_freelist
= 0;
294 scm_debug_newcell (void)
299 if (scm_debug_check_freelist
)
300 scm_check_freelist ();
302 /* The rest of this is supposed to be identical to the SCM_NEWCELL
304 if (SCM_IMP (scm_freelist
))
305 new = scm_gc_for_newcell ();
309 scm_freelist
= SCM_CDR (scm_freelist
);
310 ++scm_cells_allocated
;
316 #endif /* DEBUG_FREELIST */
320 /* {Scheme Interface to GC}
323 SCM_PROC (s_gc_stats
, "gc-stats", 0, 0, 0, scm_gc_stats
);
330 SCM local_scm_mtrigger
;
331 SCM local_scm_mallocated
;
332 SCM local_scm_heap_size
;
333 SCM local_scm_cells_allocated
;
334 SCM local_scm_gc_time_taken
;
342 for (i
= scm_n_heap_segs
; i
--; )
343 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
344 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
346 if (scm_n_heap_segs
!= n
)
350 local_scm_mtrigger
= scm_mtrigger
;
351 local_scm_mallocated
= scm_mallocated
;
352 local_scm_heap_size
= scm_heap_size
;
353 local_scm_cells_allocated
= scm_cells_allocated
;
354 local_scm_gc_time_taken
= scm_gc_time_taken
;
356 answer
= scm_listify (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
357 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
358 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
359 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
360 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
361 scm_cons (sym_heap_segments
, heap_segs
),
372 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ());
373 scm_gc_cells_collected
= 0;
374 scm_gc_malloc_collected
= 0;
375 scm_gc_ports_collected
= 0;
381 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt
;
382 scm_gc_time_taken
= scm_gc_time_taken
+ scm_gc_rt
;
383 scm_system_async_mark (scm_gc_async
);
387 SCM_PROC (s_object_address
, "object-address", 1, 0, 0, scm_object_address
);
389 scm_object_address (obj
)
392 return scm_ulong2num ((unsigned long)obj
);
396 SCM_PROC(s_gc
, "gc", 0, 0, 0, scm_gc
);
403 return SCM_UNSPECIFIED
;
408 /* {C Interface For When GC is Triggered}
412 scm_gc_for_alloc (ncells
, freelistp
)
418 if ((scm_gc_cells_collected
< MIN_GC_YIELD
) || SCM_IMP (*freelistp
))
420 alloc_some_heap (ncells
, freelistp
);
427 scm_gc_for_newcell ()
430 scm_gc_for_alloc (1, &scm_freelist
);
432 scm_freelist
= SCM_CDR (fl
);
443 /* During the critical section, only the current thread may run. */
444 SCM_THREAD_CRITICAL_SECTION_START
;
447 /* fprintf (stderr, "gc: %s\n", what); */
451 if (!scm_stack_base
|| scm_block_gc
)
457 if (scm_mallocated
< 0)
458 /* The byte count of allocated objects has underflowed. This is
459 probably because you forgot to report the sizes of objects you
460 have allocated, by calling scm_done_malloc or some such. When
461 the GC freed them, it subtracted their size from
462 scm_mallocated, which underflowed. */
465 if (scm_gc_heap_lock
)
466 /* We've invoked the collector while a GC is already in progress.
467 That should never happen. */
472 scm_weak_vectors
= SCM_EOL
;
474 scm_guardian_gc_init ();
476 /* unprotect any struct types with no instances */
482 pos
= &scm_type_obj_list
;
483 type_list
= scm_type_obj_list
;
484 while (type_list
!= SCM_EOL
)
485 if (SCM_VELTS (SCM_CAR (type_list
))[scm_struct_i_refcnt
])
487 pos
= SCM_CDRLOC (type_list
);
488 type_list
= SCM_CDR (type_list
);
492 *pos
= SCM_CDR (type_list
);
493 type_list
= SCM_CDR (type_list
);
498 /* flush dead entries from the continuation stack */
503 elts
= SCM_VELTS (scm_continuation_stack
);
504 bound
= SCM_LENGTH (scm_continuation_stack
);
505 x
= SCM_INUM (scm_continuation_stack_ptr
);
508 elts
[x
] = SCM_BOOL_F
;
515 /* Protect from the C stack. This must be the first marking
516 * done because it provides information about what objects
517 * are "in-use" by the C code. "in-use" objects are those
518 * for which the values from SCM_LENGTH and SCM_CHARS must remain
519 * usable. This requirement is stricter than a liveness
520 * requirement -- in particular, it constrains the implementation
521 * of scm_vector_set_length_x.
523 SCM_FLUSH_REGISTER_WINDOWS
;
524 /* This assumes that all registers are saved into the jmp_buf */
525 setjmp (scm_save_regs_gc_mark
);
526 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
527 ( (scm_sizet
) (sizeof (SCM_STACKITEM
) - 1 +
528 sizeof scm_save_regs_gc_mark
)
529 / sizeof (SCM_STACKITEM
)));
532 /* stack_len is long rather than scm_sizet in order to guarantee that
533 &stack_len is long aligned */
534 #ifdef SCM_STACK_GROWS_UP
536 long stack_len
= (SCM_STACKITEM
*) (&stack_len
) - scm_stack_base
;
538 long stack_len
= scm_stack_size (scm_stack_base
);
540 scm_mark_locations (scm_stack_base
, (scm_sizet
) stack_len
);
543 long stack_len
= scm_stack_base
- (SCM_STACKITEM
*) (&stack_len
);
545 long stack_len
= scm_stack_size (scm_stack_base
);
547 scm_mark_locations ((scm_stack_base
- stack_len
), (scm_sizet
) stack_len
);
551 #else /* USE_THREADS */
553 /* Mark every thread's stack and registers */
554 scm_threads_mark_stacks();
556 #endif /* USE_THREADS */
558 /* FIXME: insert a phase to un-protect string-data preserved
559 * in scm_vector_set_length_x.
562 j
= SCM_NUM_PROTECTS
;
564 scm_gc_mark (scm_sys_protects
[j
]);
567 scm_gc_mark (scm_root
->handle
);
570 scm_mark_weak_vector_spines ();
572 scm_guardian_zombify ();
580 SCM_THREAD_CRITICAL_SECTION_END
;
590 /* Mark an object precisely.
606 if (SCM_NCELLP (ptr
))
607 scm_wta (ptr
, "rogue pointer in heap", NULL
);
609 switch (SCM_TYP7 (ptr
))
611 case scm_tcs_cons_nimcar
:
612 if (SCM_GCMARKP (ptr
))
615 if (SCM_IMP (SCM_CDR (ptr
))) /* SCM_IMP works even with a GC mark */
620 scm_gc_mark (SCM_CAR (ptr
));
621 ptr
= SCM_GCCDR (ptr
);
623 case scm_tcs_cons_imcar
:
625 if (SCM_GCMARKP (ptr
))
628 ptr
= SCM_GCCDR (ptr
);
630 case scm_tcs_cons_gloc
:
631 if (SCM_GCMARKP (ptr
))
636 vcell
= SCM_CAR (ptr
) - 1L;
637 switch (SCM_CDR (vcell
))
641 ptr
= SCM_GCCDR (ptr
);
653 vtable_data
= (SCM
*)vcell
;
654 layout
= vtable_data
[scm_vtable_index_layout
];
655 len
= SCM_LENGTH (layout
);
656 fields_desc
= SCM_CHARS (layout
);
657 /* We're using SCM_GCCDR here like STRUCT_DATA, except
658 that it removes the mark */
659 mem
= (SCM
*)SCM_GCCDR (ptr
);
661 if (vtable_data
[scm_struct_i_flags
] & SCM_STRUCTF_ENTITY
)
663 scm_gc_mark (mem
[scm_struct_i_proc
+ 0]);
664 scm_gc_mark (mem
[scm_struct_i_proc
+ 1]);
665 scm_gc_mark (mem
[scm_struct_i_proc
+ 2]);
666 scm_gc_mark (mem
[scm_struct_i_proc
+ 3]);
667 scm_gc_mark (mem
[scm_struct_i_setter
]);
671 for (x
= 0; x
< len
- 2; x
+= 2, ++mem
)
672 if (fields_desc
[x
] == 'p')
674 if (fields_desc
[x
] == 'p')
676 if (SCM_LAYOUT_TAILP (fields_desc
[x
+ 1]))
677 for (x
= *mem
; x
; --x
)
678 scm_gc_mark (*++mem
);
683 if (!SCM_CDR (vcell
))
685 SCM_SETGCMARK (vcell
);
686 ptr
= vtable_data
[scm_vtable_index_vtable
];
693 case scm_tcs_closures
:
694 if (SCM_GCMARKP (ptr
))
697 if (SCM_IMP (SCM_CDR (ptr
)))
699 ptr
= SCM_CLOSCAR (ptr
);
702 scm_gc_mark (SCM_CLOSCAR (ptr
));
703 ptr
= SCM_GCCDR (ptr
);
706 case scm_tc7_lvector
:
710 if (SCM_GC8MARKP (ptr
))
712 SCM_SETGC8MARK (ptr
);
713 i
= SCM_LENGTH (ptr
);
717 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
718 scm_gc_mark (SCM_VELTS (ptr
)[i
]);
719 ptr
= SCM_VELTS (ptr
)[0];
724 SCM_SETGC8MARK (ptr
);
726 scm_mark_locations (SCM_VELTS (ptr
),
729 (sizeof (SCM_STACKITEM
) + -1 +
730 sizeof (scm_contregs
)) /
731 sizeof (SCM_STACKITEM
)));
746 SCM_SETGC8MARK (ptr
);
749 case scm_tc7_substring
:
750 if (SCM_GC8MARKP(ptr
))
752 SCM_SETGC8MARK (ptr
);
757 if (SCM_GC8MARKP(ptr
))
759 SCM_WVECT_GC_CHAIN (ptr
) = scm_weak_vectors
;
760 scm_weak_vectors
= ptr
;
761 SCM_SETGC8MARK (ptr
);
762 if (SCM_IS_WHVEC_ANY (ptr
))
769 len
= SCM_LENGTH (ptr
);
770 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
771 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
773 for (x
= 0; x
< len
; ++x
)
776 alist
= SCM_VELTS (ptr
)[x
];
778 /* mark everything on the alist except the keys or
779 * values, according to weak_values and weak_keys. */
780 while ( SCM_NIMP (alist
)
782 && !SCM_GCMARKP (alist
)
783 && SCM_NIMP (SCM_CAR (alist
))
784 && SCM_CONSP (SCM_CAR (alist
)))
789 kvpair
= SCM_CAR (alist
);
790 next_alist
= SCM_CDR (alist
);
793 * SCM_SETGCMARK (alist);
794 * SCM_SETGCMARK (kvpair);
796 * It may be that either the key or value is protected by
797 * an escaped reference to part of the spine of this alist.
798 * If we mark the spine here, and only mark one or neither of the
799 * key and value, they may never be properly marked.
800 * This leads to a horrible situation in which an alist containing
801 * freelist cells is exported.
803 * So only mark the spines of these arrays last of all marking.
804 * If somebody confuses us by constructing a weak vector
805 * with a circular alist then we are hosed, but at least we
806 * won't prematurely drop table entries.
809 scm_gc_mark (SCM_CAR (kvpair
));
811 scm_gc_mark (SCM_GCCDR (kvpair
));
814 if (SCM_NIMP (alist
))
820 case scm_tc7_msymbol
:
821 if (SCM_GC8MARKP(ptr
))
823 SCM_SETGC8MARK (ptr
);
824 scm_gc_mark (SCM_SYMBOL_FUNC (ptr
));
825 ptr
= SCM_SYMBOL_PROPS (ptr
);
827 case scm_tc7_ssymbol
:
828 if (SCM_GC8MARKP(ptr
))
830 SCM_SETGC8MARK (ptr
);
833 ptr
= (SCM
)(scm_heap_org
+ (((unsigned long)SCM_CAR (ptr
)) >> 8));
836 i
= SCM_PTOBNUM (ptr
);
837 if (!(i
< scm_numptob
))
839 if (SCM_GC8MARKP (ptr
))
841 SCM_SETGC8MARK (ptr
);
842 if (SCM_PTAB_ENTRY(ptr
))
843 scm_gc_mark (SCM_PTAB_ENTRY(ptr
)->file_name
);
844 if (scm_ptobs
[i
].mark
)
846 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
853 if (SCM_GC8MARKP (ptr
))
855 SCM_SETGC8MARK (ptr
);
856 switch SCM_GCTYP16 (ptr
)
857 { /* should be faster than going through scm_smobs */
858 case scm_tc_free_cell
:
859 /* printf("found free_cell %X ", ptr); fflush(stdout); */
860 SCM_SETCDR (ptr
, SCM_EOL
);
862 case scm_tcs_bignums
:
866 i
= SCM_SMOBNUM (ptr
);
867 if (!(i
< scm_numsmob
))
869 if (scm_smobs
[i
].mark
)
871 ptr
= (scm_smobs
[i
].mark
) (ptr
);
879 def
:scm_wta (ptr
, "unknown type in ", "gc_mark");
884 /* Mark a Region Conservatively
888 scm_mark_locations (x
, n
)
894 register SCM_CELLPTR ptr
;
897 if SCM_CELLP (*(SCM
**) & x
[m
])
899 ptr
= (SCM_CELLPTR
) SCM2PTR ((*(SCM
**) & x
[m
]));
901 j
= scm_n_heap_segs
- 1;
902 if ( SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
903 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
910 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
912 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
920 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
924 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
929 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
933 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
939 if ( !scm_heap_table
[seg_id
].valid
940 || scm_heap_table
[seg_id
].valid (ptr
,
941 &scm_heap_table
[seg_id
]))
942 scm_gc_mark (*(SCM
*) & x
[m
]);
951 /* The following is a C predicate which determines if an SCM value can be
952 regarded as a pointer to a cell on the heap. The code is duplicated
953 from scm_mark_locations. */
961 register SCM_CELLPTR ptr
;
963 if SCM_CELLP (*(SCM
**) & value
)
965 ptr
= (SCM_CELLPTR
) SCM2PTR ((*(SCM
**) & value
));
967 j
= scm_n_heap_segs
- 1;
968 if ( SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
969 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
976 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
978 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
986 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
990 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
995 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
999 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1005 if ( !scm_heap_table
[seg_id
].valid
1006 || scm_heap_table
[seg_id
].valid (ptr
,
1007 &scm_heap_table
[seg_id
]))
1019 scm_mark_weak_vector_spines ()
1023 for (w
= scm_weak_vectors
; w
!= SCM_EOL
; w
= SCM_WVECT_GC_CHAIN (w
))
1025 if (SCM_IS_WHVEC_ANY (w
))
1033 ptr
= SCM_VELTS (w
);
1035 for (j
= 0; j
< n
; ++j
)
1040 while ( SCM_NIMP (alist
)
1041 && SCM_CONSP (alist
)
1042 && !SCM_GCMARKP (alist
)
1043 && SCM_NIMP (SCM_CAR (alist
))
1044 && SCM_CONSP (SCM_CAR (alist
)))
1046 SCM_SETGCMARK (alist
);
1047 SCM_SETGCMARK (SCM_CAR (alist
));
1048 alist
= SCM_GCCDR (alist
);
1060 register SCM_CELLPTR ptr
;
1061 #ifdef SCM_POINTERS_MUNGED
1062 register SCM scmptr
;
1065 #define scmptr (SCM)ptr
1067 register SCM nfreelist
;
1068 register SCM
*hp_freelist
;
1076 /* Reset all free list pointers. We'll reconstruct them completely
1078 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1079 *scm_heap_table
[i
].freelistp
= SCM_EOL
;
1081 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1083 register scm_sizet n
= 0;
1084 register scm_sizet j
;
1086 /* Unmarked cells go onto the front of the freelist this heap
1087 segment points to. Rather than updating the real freelist
1088 pointer as we go along, we accumulate the new head in
1089 nfreelist. Then, if it turns out that the entire segment is
1090 free, we free (i.e., malloc's free) the whole segment, and
1091 simply don't assign nfreelist back into the real freelist. */
1092 hp_freelist
= scm_heap_table
[i
].freelistp
;
1093 nfreelist
= *hp_freelist
;
1095 span
= scm_heap_table
[i
].ncells
;
1096 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0]);
1097 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1]) - ptr
;
1098 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
1100 #ifdef SCM_POINTERS_MUNGED
1101 scmptr
= PTR2SCM (ptr
);
1103 switch SCM_TYP7 (scmptr
)
1105 case scm_tcs_cons_gloc
:
1106 if (SCM_GCMARKP (scmptr
))
1108 if (SCM_CDR (SCM_CAR (scmptr
) - 1) == (SCM
)1)
1109 SCM_SETCDR (SCM_CAR (scmptr
) - 1, (SCM
) 0);
1114 vcell
= SCM_CAR (scmptr
) - 1L;
1116 if ((SCM_CDR (vcell
) == 0) || (SCM_CDR (vcell
) == 1))
1118 SCM
*p
= (SCM
*) SCM_GCCDR (scmptr
);
1119 if (((SCM
*) vcell
)[scm_struct_i_flags
]
1120 & SCM_STRUCTF_LIGHT
)
1122 SCM layout
= ((SCM
*)vcell
)[scm_vtable_index_layout
];
1123 m
+= (SCM_LENGTH (layout
) / 2) * sizeof (SCM
);
1128 m
+= p
[scm_struct_i_n_words
] * sizeof (SCM
) + 7;
1129 /* I feel like I'm programming in BCPL here... */
1130 free ((char *) p
[scm_struct_i_ptr
]);
1135 case scm_tcs_cons_imcar
:
1136 case scm_tcs_cons_nimcar
:
1137 case scm_tcs_closures
:
1139 if (SCM_GCMARKP (scmptr
))
1143 if (SCM_GC8MARKP (scmptr
))
1149 m
+= (2 + SCM_LENGTH (scmptr
)) * sizeof (SCM
);
1150 scm_must_free ((char *)(SCM_VELTS (scmptr
) - 2));
1154 case scm_tc7_vector
:
1155 case scm_tc7_lvector
:
1159 if (SCM_GC8MARKP (scmptr
))
1162 m
+= (SCM_LENGTH (scmptr
) * sizeof (SCM
));
1164 scm_must_free (SCM_CHARS (scmptr
));
1165 /* SCM_SETCHARS(scmptr, 0);*/
1168 if SCM_GC8MARKP (scmptr
)
1170 m
+= sizeof (long) * ((SCM_HUGE_LENGTH (scmptr
) + SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
1172 case scm_tc7_byvect
:
1173 if SCM_GC8MARKP (scmptr
)
1175 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (char);
1179 if SCM_GC8MARKP (scmptr
)
1181 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long);
1184 if SCM_GC8MARKP (scmptr
)
1186 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (short);
1189 case scm_tc7_llvect
:
1190 if SCM_GC8MARKP (scmptr
)
1192 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long_long
);
1196 if SCM_GC8MARKP (scmptr
)
1198 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (float);
1201 if SCM_GC8MARKP (scmptr
)
1203 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (double);
1206 if SCM_GC8MARKP (scmptr
)
1208 m
+= SCM_HUGE_LENGTH (scmptr
) * 2 * sizeof (double);
1210 case scm_tc7_substring
:
1211 if (SCM_GC8MARKP (scmptr
))
1214 case scm_tc7_string
:
1215 if (SCM_GC8MARKP (scmptr
))
1217 m
+= SCM_HUGE_LENGTH (scmptr
) + 1;
1219 case scm_tc7_msymbol
:
1220 if (SCM_GC8MARKP (scmptr
))
1222 m
+= ( SCM_LENGTH (scmptr
)
1224 + sizeof (SCM
) * ((SCM
*)SCM_CHARS (scmptr
) - SCM_SLOTS(scmptr
)));
1225 scm_must_free ((char *)SCM_SLOTS (scmptr
));
1227 case scm_tc7_contin
:
1228 if SCM_GC8MARKP (scmptr
)
1230 m
+= SCM_LENGTH (scmptr
) * sizeof (SCM_STACKITEM
) + sizeof (scm_contregs
);
1231 if (SCM_VELTS (scmptr
))
1233 case scm_tc7_ssymbol
:
1234 if SCM_GC8MARKP(scmptr
)
1240 if SCM_GC8MARKP (scmptr
)
1242 if SCM_OPENP (scmptr
)
1244 int k
= SCM_PTOBNUM (scmptr
);
1245 if (!(k
< scm_numptob
))
1247 /* Keep "revealed" ports alive. */
1248 if (scm_revealed_count(scmptr
) > 0)
1250 /* Yes, I really do mean scm_ptobs[k].free */
1251 /* rather than ftobs[k].close. .close */
1252 /* is for explicit CLOSE-PORT by user */
1253 (scm_ptobs
[k
].free
) (scmptr
);
1254 SCM_SETSTREAM (scmptr
, 0);
1255 scm_remove_from_port_table (scmptr
);
1256 scm_gc_ports_collected
++;
1257 SCM_SETAND_CAR (scmptr
, ~SCM_OPN
);
1261 switch SCM_GCTYP16 (scmptr
)
1263 case scm_tc_free_cell
:
1264 if SCM_GC8MARKP (scmptr
)
1268 case scm_tcs_bignums
:
1269 if SCM_GC8MARKP (scmptr
)
1271 m
+= (SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
/ SCM_CHAR_BIT
);
1273 #endif /* def SCM_BIGDIG */
1275 if SCM_GC8MARKP (scmptr
)
1277 switch ((int) (SCM_CAR (scmptr
) >> 16))
1279 case (SCM_IMAG_PART
| SCM_REAL_PART
) >> 16:
1280 m
+= sizeof (double);
1281 case SCM_REAL_PART
>> 16:
1282 case SCM_IMAG_PART
>> 16:
1283 m
+= sizeof (double);
1292 if SCM_GC8MARKP (scmptr
)
1297 k
= SCM_SMOBNUM (scmptr
);
1298 if (!(k
< scm_numsmob
))
1300 m
+= (scm_smobs
[k
].free
) ((SCM
) scmptr
);
1306 sweeperr
:scm_wta (scmptr
, "unknown type in ", "gc_sweep");
1310 if (SCM_CAR (scmptr
) == (SCM
) scm_tc_free_cell
)
1313 /* Stick the new cell on the front of nfreelist. It's
1314 critical that we mark this cell as freed; otherwise, the
1315 conservative collector might trace it as some other type
1317 SCM_SETCAR (scmptr
, (SCM
) scm_tc_free_cell
);
1318 SCM_SETCDR (scmptr
, nfreelist
);
1323 SCM_CLRGC8MARK (scmptr
);
1326 SCM_CLRGCMARK (scmptr
);
1328 #ifdef GC_FREE_SEGMENTS
1333 scm_heap_size
-= seg_size
;
1334 free ((char *) scm_heap_table
[i
].bounds
[0]);
1335 scm_heap_table
[i
].bounds
[0] = 0;
1336 for (j
= i
+ 1; j
< scm_n_heap_segs
; j
++)
1337 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1338 scm_n_heap_segs
-= 1;
1339 i
--; /* We need to scan the segment just moved. */
1342 #endif /* ifdef GC_FREE_SEGMENTS */
1343 /* Update the real freelist pointer to point to the head of
1344 the list of free cells we've built for this segment. */
1345 *hp_freelist
= nfreelist
;
1347 #ifdef DEBUG_FREELIST
1348 scm_check_freelist ();
1349 scm_map_free_list ();
1352 scm_gc_cells_collected
+= n
;
1354 /* Scan weak vectors. */
1357 for (w
= scm_weak_vectors
; w
!= SCM_EOL
; w
= SCM_WVECT_GC_CHAIN (w
))
1359 if (!SCM_IS_WHVEC_ANY (w
))
1363 ptr
= SCM_VELTS (w
);
1365 for (j
= 0; j
< n
; ++j
)
1366 if (SCM_NIMP (ptr
[j
]) && SCM_FREEP (ptr
[j
]))
1367 ptr
[j
] = SCM_BOOL_F
;
1369 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1372 register long n
= SCM_LENGTH (w
);
1375 ptr
= SCM_VELTS (w
);
1377 for (j
= 0; j
< n
; ++j
)
1384 weak_keys
= SCM_IS_WHVEC (obj
) || SCM_IS_WHVEC_B (obj
);
1385 weak_values
= SCM_IS_WHVEC_V (obj
) || SCM_IS_WHVEC_B (obj
);
1390 while (SCM_NIMP (alist
)
1391 && SCM_CONSP (alist
)
1392 && SCM_NIMP (SCM_CAR (alist
))
1393 && SCM_CONSP (SCM_CAR (alist
)))
1398 key
= SCM_CAAR (alist
);
1399 value
= SCM_CDAR (alist
);
1400 if ( (weak_keys
&& SCM_NIMP (key
) && SCM_FREEP (key
))
1401 || (weak_values
&& SCM_NIMP (value
) && SCM_FREEP (value
)))
1403 *fixup
= SCM_CDR (alist
);
1406 fixup
= SCM_CDRLOC (alist
);
1407 alist
= SCM_CDR (alist
);
1413 scm_cells_allocated
= (scm_heap_size
- scm_gc_cells_collected
);
1414 scm_mallocated
-= m
;
1415 scm_gc_malloc_collected
= m
;
1421 /* {Front end to malloc}
1423 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1425 * These functions provide services comparable to malloc, realloc, and
1426 * free. They are for allocating malloced parts of scheme objects.
1427 * The primary purpose of the front end is to impose calls to gc.
1431 * Return newly malloced storage or throw an error.
1433 * The parameter WHAT is a string for error reporting.
1434 * If the threshold scm_mtrigger will be passed by this
1435 * allocation, or if the first call to malloc fails,
1436 * garbage collect -- on the presumption that some objects
1437 * using malloced storage may be collected.
1439 * The limit scm_mtrigger may be raised by this allocation.
1442 scm_must_malloc (len
, what
)
1447 scm_sizet size
= len
;
1448 unsigned long nm
= scm_mallocated
+ size
;
1451 scm_wta (SCM_MAKINUM (len
), (char *) SCM_NALLOC
, what
);
1452 if ((nm
<= scm_mtrigger
))
1454 SCM_SYSCALL (ptr
= (char *) malloc (size
));
1457 scm_mallocated
= nm
;
1463 nm
= scm_mallocated
+ size
;
1464 SCM_SYSCALL (ptr
= (char *) malloc (size
));
1467 scm_mallocated
= nm
;
1468 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1469 if (nm
> scm_mtrigger
)
1470 scm_mtrigger
= nm
+ nm
/ 2;
1472 scm_mtrigger
+= scm_mtrigger
/ 2;
1481 * is similar to scm_must_malloc.
1484 scm_must_realloc (char *where
,
1490 scm_sizet size
= len
;
1491 scm_sizet nm
= scm_mallocated
+ size
- olen
;
1494 scm_wta (SCM_MAKINUM (len
), (char *) SCM_NALLOC
, what
);
1495 if ((nm
<= scm_mtrigger
))
1497 SCM_SYSCALL (ptr
= (char *) realloc (where
, size
));
1500 scm_mallocated
= nm
;
1505 nm
= scm_mallocated
+ size
- olen
;
1506 SCM_SYSCALL (ptr
= (char *) realloc (where
, size
));
1509 scm_mallocated
= nm
;
1510 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1511 if (nm
> scm_mtrigger
)
1512 scm_mtrigger
= nm
+ nm
/ 2;
1514 scm_mtrigger
+= scm_mtrigger
/ 2;
1528 scm_wta (SCM_INUM0
, "already free", "");
1531 /* Announce that there has been some malloc done that will be freed
1532 * during gc. A typical use is for a smob that uses some malloced
1533 * memory but can not get it from scm_must_malloc (for whatever
1534 * reason). When a new object of this smob is created you call
1535 * scm_done_malloc with the size of the object. When your smob free
1536 * function is called, be sure to include this size in the return
1540 scm_done_malloc (size
)
1543 scm_mallocated
+= size
;
1545 if (scm_mallocated
> scm_mtrigger
)
1547 scm_igc ("foreign mallocs");
1548 if (scm_mallocated
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
)
1550 if (scm_mallocated
> scm_mtrigger
)
1551 scm_mtrigger
= scm_mallocated
+ scm_mallocated
/ 2;
1553 scm_mtrigger
+= scm_mtrigger
/ 2;
1563 * Each heap segment is an array of objects of a particular size.
1564 * Every segment has an associated (possibly shared) freelist.
1565 * A table of segment records is kept that records the upper and
1566 * lower extents of the segment; this is used during the conservative
1567 * phase of gc to identify probable gc roots (because they point
1568 * into valid segments at reasonable offsets). */
1571 * is true if the first segment was smaller than INIT_HEAP_SEG.
1572 * If scm_expmem is set to one, subsequent segment allocations will
1573 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1578 * is the lowest base address of any heap segment.
1580 SCM_CELLPTR scm_heap_org
;
1582 struct scm_heap_seg_data
* scm_heap_table
= 0;
1583 int scm_n_heap_segs
= 0;
1586 * is the total number of cells in heap segments.
1588 unsigned long scm_heap_size
= 0;
1591 * initializes a new heap segment and returns the number of objects it contains.
1593 * The segment origin, segment size in bytes, and the span of objects
1594 * in cells are input parameters. The freelist is both input and output.
1596 * This function presumes that the scm_heap_table has already been expanded
1597 * to accommodate a new segment record.
/* init_heap_seg -- carve a freshly malloc'd region into cells, record it
 * in scm_heap_table (sorted by address), and prepend its cells to the
 * freelist.  K&R-style definition; several original lines (parameter
 * declarations, braces, returns) are elided in this extraction.  */
1602 init_heap_seg (seg_org
, size
, ncells
, freelistp
)
1603 SCM_CELLPTR seg_org
;
1608 register SCM_CELLPTR ptr
;
1609 #ifdef SCM_POINTERS_MUNGED
1610 register SCM scmptr
;
1615 SCM_CELLPTR seg_end
;
/* Bail out if the caller's malloc failed (body of this `if' elided).  */
1619 if (seg_org
== NULL
)
1624 /* Compute the ceiling on valid object pointers w/in this segment.
/* Round the segment's end DOWN to a cell boundary.  */
1626 seg_end
= CELL_DN ((char *) ptr
+ size
);
1628 /* Find the right place and insert the segment record.
/* Linear scan for the first record whose base is above seg_org, keeping
 * the table sorted so conservative root scanning can binary-search it.  */
1631 for (new_seg_index
= 0;
1632 ( (new_seg_index
< scm_n_heap_segs
)
1633 && SCM_PTR_LE (scm_heap_table
[new_seg_index
].bounds
[0], seg_org
));
/* Shift later records up one slot to open a hole at new_seg_index.  */
1639 for (i
= scm_n_heap_segs
; i
> new_seg_index
; --i
)
1640 scm_heap_table
[i
] = scm_heap_table
[i
- 1];
/* Fill in the new segment record.  */
1645 scm_heap_table
[new_seg_index
].valid
= 0;
1646 scm_heap_table
[new_seg_index
].ncells
= ncells
;
1647 scm_heap_table
[new_seg_index
].freelistp
= freelistp
;
1648 scm_heap_table
[new_seg_index
].bounds
[0] = (SCM_CELLPTR
)ptr
;
1649 scm_heap_table
[new_seg_index
].bounds
[1] = (SCM_CELLPTR
)seg_end
;
1652 /* Compute the least valid object pointer w/in this segment
/* Round the start UP to a cell boundary.  */
1654 ptr
= CELL_UP (ptr
);
/* Cell count between the aligned start and end of the segment.  */
1657 n_new_objects
= seg_end
- ptr
;
1659 /* Prepend objects in this segment to the freelist.
/* Walk the segment, chaining each cell: car = free-cell tag, cdr = next
 * cell `ncells' further on (loop increment elided in this extraction).  */
1661 while (ptr
< seg_end
)
1663 #ifdef SCM_POINTERS_MUNGED
1664 scmptr
= PTR2SCM (ptr
);
1666 SCM_SETCAR (scmptr
, (SCM
) scm_tc_free_cell
);
1667 SCM_SETCDR (scmptr
, PTR2SCM (ptr
+ ncells
));
1673 /* Patch up the last freelist pointer in the segment
1674 * to join it to the input freelist.
1676 SCM_SETCDR (PTR2SCM (ptr
), *freelistp
);
/* The segment's first cell becomes the new freelist head.  */
1677 *freelistp
= PTR2SCM (CELL_UP (seg_org
));
/* NOTE(review): looks like this should count cells, yet it multiplies
 * span by object count -- behavior kept as-is; verify against full file.  */
1679 scm_heap_size
+= (ncells
* n_new_objects
);
/* alloc_some_heap -- grow the heap by one new segment holding objects of
 * `ncells' cells each, linked onto *freelistp.  K&R definition; parameter
 * declarations and some control-flow lines are elided in this extraction.  */
1688 alloc_some_heap (ncells
, freelistp
)
1692 struct scm_heap_seg_data
* tmptable
;
1696 /* Critical code sections (such as the garbage collector)
1697 * aren't supposed to add heap segments.
1699 if (scm_gc_heap_lock
)
1700 scm_wta (SCM_UNDEFINED
, "need larger initial", "heap");
1702 /* Expand the heap tables to have room for the new segment.
1703 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
1704 * only if the allocation of the segment itself succeeds.
1706 len
= (1 + scm_n_heap_segs
) * sizeof (struct scm_heap_seg_data
);
/* realloc via a temporary so scm_heap_table stays valid on failure.  */
1708 SCM_SYSCALL (tmptable
= ((struct scm_heap_seg_data
*)
1709 realloc ((char *)scm_heap_table
, len
)));
1711 scm_wta (SCM_UNDEFINED
, "could not grow", "hplims");
1713 scm_heap_table
= tmptable
;
1716 /* Pick a size for the new heap segment.
1717 * The rule for picking the size of a segment is explained in
/* Default: exponential growth of the current heap size, in bytes.  */
1722 len
= (scm_sizet
) (SCM_EXPHEAP (scm_heap_size
) * sizeof (scm_cell
))
/* Guard against scm_sizet overflow in the size computation.  */
1723 if ((scm_sizet
) (SCM_EXPHEAP (scm_heap_size
) * sizeof (scm_cell
)) != len
)
1727 len
= SCM_HEAP_SEG_SIZE
;
/* Never allocate less than one full object's worth of cells.  */
1732 smallest
= (ncells
* sizeof (scm_cell
));
1734 len
= (ncells
* sizeof (scm_cell
));
1736 /* Allocate with decaying ambition. */
/* Try malloc at `len', halving on failure (decay step elided) until the
 * request drops below the minimum segment size or below `smallest'.  */
1737 while ((len
>= SCM_MIN_HEAP_SEG_SIZE
)
1738 && (len
>= smallest
))
1740 SCM_SYSCALL (ptr
= (SCM_CELLPTR
) malloc (len
));
1743 init_heap_seg (ptr
, len
, ncells
, freelistp
);
/* All attempts failed: fatal error.  */
1750 scm_wta (SCM_UNDEFINED
, "could not grow", "heap");
/* Register `unhash-name' as a 1-argument Scheme primitive bound to
 * scm_unhash_name.  */
1755 SCM_PROC (s_unhash_name
, "unhash-name", 1, 0, 0, scm_unhash_name
);
/* scm_unhash_name -- walk every cell of every heap segment looking for
 * cached variable references matching NAME (or all names, if NAME is #t)
 * and invalidate them.  K&R definition; local declarations, the inner
 * cell loop, and the mutation step are elided in this extraction.  */
1757 scm_unhash_name (name
)
/* NAME must be a symbol (a non-immediate satisfying SCM_SYMBOLP).  */
1762 SCM_ASSERT (SCM_NIMP (name
) && SCM_SYMBOLP (name
), name
, SCM_ARG1
, s_unhash_name
);
/* Iterate over all currently registered heap segments.  */
1764 bound
= scm_n_heap_segs
;
1765 for (x
= 0; x
< bound
; ++x
)
/* [p, pbound) are this segment's lower and upper cell bounds.  */
1769 p
= (SCM_CELLPTR
)scm_heap_table
[x
].bounds
[0];
1770 pbound
= (SCM_CELLPTR
)scm_heap_table
[x
].bounds
[1];
/* Tag test on the cell's car (low 3 bits == 1) -- presumably selects
 * the cached-variable representation; TODO confirm against full file.  */
1775 if (1 == (7 & (int)incar
))
/* Match either every name (#t) or this specific symbol, and skip
 * cells whose cdr is the 0/1 sentinel values.  */
1778 if ( ((name
== SCM_BOOL_T
) || (SCM_CAR (incar
) == name
))
1779 && (SCM_CDR (incar
) != 0)
1780 && (SCM_CDR (incar
) != 1))
1794 /* {GC Protection Helper Functions}
/* scm_return_first -- variadic GC-protection helper: returns its first
 * argument (body elided in this extraction; the extra arguments exist
 * only to keep their values live on the stack across the call).  */
1805 scm_return_first (SCM elt
, ...)
/* scm_permanent_object -- protect OBJ from collection forever by consing
 * it onto the GC-known scm_permobjs list.  K&R definition; parameter
 * declaration and return are elided in this extraction.  */
1812 scm_permanent_object (obj
)
1816 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
1822 /* Protect OBJ from the garbage collector. OBJ will not be freed,
1823 even if all other references are dropped, until someone applies
1824 scm_unprotect_object to it. This function returns OBJ.
1826 Calls to scm_protect_object nest. For every object O, there is a
1827 counter which scm_protect_object(O) increments and
1828 scm_unprotect_object(O) decrements, if it is greater than zero. If
1829 an object's counter is greater than zero, the garbage collector
1832 Of course, that's not how it's implemented. scm_protect_object and
1833 scm_unprotect_object just maintain a list of references to things.
1834 Since the GC knows about this list, all objects it mentions stay
1835 alive. scm_protect_object adds its argument to the list;
1836 scm_unprotect_object removes the first occurrence of its argument
/* scm_protect_object -- protect OBJ (see the contract comment above) by
 * consing it onto the GC-known scm_protects list; one list entry per
 * call, so protection nests.  Declarations/return elided here.  */
1839 scm_protect_object (obj
)
1842 scm_protects
= scm_cons (obj
, scm_protects
);
1848 /* Remove any protection for OBJ established by a prior call to
1849 scm_protect_object. This function returns OBJ.
1851 See scm_protect_object for more information. */
/* scm_unprotect_object -- undo one scm_protect_object call by unlinking
 * the first occurrence of OBJ from the scm_protects list.  K&R
 * definition; braces, break, and return are elided in this extraction.  */
1853 scm_unprotect_object (obj
)
/* tail_ptr walks the list by address so a match can be spliced out
 * in place without a separate `previous' pointer.  */
1856 SCM
*tail_ptr
= &scm_protects
;
1858 while (SCM_NIMP (*tail_ptr
) && SCM_CONSP (*tail_ptr
))
1859 if (SCM_CAR (*tail_ptr
) == obj
)
/* Found: splice this pair out of the list.  */
1861 *tail_ptr
= SCM_CDR (*tail_ptr
);
/* Not this one: advance to the address of the next link's cdr.  */
1865 tail_ptr
= SCM_CDRLOC (*tail_ptr
);
1872 /* called on process termination. */
/* Exit hook: flush buffered port output (remaining body elided).  */
1873 static void cleanup (void)
1876 scm_flush_all_ports ();
/* scm_init_storage -- one-time startup of the storage subsystem: clears
 * the protect roots, builds the first heap segment, the port table, and
 * the core interned objects/hash tables.  This definition continues past
 * the end of this chunk; several lines are elided in this extraction.  */
1881 scm_init_storage (scm_sizet init_heap_size
)
/* Clear every slot of the protected-roots vector (loop header elided).  */
1885 j
= SCM_NUM_PROTECTS
;
1887 scm_sys_protects
[--j
] = SCM_BOOL_F
;
1889 scm_freelist
= SCM_EOL
;
/* Default first-segment size and initial malloc GC trigger.  */
1892 j
= SCM_HEAP_SEG_SIZE
;
1893 scm_mtrigger
= SCM_INIT_MALLOC_LIMIT
;
/* Room for exactly one segment record; alloc_some_heap grows it later.  */
1894 scm_heap_table
= ((struct scm_heap_seg_data
*)
1895 scm_must_malloc (sizeof (struct scm_heap_seg_data
), "hplims"));
1896 if (0L == init_heap_size
)
1897 init_heap_size
= SCM_INIT_HEAP_SIZE
;
/* Try the caller's requested size first (presumably assigned to j on a
 * line elided here); fall back to the default size on failure.  */
1899 if ((init_heap_size
!= j
)
1900 || !init_heap_seg ((SCM_CELLPTR
) malloc (j
), j
, 1, &scm_freelist
))
1902 j
= SCM_HEAP_SEG_SIZE
;
1903 if (!init_heap_seg ((SCM_CELLPTR
) malloc (j
), j
, 1, &scm_freelist
))
/* Record the heap's base from the first (lowest) segment.  */
1908 scm_heap_org
= CELL_UP (scm_heap_table
[0].bounds
[0]);
1909 /* scm_hplims[0] can change. do not remove scm_heap_org */
1910 scm_weak_vectors
= SCM_EOL
;
1912 /* Initialise the list of ports. */
1913 scm_port_table
= (scm_port
**)
1914 malloc (sizeof (scm_port
*) * scm_port_table_room
);
/* Error path for the port-table malloc (body elided).  */
1915 if (!scm_port_table
)
/* Self-referential pair used as the "undefined" marker list.  */
1920 scm_undefineds
= scm_cons (SCM_UNDEFINED
, SCM_EOL
);
1921 SCM_SETCDR (scm_undefineds
, scm_undefineds
);
/* Canonical shared objects: (()), "" and #().  */
1923 scm_listofnull
= scm_cons (SCM_EOL
, SCM_EOL
);
1924 scm_nullstr
= scm_makstr (0L, 0);
1925 scm_nullvect
= scm_make_vector (SCM_INUM0
, SCM_UNDEFINED
);
/* Symbol hash tables: strong, weak-keyed, and variable bindings.  */
1926 scm_symhash
= scm_make_vector ((SCM
) SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
);
1927 scm_weak_symhash
= scm_make_weak_key_hash_table ((SCM
) SCM_MAKINUM (scm_symhash_dim
));
1928 scm_symhash_vars
= scm_make_vector ((SCM
) SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
);
/* Empty GC-root lists for the remaining subsystems.  */
1929 scm_stand_in_procs
= SCM_EOL
;
1930 scm_permobjs
= SCM_EOL
;
1931 scm_protects
= SCM_EOL
;
1932 scm_asyncs
= SCM_EOL
;
/* Export implementation limits to Scheme.  */
1933 scm_sysintern ("most-positive-fixnum", (SCM
) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM
));
1934 scm_sysintern ("most-negative-fixnum", (SCM
) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM
));
1936 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD
));
));