1 /* Copyright (C) 1995, 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
49 #include "guardians.h"
66 #define var_start(x, y) va_start(x, y)
69 #define var_start(x, y) va_start(x)
73 /* {heap tuning parameters}
75 * These are parameters for controlling memory allocation. The heap
76 * is the area out of which scm_cons, and object headers are allocated.
78 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
79 * 64 bit machine. The units of the _SIZE parameters are bytes.
80 * Cons pairs and object headers occupy one heap cell.
82 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
83 * allocated initially the heap will grow by half its current size
84 * each subsequent time more heap is needed.
86 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
87 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
88 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
89 * is in scm_init_storage() and alloc_some_heap() in sys.c
91 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
92 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
94 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
97 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
100 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
101 * reclaimed by a GC triggered by must_malloc. If less than this is
102 * reclaimed, the trigger threshold is raised. [I don't know what a
103 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
104 * work around an oscillation that caused almost constant GC.]
107 #define SCM_INIT_HEAP_SIZE (32768L*sizeof(scm_cell))
108 #define SCM_MIN_HEAP_SEG_SIZE (2048L*sizeof(scm_cell))
110 # define SCM_HEAP_SEG_SIZE 32768L
113 # define SCM_HEAP_SEG_SIZE (7000L*sizeof(scm_cell))
115 # define SCM_HEAP_SEG_SIZE (16384L*sizeof(scm_cell))
118 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size*2)
119 #define SCM_INIT_MALLOC_LIMIT 100000
120 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
122 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
123 bounds for allocated storage */
126 /*in 386 protected mode we must only adjust the offset */
127 # define CELL_UP(p) MK_FP(FP_SEG(p), ~7&(FP_OFF(p)+7))
128 # define CELL_DN(p) MK_FP(FP_SEG(p), ~7&FP_OFF(p))
131 # define CELL_UP(p) (SCM_CELLPTR)(~1L & ((long)(p)+1L))
132 # define CELL_DN(p) (SCM_CELLPTR)(~1L & (long)(p))
134 # define CELL_UP(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & ((long)(p)+sizeof(scm_cell)-1L))
135 # define CELL_DN(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & (long)(p))
142 * is the head of freelist of cons pairs.
144 SCM scm_freelist
= SCM_EOL
;
147 * is the number of bytes of must_malloc allocation needed to trigger gc.
149 unsigned long scm_mtrigger
;
153 * If set, don't expand the heap. Set only during gc, during which no allocation
154 * is supposed to take place anyway.
156 int scm_gc_heap_lock
= 0;
159 * Don't pause for collection if this is set -- just
163 int scm_block_gc
= 1;
165 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
166 * collection (GC) more space is allocated for the heap.
168 #define MIN_GC_YIELD (scm_heap_size/4)
170 /* During collection, this accumulates objects holding
173 SCM scm_weak_vectors
;
175 /* GC Statistics Keeping
177 unsigned long scm_cells_allocated
= 0;
178 long scm_mallocated
= 0;
179 unsigned long scm_gc_cells_collected
;
180 unsigned long scm_gc_malloc_collected
;
181 unsigned long scm_gc_ports_collected
;
182 unsigned long scm_gc_rt
;
183 unsigned long scm_gc_time_taken
= 0;
185 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
186 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
187 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
188 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
189 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
190 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
193 struct scm_heap_seg_data
195 /* lower and upper bounds of the segment */
196 SCM_CELLPTR bounds
[2];
198 /* address of the head-of-freelist pointer for this segment's cells.
199 All segments usually point to the same one, scm_freelist. */
202 /* number of SCM words per object in this segment */
205 /* If SEG_DATA->valid is non-zero, the conservative marking
206 functions will apply SEG_DATA->valid to the purported pointer and
207 SEG_DATA, and mark the object iff the function returns non-zero.
208 At the moment, I don't think anyone uses this. */
215 static void scm_mark_weak_vector_spines
SCM_P ((void));
216 static scm_sizet init_heap_seg
SCM_P ((SCM_CELLPTR
, scm_sizet
, int, SCM
*));
217 static void alloc_some_heap
SCM_P ((int, SCM
*));
221 /* Debugging functions. */
223 #ifdef GUILE_DEBUG_FREELIST
225 /* Return the number of the heap segment containing CELL. */
231 for (i
= 0; i
< scm_n_heap_segs
; i
++)
232 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], (SCM_CELLPTR
) cell
)
233 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], (SCM_CELLPTR
) cell
))
235 fprintf (stderr
, "which_seg: can't find segment containing cell %lx\n",
241 SCM_PROC (s_map_free_list
, "map-free-list", 0, 0, 0, scm_map_free_list
);
245 int last_seg
= -1, count
= 0;
248 fprintf (stderr
, "%d segments total\n", scm_n_heap_segs
);
249 for (f
= scm_freelist
; SCM_NIMP (f
); f
= SCM_CDR (f
))
251 int this_seg
= which_seg (f
);
253 if (this_seg
!= last_seg
)
256 fprintf (stderr
, " %5d cells in segment %d\n", count
, last_seg
);
263 fprintf (stderr
, " %5d cells in segment %d\n", count
, last_seg
);
267 return SCM_UNSPECIFIED
;
271 /* Number of calls to SCM_NEWCELL since startup. */
272 static unsigned long scm_newcell_count
;
274 /* Search freelist for anything that isn't marked as a free cell.
275 Abort if we find something. */
277 scm_check_freelist ()
282 for (f
= scm_freelist
; SCM_NIMP (f
); f
= SCM_CDR (f
), i
++)
283 if (SCM_CAR (f
) != (SCM
) scm_tc_free_cell
)
285 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %d'th elt\n",
286 scm_newcell_count
, i
);
292 static int scm_debug_check_freelist
= 0;
294 SCM_PROC (s_gc_set_debug_check_freelist_x
, "gc-set-debug-check-freelist!", 1, 0, 0, scm_gc_set_debug_check_freelist_x
);
296 scm_gc_set_debug_check_freelist_x (SCM flag
)
298 SCM_ASSERT(SCM_BOOL_T
== flag
|| SCM_BOOL_F
== flag
,
299 flag
, 1, s_gc_set_debug_check_freelist_x
);
300 scm_debug_check_freelist
= (SCM_BOOL_T
==flag
)? 1: 0;
301 return SCM_UNSPECIFIED
;
306 scm_debug_newcell (void)
311 if (scm_debug_check_freelist
) {
312 scm_check_freelist ();
316 /* The rest of this is supposed to be identical to the SCM_NEWCELL
318 if (SCM_IMP (scm_freelist
))
319 new = scm_gc_for_newcell ();
323 scm_freelist
= SCM_CDR (scm_freelist
);
324 ++scm_cells_allocated
;
330 #endif /* GUILE_DEBUG_FREELIST */
334 /* {Scheme Interface to GC}
337 SCM_PROC (s_gc_stats
, "gc-stats", 0, 0, 0, scm_gc_stats
);
344 SCM local_scm_mtrigger
;
345 SCM local_scm_mallocated
;
346 SCM local_scm_heap_size
;
347 SCM local_scm_cells_allocated
;
348 SCM local_scm_gc_time_taken
;
356 for (i
= scm_n_heap_segs
; i
--; )
357 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
358 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
360 if (scm_n_heap_segs
!= n
)
364 local_scm_mtrigger
= scm_mtrigger
;
365 local_scm_mallocated
= scm_mallocated
;
366 local_scm_heap_size
= scm_heap_size
;
367 local_scm_cells_allocated
= scm_cells_allocated
;
368 local_scm_gc_time_taken
= scm_gc_time_taken
;
370 answer
= scm_listify (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
371 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
372 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
373 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
374 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
375 scm_cons (sym_heap_segments
, heap_segs
),
386 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ());
387 scm_gc_cells_collected
= 0;
388 scm_gc_malloc_collected
= 0;
389 scm_gc_ports_collected
= 0;
395 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt
;
396 scm_gc_time_taken
= scm_gc_time_taken
+ scm_gc_rt
;
397 scm_system_async_mark (scm_gc_async
);
401 SCM_PROC (s_object_address
, "object-address", 1, 0, 0, scm_object_address
);
403 scm_object_address (obj
)
406 return scm_ulong2num ((unsigned long)obj
);
410 SCM_PROC(s_gc
, "gc", 0, 0, 0, scm_gc
);
417 return SCM_UNSPECIFIED
;
422 /* {C Interface For When GC is Triggered}
426 scm_gc_for_alloc (ncells
, freelistp
)
432 if ((scm_gc_cells_collected
< MIN_GC_YIELD
) || SCM_IMP (*freelistp
))
434 alloc_some_heap (ncells
, freelistp
);
441 scm_gc_for_newcell ()
444 scm_gc_for_alloc (1, &scm_freelist
);
446 scm_freelist
= SCM_CDR (fl
);
457 /* During the critical section, only the current thread may run. */
458 SCM_THREAD_CRITICAL_SECTION_START
;
461 /* fprintf (stderr, "gc: %s\n", what); */
465 if (!scm_stack_base
|| scm_block_gc
)
471 if (scm_mallocated
< 0)
472 /* The byte count of allocated objects has underflowed. This is
473 probably because you forgot to report the sizes of objects you
474 have allocated, by calling scm_done_malloc or some such. When
475 the GC freed them, it subtracted their size from
476 scm_mallocated, which underflowed. */
479 if (scm_gc_heap_lock
)
480 /* We've invoked the collector while a GC is already in progress.
481 That should never happen. */
486 scm_weak_vectors
= SCM_EOL
;
488 scm_guardian_gc_init ();
490 /* unprotect any struct types with no instances */
496 pos
= &scm_type_obj_list
;
497 type_list
= scm_type_obj_list
;
498 while (type_list
!= SCM_EOL
)
499 if (SCM_VELTS (SCM_CAR (type_list
))[scm_struct_i_refcnt
])
501 pos
= SCM_CDRLOC (type_list
);
502 type_list
= SCM_CDR (type_list
);
506 *pos
= SCM_CDR (type_list
);
507 type_list
= SCM_CDR (type_list
);
512 /* flush dead entries from the continuation stack */
517 elts
= SCM_VELTS (scm_continuation_stack
);
518 bound
= SCM_LENGTH (scm_continuation_stack
);
519 x
= SCM_INUM (scm_continuation_stack_ptr
);
522 elts
[x
] = SCM_BOOL_F
;
529 /* Protect from the C stack. This must be the first marking
530 * done because it provides information about what objects
531 * are "in-use" by the C code. "in-use" objects are those
532 * for which the values from SCM_LENGTH and SCM_CHARS must remain
533 * usable. This requirement is stricter than a liveness
534 * requirement -- in particular, it constrains the implementation
535 * of scm_vector_set_length_x.
537 SCM_FLUSH_REGISTER_WINDOWS
;
538 /* This assumes that all registers are saved into the jmp_buf */
539 setjmp (scm_save_regs_gc_mark
);
540 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
541 ( (scm_sizet
) (sizeof (SCM_STACKITEM
) - 1 +
542 sizeof scm_save_regs_gc_mark
)
543 / sizeof (SCM_STACKITEM
)));
546 /* stack_len is long rather than scm_sizet in order to guarantee that
547 &stack_len is long aligned */
548 #ifdef SCM_STACK_GROWS_UP
550 long stack_len
= (SCM_STACKITEM
*) (&stack_len
) - scm_stack_base
;
552 long stack_len
= scm_stack_size (scm_stack_base
);
554 scm_mark_locations (scm_stack_base
, (scm_sizet
) stack_len
);
557 long stack_len
= scm_stack_base
- (SCM_STACKITEM
*) (&stack_len
);
559 long stack_len
= scm_stack_size (scm_stack_base
);
561 scm_mark_locations ((scm_stack_base
- stack_len
), (scm_sizet
) stack_len
);
565 #else /* USE_THREADS */
567 /* Mark every thread's stack and registers */
568 scm_threads_mark_stacks();
570 #endif /* USE_THREADS */
572 /* FIXME: insert a phase to un-protect string-data preserved
573 * in scm_vector_set_length_x.
576 j
= SCM_NUM_PROTECTS
;
578 scm_gc_mark (scm_sys_protects
[j
]);
580 /* FIXME: we should have a means to register C functions to be run
581 * in different phases of GC
583 scm_mark_subr_table ();
586 scm_gc_mark (scm_root
->handle
);
589 scm_mark_weak_vector_spines ();
591 scm_guardian_zombify ();
599 SCM_THREAD_CRITICAL_SECTION_END
;
609 /* Mark an object precisely.
625 if (SCM_NCELLP (ptr
))
626 scm_wta (ptr
, "rogue pointer in heap", NULL
);
628 switch (SCM_TYP7 (ptr
))
630 case scm_tcs_cons_nimcar
:
631 if (SCM_GCMARKP (ptr
))
634 if (SCM_IMP (SCM_CDR (ptr
))) /* SCM_IMP works even with a GC mark */
639 scm_gc_mark (SCM_CAR (ptr
));
640 ptr
= SCM_GCCDR (ptr
);
642 case scm_tcs_cons_imcar
:
644 if (SCM_GCMARKP (ptr
))
647 ptr
= SCM_GCCDR (ptr
);
649 case scm_tcs_cons_gloc
:
650 if (SCM_GCMARKP (ptr
))
655 vcell
= SCM_CAR (ptr
) - 1L;
656 switch (SCM_CDR (vcell
))
660 ptr
= SCM_GCCDR (ptr
);
672 vtable_data
= (SCM
*)vcell
;
673 layout
= vtable_data
[scm_vtable_index_layout
];
674 len
= SCM_LENGTH (layout
);
675 fields_desc
= SCM_CHARS (layout
);
676 /* We're using SCM_GCCDR here like STRUCT_DATA, except
677 that it removes the mark */
678 mem
= (SCM
*)SCM_GCCDR (ptr
);
680 if (vtable_data
[scm_struct_i_flags
] & SCM_STRUCTF_ENTITY
)
682 scm_gc_mark (mem
[scm_struct_i_procedure
]);
683 scm_gc_mark (mem
[scm_struct_i_setter
]);
687 for (x
= 0; x
< len
- 2; x
+= 2, ++mem
)
688 if (fields_desc
[x
] == 'p')
690 if (fields_desc
[x
] == 'p')
692 if (SCM_LAYOUT_TAILP (fields_desc
[x
+ 1]))
693 for (x
= *mem
; x
; --x
)
694 scm_gc_mark (*++mem
);
699 if (!SCM_CDR (vcell
))
701 SCM_SETGCMARK (vcell
);
702 ptr
= vtable_data
[scm_vtable_index_vtable
];
709 case scm_tcs_closures
:
710 if (SCM_GCMARKP (ptr
))
713 if (SCM_IMP (SCM_CDR (ptr
)))
715 ptr
= SCM_CLOSCAR (ptr
);
718 scm_gc_mark (SCM_CLOSCAR (ptr
));
719 ptr
= SCM_GCCDR (ptr
);
722 case scm_tc7_lvector
:
726 if (SCM_GC8MARKP (ptr
))
728 SCM_SETGC8MARK (ptr
);
729 i
= SCM_LENGTH (ptr
);
733 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
734 scm_gc_mark (SCM_VELTS (ptr
)[i
]);
735 ptr
= SCM_VELTS (ptr
)[0];
740 SCM_SETGC8MARK (ptr
);
742 scm_mark_locations (SCM_VELTS (ptr
),
745 (sizeof (SCM_STACKITEM
) + -1 +
746 sizeof (scm_contregs
)) /
747 sizeof (SCM_STACKITEM
)));
762 SCM_SETGC8MARK (ptr
);
765 case scm_tc7_substring
:
766 if (SCM_GC8MARKP(ptr
))
768 SCM_SETGC8MARK (ptr
);
773 if (SCM_GC8MARKP(ptr
))
775 SCM_WVECT_GC_CHAIN (ptr
) = scm_weak_vectors
;
776 scm_weak_vectors
= ptr
;
777 SCM_SETGC8MARK (ptr
);
778 if (SCM_IS_WHVEC_ANY (ptr
))
785 len
= SCM_LENGTH (ptr
);
786 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
787 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
789 for (x
= 0; x
< len
; ++x
)
792 alist
= SCM_VELTS (ptr
)[x
];
794 /* mark everything on the alist except the keys or
795 * values, according to weak_values and weak_keys. */
796 while ( SCM_NIMP (alist
)
798 && !SCM_GCMARKP (alist
)
799 && SCM_NIMP (SCM_CAR (alist
))
800 && SCM_CONSP (SCM_CAR (alist
)))
805 kvpair
= SCM_CAR (alist
);
806 next_alist
= SCM_CDR (alist
);
809 * SCM_SETGCMARK (alist);
810 * SCM_SETGCMARK (kvpair);
812 * It may be that either the key or value is protected by
813 * an escaped reference to part of the spine of this alist.
814 * If we mark the spine here, and only mark one or neither of the
815 * key and value, they may never be properly marked.
816 * This leads to a horrible situation in which an alist containing
817 * freelist cells is exported.
819 * So only mark the spines of these arrays last of all marking.
820 * If somebody confuses us by constructing a weak vector
821 * with a circular alist then we are hosed, but at least we
822 * won't prematurely drop table entries.
825 scm_gc_mark (SCM_CAR (kvpair
));
827 scm_gc_mark (SCM_GCCDR (kvpair
));
830 if (SCM_NIMP (alist
))
836 case scm_tc7_msymbol
:
837 if (SCM_GC8MARKP(ptr
))
839 SCM_SETGC8MARK (ptr
);
840 scm_gc_mark (SCM_SYMBOL_FUNC (ptr
));
841 ptr
= SCM_SYMBOL_PROPS (ptr
);
843 case scm_tc7_ssymbol
:
844 if (SCM_GC8MARKP(ptr
))
846 SCM_SETGC8MARK (ptr
);
851 i
= SCM_PTOBNUM (ptr
);
852 if (!(i
< scm_numptob
))
854 if (SCM_GC8MARKP (ptr
))
856 SCM_SETGC8MARK (ptr
);
857 if (SCM_PTAB_ENTRY(ptr
))
858 scm_gc_mark (SCM_PTAB_ENTRY(ptr
)->file_name
);
859 if (scm_ptobs
[i
].mark
)
861 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
868 if (SCM_GC8MARKP (ptr
))
870 SCM_SETGC8MARK (ptr
);
871 switch SCM_GCTYP16 (ptr
)
872 { /* should be faster than going through scm_smobs */
873 case scm_tc_free_cell
:
874 /* printf("found free_cell %X ", ptr); fflush(stdout); */
875 SCM_SETCDR (ptr
, SCM_EOL
);
877 case scm_tcs_bignums
:
881 i
= SCM_SMOBNUM (ptr
);
882 if (!(i
< scm_numsmob
))
884 if (scm_smobs
[i
].mark
)
886 ptr
= (scm_smobs
[i
].mark
) (ptr
);
894 def
:scm_wta (ptr
, "unknown type in ", "gc_mark");
899 /* Mark a Region Conservatively
903 scm_mark_locations (x
, n
)
909 register SCM_CELLPTR ptr
;
912 if SCM_CELLP (*(SCM
**) & x
[m
])
914 ptr
= (SCM_CELLPTR
) SCM2PTR ((*(SCM
**) & x
[m
]));
916 j
= scm_n_heap_segs
- 1;
917 if ( SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
918 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
925 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
927 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
935 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
939 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
944 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
948 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
954 if ( !scm_heap_table
[seg_id
].valid
955 || scm_heap_table
[seg_id
].valid (ptr
,
956 &scm_heap_table
[seg_id
]))
957 scm_gc_mark (*(SCM
*) & x
[m
]);
966 /* The following is a C predicate which determines if an SCM value can be
967 regarded as a pointer to a cell on the heap. The code is duplicated
968 from scm_mark_locations. */
976 register SCM_CELLPTR ptr
;
978 if SCM_CELLP (*(SCM
**) & value
)
980 ptr
= (SCM_CELLPTR
) SCM2PTR ((*(SCM
**) & value
));
982 j
= scm_n_heap_segs
- 1;
983 if ( SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
984 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
991 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
993 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1001 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
1005 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
1010 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1014 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1020 if ( !scm_heap_table
[seg_id
].valid
1021 || scm_heap_table
[seg_id
].valid (ptr
,
1022 &scm_heap_table
[seg_id
]))
1034 scm_mark_weak_vector_spines ()
1038 for (w
= scm_weak_vectors
; w
!= SCM_EOL
; w
= SCM_WVECT_GC_CHAIN (w
))
1040 if (SCM_IS_WHVEC_ANY (w
))
1048 ptr
= SCM_VELTS (w
);
1050 for (j
= 0; j
< n
; ++j
)
1055 while ( SCM_NIMP (alist
)
1056 && SCM_CONSP (alist
)
1057 && !SCM_GCMARKP (alist
)
1058 && SCM_NIMP (SCM_CAR (alist
))
1059 && SCM_CONSP (SCM_CAR (alist
)))
1061 SCM_SETGCMARK (alist
);
1062 SCM_SETGCMARK (SCM_CAR (alist
));
1063 alist
= SCM_GCCDR (alist
);
1075 register SCM_CELLPTR ptr
;
1076 #ifdef SCM_POINTERS_MUNGED
1077 register SCM scmptr
;
1080 #define scmptr (SCM)ptr
1082 register SCM nfreelist
;
1083 register SCM
*hp_freelist
;
1091 /* Reset all free list pointers. We'll reconstruct them completely
1093 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1094 *scm_heap_table
[i
].freelistp
= SCM_EOL
;
1096 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1098 register scm_sizet n
= 0;
1099 register scm_sizet j
;
1101 /* Unmarked cells go onto the front of the freelist this heap
1102 segment points to. Rather than updating the real freelist
1103 pointer as we go along, we accumulate the new head in
1104 nfreelist. Then, if it turns out that the entire segment is
1105 free, we free (i.e., malloc's free) the whole segment, and
1106 simply don't assign nfreelist back into the real freelist. */
1107 hp_freelist
= scm_heap_table
[i
].freelistp
;
1108 nfreelist
= *hp_freelist
;
1110 span
= scm_heap_table
[i
].ncells
;
1111 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0]);
1112 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1]) - ptr
;
1113 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
1115 #ifdef SCM_POINTERS_MUNGED
1116 scmptr
= PTR2SCM (ptr
);
1118 switch SCM_TYP7 (scmptr
)
1120 case scm_tcs_cons_gloc
:
1121 if (SCM_GCMARKP (scmptr
))
1123 if (SCM_CDR (SCM_CAR (scmptr
) - 1) == (SCM
)1)
1124 SCM_SETCDR (SCM_CAR (scmptr
) - 1, (SCM
) 0);
1129 vcell
= SCM_CAR (scmptr
) - 1L;
1131 if ((SCM_CDR (vcell
) == 0) || (SCM_CDR (vcell
) == 1))
1133 scm_struct_free_t free
1134 = (scm_struct_free_t
) ((SCM
*) vcell
)[scm_struct_i_free
];
1135 m
+= free ((SCM
*) vcell
, (SCM
*) SCM_GCCDR (scmptr
));
1139 case scm_tcs_cons_imcar
:
1140 case scm_tcs_cons_nimcar
:
1141 case scm_tcs_closures
:
1143 if (SCM_GCMARKP (scmptr
))
1147 if (SCM_GC8MARKP (scmptr
))
1153 m
+= (2 + SCM_LENGTH (scmptr
)) * sizeof (SCM
);
1154 scm_must_free ((char *)(SCM_VELTS (scmptr
) - 2));
1158 case scm_tc7_vector
:
1159 case scm_tc7_lvector
:
1163 if (SCM_GC8MARKP (scmptr
))
1166 m
+= (SCM_LENGTH (scmptr
) * sizeof (SCM
));
1168 scm_must_free (SCM_CHARS (scmptr
));
1169 /* SCM_SETCHARS(scmptr, 0);*/
1172 if SCM_GC8MARKP (scmptr
)
1174 m
+= sizeof (long) * ((SCM_HUGE_LENGTH (scmptr
) + SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
1176 case scm_tc7_byvect
:
1177 if SCM_GC8MARKP (scmptr
)
1179 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (char);
1183 if SCM_GC8MARKP (scmptr
)
1185 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long);
1188 if SCM_GC8MARKP (scmptr
)
1190 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (short);
1193 case scm_tc7_llvect
:
1194 if SCM_GC8MARKP (scmptr
)
1196 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long_long
);
1200 if SCM_GC8MARKP (scmptr
)
1202 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (float);
1205 if SCM_GC8MARKP (scmptr
)
1207 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (double);
1210 if SCM_GC8MARKP (scmptr
)
1212 m
+= SCM_HUGE_LENGTH (scmptr
) * 2 * sizeof (double);
1214 case scm_tc7_substring
:
1215 if (SCM_GC8MARKP (scmptr
))
1218 case scm_tc7_string
:
1219 if (SCM_GC8MARKP (scmptr
))
1221 m
+= SCM_HUGE_LENGTH (scmptr
) + 1;
1223 case scm_tc7_msymbol
:
1224 if (SCM_GC8MARKP (scmptr
))
1226 m
+= ( SCM_LENGTH (scmptr
)
1228 + sizeof (SCM
) * ((SCM
*)SCM_CHARS (scmptr
) - SCM_SLOTS(scmptr
)));
1229 scm_must_free ((char *)SCM_SLOTS (scmptr
));
1231 case scm_tc7_contin
:
1232 if SCM_GC8MARKP (scmptr
)
1234 m
+= SCM_LENGTH (scmptr
) * sizeof (SCM_STACKITEM
) + sizeof (scm_contregs
);
1235 if (SCM_VELTS (scmptr
))
1237 case scm_tc7_ssymbol
:
1238 if SCM_GC8MARKP(scmptr
)
1244 if SCM_GC8MARKP (scmptr
)
1246 if SCM_OPENP (scmptr
)
1248 int k
= SCM_PTOBNUM (scmptr
);
1249 if (!(k
< scm_numptob
))
1251 /* Keep "revealed" ports alive. */
1252 if (scm_revealed_count(scmptr
) > 0)
1254 /* Yes, I really do mean scm_ptobs[k].free */
1255 /* rather than ftobs[k].close. .close */
1256 /* is for explicit CLOSE-PORT by user */
1257 m
+= (scm_ptobs
[k
].free
) (scmptr
);
1258 SCM_SETSTREAM (scmptr
, 0);
1259 scm_remove_from_port_table (scmptr
);
1260 scm_gc_ports_collected
++;
1261 SCM_SETAND_CAR (scmptr
, ~SCM_OPN
);
1265 switch SCM_GCTYP16 (scmptr
)
1267 case scm_tc_free_cell
:
1268 if SCM_GC8MARKP (scmptr
)
1272 case scm_tcs_bignums
:
1273 if SCM_GC8MARKP (scmptr
)
1275 m
+= (SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
/ SCM_CHAR_BIT
);
1277 #endif /* def SCM_BIGDIG */
1279 if SCM_GC8MARKP (scmptr
)
1281 switch ((int) (SCM_CAR (scmptr
) >> 16))
1283 case (SCM_IMAG_PART
| SCM_REAL_PART
) >> 16:
1284 m
+= sizeof (double);
1285 case SCM_REAL_PART
>> 16:
1286 case SCM_IMAG_PART
>> 16:
1287 m
+= sizeof (double);
1296 if SCM_GC8MARKP (scmptr
)
1301 k
= SCM_SMOBNUM (scmptr
);
1302 if (!(k
< scm_numsmob
))
1304 m
+= (scm_smobs
[k
].free
) ((SCM
) scmptr
);
1310 sweeperr
:scm_wta (scmptr
, "unknown type in ", "gc_sweep");
1314 if (SCM_CAR (scmptr
) == (SCM
) scm_tc_free_cell
)
1317 /* Stick the new cell on the front of nfreelist. It's
1318 critical that we mark this cell as freed; otherwise, the
1319 conservative collector might trace it as some other type
1321 SCM_SETCAR (scmptr
, (SCM
) scm_tc_free_cell
);
1322 SCM_SETCDR (scmptr
, nfreelist
);
1327 SCM_CLRGC8MARK (scmptr
);
1330 SCM_CLRGCMARK (scmptr
);
1332 #ifdef GC_FREE_SEGMENTS
1337 scm_heap_size
-= seg_size
;
1338 free ((char *) scm_heap_table
[i
].bounds
[0]);
1339 scm_heap_table
[i
].bounds
[0] = 0;
1340 for (j
= i
+ 1; j
< scm_n_heap_segs
; j
++)
1341 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1342 scm_n_heap_segs
-= 1;
1343 i
--; /* We need to scan the segment just moved. */
1346 #endif /* ifdef GC_FREE_SEGMENTS */
1347 /* Update the real freelist pointer to point to the head of
1348 the list of free cells we've built for this segment. */
1349 *hp_freelist
= nfreelist
;
1351 #ifdef GUILE_DEBUG_FREELIST
1352 scm_check_freelist ();
1353 scm_map_free_list ();
1356 scm_gc_cells_collected
+= n
;
1358 /* Scan weak vectors. */
1361 for (w
= scm_weak_vectors
; w
!= SCM_EOL
; w
= SCM_WVECT_GC_CHAIN (w
))
1363 if (!SCM_IS_WHVEC_ANY (w
))
1367 ptr
= SCM_VELTS (w
);
1369 for (j
= 0; j
< n
; ++j
)
1370 if (SCM_NIMP (ptr
[j
]) && SCM_FREEP (ptr
[j
]))
1371 ptr
[j
] = SCM_BOOL_F
;
1373 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1376 register long n
= SCM_LENGTH (w
);
1379 ptr
= SCM_VELTS (w
);
1381 for (j
= 0; j
< n
; ++j
)
1388 weak_keys
= SCM_IS_WHVEC (obj
) || SCM_IS_WHVEC_B (obj
);
1389 weak_values
= SCM_IS_WHVEC_V (obj
) || SCM_IS_WHVEC_B (obj
);
1394 while (SCM_NIMP (alist
)
1395 && SCM_CONSP (alist
)
1396 && SCM_NIMP (SCM_CAR (alist
))
1397 && SCM_CONSP (SCM_CAR (alist
)))
1402 key
= SCM_CAAR (alist
);
1403 value
= SCM_CDAR (alist
);
1404 if ( (weak_keys
&& SCM_NIMP (key
) && SCM_FREEP (key
))
1405 || (weak_values
&& SCM_NIMP (value
) && SCM_FREEP (value
)))
1407 *fixup
= SCM_CDR (alist
);
1410 fixup
= SCM_CDRLOC (alist
);
1411 alist
= SCM_CDR (alist
);
1417 scm_cells_allocated
= (scm_heap_size
- scm_gc_cells_collected
);
1418 scm_mallocated
-= m
;
1419 scm_gc_malloc_collected
= m
;
1425 /* {Front end to malloc}
1427 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1429 * These functions provide services comparable to malloc, realloc, and
1430 * free. They are for allocating malloced parts of scheme objects.
1431 * The primary purpose of the front end is to impose calls to gc.
1435 * Return newly malloced storage or throw an error.
1437 * The parameter WHAT is a string for error reporting.
1438 * If the threshold scm_mtrigger will be passed by this
1439 * allocation, or if the first call to malloc fails,
1440 * garbage collect -- on the presumption that some objects
1441 * using malloced storage may be collected.
1443 * The limit scm_mtrigger may be raised by this allocation.
1446 scm_must_malloc (scm_sizet size
, const char *what
)
1449 unsigned long nm
= scm_mallocated
+ size
;
1451 if (nm
<= scm_mtrigger
)
1453 SCM_SYSCALL (ptr
= malloc (size
));
1456 scm_mallocated
= nm
;
1463 nm
= scm_mallocated
+ size
;
1464 SCM_SYSCALL (ptr
= malloc (size
));
1467 scm_mallocated
= nm
;
1468 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1469 if (nm
> scm_mtrigger
)
1470 scm_mtrigger
= nm
+ nm
/ 2;
1472 scm_mtrigger
+= scm_mtrigger
/ 2;
1477 scm_wta (SCM_MAKINUM (size
), (char *) SCM_NALLOC
, what
);
1478 return 0; /* never reached */
1483 * is similar to scm_must_malloc.
1486 scm_must_realloc (void *where
,
1492 scm_sizet nm
= scm_mallocated
+ size
- old_size
;
1494 if (nm
<= scm_mtrigger
)
1496 SCM_SYSCALL (ptr
= realloc (where
, size
));
1499 scm_mallocated
= nm
;
1506 nm
= scm_mallocated
+ size
- old_size
;
1507 SCM_SYSCALL (ptr
= realloc (where
, size
));
1510 scm_mallocated
= nm
;
1511 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1512 if (nm
> scm_mtrigger
)
1513 scm_mtrigger
= nm
+ nm
/ 2;
1515 scm_mtrigger
+= scm_mtrigger
/ 2;
1520 scm_wta (SCM_MAKINUM (size
), (char *) SCM_NALLOC
, what
);
1521 return 0; /* never reached */
1525 scm_must_free (void *obj
)
1530 scm_wta (SCM_INUM0
, "already free", "");
1533 /* Announce that there has been some malloc done that will be freed
1534 * during gc. A typical use is for a smob that uses some malloced
1535 * memory but can not get it from scm_must_malloc (for whatever
1536 * reason). When a new object of this smob is created you call
1537 * scm_done_malloc with the size of the object. When your smob free
1538 * function is called, be sure to include this size in the return
1542 scm_done_malloc (size
)
1545 scm_mallocated
+= size
;
1547 if (scm_mallocated
> scm_mtrigger
)
1549 scm_igc ("foreign mallocs");
1550 if (scm_mallocated
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
)
1552 if (scm_mallocated
> scm_mtrigger
)
1553 scm_mtrigger
= scm_mallocated
+ scm_mallocated
/ 2;
1555 scm_mtrigger
+= scm_mtrigger
/ 2;
1565 * Each heap segment is an array of objects of a particular size.
1566 * Every segment has an associated (possibly shared) freelist.
1567 * A table of segment records is kept that records the upper and
1568 * lower extents of the segment; this is used during the conservative
1569 * phase of gc to identify probably gc roots (because they point
1570 * into valid segments at reasonable offsets). */
1573 * is true if the first segment was smaller than INIT_HEAP_SEG.
1574 * If scm_expmem is set to one, subsequent segment allocations will
1575 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1580 * is the lowest base address of any heap segment.
1582 SCM_CELLPTR scm_heap_org
;
1584 struct scm_heap_seg_data
* scm_heap_table
= 0;
1585 int scm_n_heap_segs
= 0;
1588 * is the total number of cells in heap segments.
1590 unsigned long scm_heap_size
= 0;
1593 * initializes a new heap segment and returns the number of objects it contains.
1595 * The segment origin, segment size in bytes, and the span of objects
1596 * in cells are input parameters. The freelist is both input and output.
1598 * This function presumes that the scm_heap_table has already been expanded
1599 * to accommodate a new segment record.
1604 init_heap_seg (seg_org
, size
, ncells
, freelistp
)
1605 SCM_CELLPTR seg_org
;
1610 register SCM_CELLPTR ptr
;
1611 #ifdef SCM_POINTERS_MUNGED
1612 register SCM scmptr
;
1617 SCM_CELLPTR seg_end
;
1621 if (seg_org
== NULL
)
1626 /* Compute the ceiling on valid object pointers w/in this segment.
1628 seg_end
= CELL_DN ((char *) ptr
+ size
);
1630 /* Find the right place and insert the segment record.
1633 for (new_seg_index
= 0;
1634 ( (new_seg_index
< scm_n_heap_segs
)
1635 && SCM_PTR_LE (scm_heap_table
[new_seg_index
].bounds
[0], seg_org
));
1641 for (i
= scm_n_heap_segs
; i
> new_seg_index
; --i
)
1642 scm_heap_table
[i
] = scm_heap_table
[i
- 1];
1647 scm_heap_table
[new_seg_index
].valid
= 0;
1648 scm_heap_table
[new_seg_index
].ncells
= ncells
;
1649 scm_heap_table
[new_seg_index
].freelistp
= freelistp
;
1650 scm_heap_table
[new_seg_index
].bounds
[0] = (SCM_CELLPTR
)ptr
;
1651 scm_heap_table
[new_seg_index
].bounds
[1] = (SCM_CELLPTR
)seg_end
;
1654 /* Compute the least valid object pointer w/in this segment
1656 ptr
= CELL_UP (ptr
);
1659 n_new_objects
= seg_end
- ptr
;
1661 /* Prepend objects in this segment to the freelist.
1663 while (ptr
< seg_end
)
1665 #ifdef SCM_POINTERS_MUNGED
1666 scmptr
= PTR2SCM (ptr
);
1668 SCM_SETCAR (scmptr
, (SCM
) scm_tc_free_cell
);
1669 SCM_SETCDR (scmptr
, PTR2SCM (ptr
+ ncells
));
1675 /* Patch up the last freelist pointer in the segment
1676 * to join it to the input freelist.
1678 SCM_SETCDR (PTR2SCM (ptr
), *freelistp
);
1679 *freelistp
= PTR2SCM (CELL_UP (seg_org
));
1681 scm_heap_size
+= (ncells
* n_new_objects
);
1690 alloc_some_heap (ncells
, freelistp
)
1694 struct scm_heap_seg_data
* tmptable
;
1698 /* Critical code sections (such as the garbage collector)
1699 * aren't supposed to add heap segments.
1701 if (scm_gc_heap_lock
)
1702 scm_wta (SCM_UNDEFINED
, "need larger initial", "heap");
1704 /* Expand the heap tables to have room for the new segment.
1705 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
1706 * only if the allocation of the segment itself succeeds.
1708 len
= (1 + scm_n_heap_segs
) * sizeof (struct scm_heap_seg_data
);
1710 SCM_SYSCALL (tmptable
= ((struct scm_heap_seg_data
*)
1711 realloc ((char *)scm_heap_table
, len
)));
1713 scm_wta (SCM_UNDEFINED
, "could not grow", "hplims");
1715 scm_heap_table
= tmptable
;
1718 /* Pick a size for the new heap segment.
1719 * The rule for picking the size of a segment is explained in
1724 len
= (scm_sizet
) (SCM_EXPHEAP (scm_heap_size
) * sizeof (scm_cell
));
1725 if ((scm_sizet
) (SCM_EXPHEAP (scm_heap_size
) * sizeof (scm_cell
)) != len
)
1729 len
= SCM_HEAP_SEG_SIZE
;
1734 smallest
= (ncells
* sizeof (scm_cell
));
1736 len
= (ncells
* sizeof (scm_cell
));
1738 /* Allocate with decaying ambition. */
1739 while ((len
>= SCM_MIN_HEAP_SEG_SIZE
)
1740 && (len
>= smallest
))
1742 SCM_SYSCALL (ptr
= (SCM_CELLPTR
) malloc (len
));
1745 init_heap_seg (ptr
, len
, ncells
, freelistp
);
1752 scm_wta (SCM_UNDEFINED
, "could not grow", "heap");
1757 SCM_PROC (s_unhash_name
, "unhash-name", 1, 0, 0, scm_unhash_name
);
1759 scm_unhash_name (name
)
1764 SCM_ASSERT (SCM_NIMP (name
) && SCM_SYMBOLP (name
), name
, SCM_ARG1
, s_unhash_name
);
1766 bound
= scm_n_heap_segs
;
1767 for (x
= 0; x
< bound
; ++x
)
1771 p
= (SCM_CELLPTR
)scm_heap_table
[x
].bounds
[0];
1772 pbound
= (SCM_CELLPTR
)scm_heap_table
[x
].bounds
[1];
1777 if (1 == (7 & (int)incar
))
1780 if ( ((name
== SCM_BOOL_T
) || (SCM_CAR (incar
) == name
))
1781 && (SCM_CDR (incar
) != 0)
1782 && (SCM_CDR (incar
) != 1))
1796 /* {GC Protection Helper Functions}
1807 scm_return_first (SCM elt
, ...)
1814 scm_permanent_object (obj
)
1818 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
1824 /* Protect OBJ from the garbage collector. OBJ will not be freed,
1825 even if all other references are dropped, until someone applies
1826 scm_unprotect_object to it. This function returns OBJ.
1828 Calls to scm_protect_object nest. For every object O, there is a
1829 counter which scm_protect_object(O) increments and
1830 scm_unprotect_object(O) decrements, if it is greater than zero. If
1831 an object's counter is greater than zero, the garbage collector
1834 Of course, that's not how it's implemented. scm_protect_object and
1835 scm_unprotect_object just maintain a list of references to things.
1836 Since the GC knows about this list, all objects it mentions stay
1837 alive. scm_protect_object adds its argument to the list;
1838 scm_unprotect_object removes the first occurrence of its argument
1841 scm_protect_object (obj
)
1844 scm_protects
= scm_cons (obj
, scm_protects
);
1850 /* Remove any protection for OBJ established by a prior call to
1851 scm_protect_object. This function returns OBJ.
1853 See scm_protect_object for more information. */
1855 scm_unprotect_object (obj
)
1858 SCM
*tail_ptr
= &scm_protects
;
1860 while (SCM_NIMP (*tail_ptr
) && SCM_CONSP (*tail_ptr
))
1861 if (SCM_CAR (*tail_ptr
) == obj
)
1863 *tail_ptr
= SCM_CDR (*tail_ptr
);
1867 tail_ptr
= SCM_CDRLOC (*tail_ptr
);
1874 /* called on process termination. */
1880 extern int on_exit (void (*procp
) (), int arg
);
1883 cleanup (int status
, void *arg
)
1885 #error Dont know how to setup a cleanup handler on your system.
1890 scm_flush_all_ports ();
1895 scm_init_storage (scm_sizet init_heap_size
)
1899 j
= SCM_NUM_PROTECTS
;
1901 scm_sys_protects
[--j
] = SCM_BOOL_F
;
1903 scm_freelist
= SCM_EOL
;
1906 j
= SCM_HEAP_SEG_SIZE
;
1907 scm_mtrigger
= SCM_INIT_MALLOC_LIMIT
;
1908 scm_heap_table
= ((struct scm_heap_seg_data
*)
1909 scm_must_malloc (sizeof (struct scm_heap_seg_data
), "hplims"));
1910 if (0L == init_heap_size
)
1911 init_heap_size
= SCM_INIT_HEAP_SIZE
;
1913 if ((init_heap_size
!= j
)
1914 || !init_heap_seg ((SCM_CELLPTR
) malloc (j
), j
, 1, &scm_freelist
))
1916 j
= SCM_HEAP_SEG_SIZE
;
1917 if (!init_heap_seg ((SCM_CELLPTR
) malloc (j
), j
, 1, &scm_freelist
))
1922 scm_heap_org
= CELL_UP (scm_heap_table
[0].bounds
[0]);
1923 /* scm_hplims[0] can change. do not remove scm_heap_org */
1924 scm_weak_vectors
= SCM_EOL
;
1926 /* Initialise the list of ports. */
1927 scm_port_table
= (scm_port
**)
1928 malloc (sizeof (scm_port
*) * scm_port_table_room
);
1929 if (!scm_port_table
)
1936 on_exit (cleanup
, 0);
1940 scm_undefineds
= scm_cons (SCM_UNDEFINED
, SCM_EOL
);
1941 SCM_SETCDR (scm_undefineds
, scm_undefineds
);
1943 scm_listofnull
= scm_cons (SCM_EOL
, SCM_EOL
);
1944 scm_nullstr
= scm_makstr (0L, 0);
1945 scm_nullvect
= scm_make_vector (SCM_INUM0
, SCM_UNDEFINED
);
1946 scm_symhash
= scm_make_vector ((SCM
) SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
);
1947 scm_weak_symhash
= scm_make_weak_key_hash_table ((SCM
) SCM_MAKINUM (scm_symhash_dim
));
1948 scm_symhash_vars
= scm_make_vector ((SCM
) SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
);
1949 scm_stand_in_procs
= SCM_EOL
;
1950 scm_permobjs
= SCM_EOL
;
1951 scm_protects
= SCM_EOL
;
1952 scm_asyncs
= SCM_EOL
;
1953 scm_sysintern ("most-positive-fixnum", (SCM
) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM
));
1954 scm_sysintern ("most-negative-fixnum", (SCM
) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM
));
1956 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD
));