1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
53 #include "guardians.h"
71 #define var_start(x, y) va_start(x, y)
74 #define var_start(x, y) va_start(x)
78 /* {heap tuning parameters}
80 * These are parameters for controlling memory allocation. The heap
81 * is the area out of which scm_cons, and object headers are allocated.
83 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
84 * 64 bit machine. The units of the _SIZE parameters are bytes.
85 * Cons pairs and object headers occupy one heap cell.
87 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
88 * allocated initially the heap will grow by half its current size
89 * each subsequent time more heap is needed.
91 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
92 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
93 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
94 * is in scm_init_storage() and alloc_some_heap() in sys.c
96 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
97 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
99 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
102 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
105 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
106 * reclaimed by a GC triggered by must_malloc. If less than this is
107 * reclaimed, the trigger threshold is raised. [I don't know what a
108 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
109 * work around a oscillation that caused almost constant GC.]
112 #define SCM_INIT_HEAP_SIZE (40000L * sizeof (scm_cell))
113 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
115 # define SCM_HEAP_SEG_SIZE 32768L
118 # define SCM_HEAP_SEG_SIZE (7000L*sizeof(scm_cell))
120 # define SCM_HEAP_SEG_SIZE (16384L*sizeof(scm_cell))
123 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
124 #define SCM_INIT_MALLOC_LIMIT 100000
125 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
127 #define SCM_GC_TRIGGER 10000
128 #define SCM_GC_TRIGGER2 10000
130 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
131 bounds for allocated storage */
134 /*in 386 protected mode we must only adjust the offset */
135 # define CELL_UP(p) MK_FP(FP_SEG(p), ~7&(FP_OFF(p)+7))
136 # define CELL_DN(p) MK_FP(FP_SEG(p), ~7&FP_OFF(p))
139 # define CELL_UP(p) (SCM_CELLPTR)(~1L & ((long)(p)+1L))
140 # define CELL_DN(p) (SCM_CELLPTR)(~1L & (long)(p))
142 # define CELL_UP(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & ((long)(p)+sizeof(scm_cell)-1L))
143 # define CELL_DN(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & (long)(p))
152 #ifdef GUILE_NEW_GC_SCHEME
153 SCM scm_freelist
= SCM_EOL
;
154 scm_freelist_t scm_master_freelist
= {
155 SCM_EOL
, 0, SCM_EOL
, SCM_EOL
, 0, 0, 1, 0, 0
157 SCM scm_freelist2
= SCM_EOL
;
158 scm_freelist_t scm_master_freelist2
= {
159 SCM_EOL
, 0, SCM_EOL
, SCM_EOL
, 0, 0, 2, 0, 0
162 scm_freelist_t scm_freelist
= { SCM_EOL
, 1, 0, 0 };
163 scm_freelist_t scm_freelist2
= { SCM_EOL
, 2, 0, 0 };
167 * is the number of bytes of must_malloc allocation needed to trigger gc.
169 unsigned long scm_mtrigger
;
173 * If set, don't expand the heap. Set only during gc, during which no allocation
174 * is supposed to take place anyway.
176 int scm_gc_heap_lock
= 0;
179 * Don't pause for collection if this is set -- just
183 int scm_block_gc
= 1;
185 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
186 * collection (GC) more space is allocated for the heap.
188 #define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
190 /* During collection, this accumulates objects holding
193 SCM scm_weak_vectors
;
195 /* GC Statistics Keeping
197 unsigned long scm_cells_allocated
= 0;
198 long scm_mallocated
= 0;
199 /* unsigned long scm_gc_cells_collected; */
200 unsigned long scm_gc_malloc_collected
;
201 unsigned long scm_gc_ports_collected
;
202 unsigned long scm_gc_rt
;
203 unsigned long scm_gc_time_taken
= 0;
205 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
206 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
207 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
208 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
209 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
210 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
213 struct scm_heap_seg_data
215 /* lower and upper bounds of the segment */
216 SCM_CELLPTR bounds
[2];
218 /* address of the head-of-freelist pointer for this segment's cells.
219 All segments usually point to the same one, scm_freelist. */
220 scm_freelist_t
*freelistp
;
222 /* number of SCM words per object in this segment */
225 /* If SEG_DATA->valid is non-zero, the conservative marking
226 functions will apply SEG_DATA->valid to the purported pointer and
227 SEG_DATA, and mark the object iff the function returns non-zero.
228 At the moment, I don't think anyone uses this. */
235 static void scm_mark_weak_vector_spines (void);
236 static scm_sizet
init_heap_seg (SCM_CELLPTR
, scm_sizet
, scm_freelist_t
*);
237 static void alloc_some_heap (scm_freelist_t
*);
241 /* Debugging functions. */
243 #ifdef GUILE_DEBUG_FREELIST
245 /* Return the number of the heap segment containing CELL. */
251 for (i
= 0; i
< scm_n_heap_segs
; i
++)
252 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], (SCM_CELLPTR
) cell
)
253 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], (SCM_CELLPTR
) cell
))
255 fprintf (stderr
, "which_seg: can't find segment containing cell %lx\n",
262 map_free_list (scm_freelist_t
*freelistp
)
264 int last_seg
= -1, count
= 0;
267 for (f
= freelistp
->cells
; SCM_NIMP (f
); f
= SCM_CDR (f
))
269 int this_seg
= which_seg (f
);
271 if (this_seg
!= last_seg
)
274 fprintf (stderr
, " %5d %d-cells in segment %d\n",
275 count
, freelistp
->span
, last_seg
);
282 fprintf (stderr
, " %5d %d-cells in segment %d\n",
283 count
, freelistp
->span
, last_seg
);
286 SCM_DEFINE (scm_map_free_list
, "map-free-list", 0, 0, 0,
288 "Print debugging information about the free-list.\n"
289 "`map-free-list' is only included in GUILE_DEBUG_FREELIST builds of Guile.")
290 #define FUNC_NAME s_scm_map_free_list
292 fprintf (stderr
, "%d segments total\n", scm_n_heap_segs
);
293 #ifdef GUILE_NEW_GC_SCHEME
294 map_free_list (&scm_master_freelist
);
295 map_free_list (&scm_master_freelist2
);
297 map_free_list (&scm_freelist
);
298 map_free_list (&scm_freelist2
);
302 return SCM_UNSPECIFIED
;
307 /* Number of calls to SCM_NEWCELL since startup. */
308 static unsigned long scm_newcell_count
;
309 static unsigned long scm_newcell2_count
;
311 /* Search freelist for anything that isn't marked as a free cell.
312 Abort if we find something. */
314 scm_check_freelist (scm_freelist_t
*freelistp
)
319 for (f
= freelistp
->cells
; SCM_NIMP (f
); f
= SCM_CDR (f
), i
++)
320 if (SCM_CAR (f
) != (SCM
) scm_tc_free_cell
)
322 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %d'th elt\n",
323 scm_newcell_count
, i
);
329 static int scm_debug_check_freelist
= 0;
331 SCM_DEFINE (scm_gc_set_debug_check_freelist_x
, "gc-set-debug-check-freelist!", 1, 0, 0,
333 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
334 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
335 "compile-time flag was selected.\n")
336 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
338 SCM_VALIDATE_BOOL_COPY (1, flag
, scm_debug_check_freelist
);
339 return SCM_UNSPECIFIED
;
344 #ifdef GUILE_NEW_GC_SCHEME
347 scm_debug_newcell (void)
352 if (scm_debug_check_freelist
)
354 scm_check_freelist (&scm_master_freelist
);
358 /* The rest of this is supposed to be identical to the SCM_NEWCELL
360 if (SCM_IMP (scm_freelist
))
361 new = scm_gc_for_newcell (&scm_master_freelist
, &scm_freelist
);
365 scm_freelist
= SCM_CDR (scm_freelist
);
366 SCM_SETCAR (new, scm_tc16_allocated
);
373 scm_debug_newcell2 (void)
377 scm_newcell2_count
++;
378 if (scm_debug_check_freelist
)
380 scm_check_freelist (&scm_master_freelist2
);
384 /* The rest of this is supposed to be identical to the SCM_NEWCELL
386 if (SCM_IMP (scm_freelist2
))
387 new = scm_gc_for_newcell (&scm_master_freelist2
, &scm_freelist2
);
391 scm_freelist2
= SCM_CDR (scm_freelist2
);
392 SCM_SETCAR (new, scm_tc16_allocated
);
398 #else /* GUILE_NEW_GC_SCHEME */
401 scm_debug_newcell (void)
406 if (scm_debug_check_freelist
)
408 scm_check_freelist (&scm_freelist
);
412 /* The rest of this is supposed to be identical to the SCM_NEWCELL
414 if (SCM_IMP (scm_freelist
.cells
))
415 new = scm_gc_for_newcell (&scm_freelist
);
418 new = scm_freelist
.cells
;
419 scm_freelist
.cells
= SCM_CDR (scm_freelist
.cells
);
420 SCM_SETCAR (new, scm_tc16_allocated
);
421 ++scm_cells_allocated
;
428 scm_debug_newcell2 (void)
432 scm_newcell2_count
++;
433 if (scm_debug_check_freelist
) {
434 scm_check_freelist (&scm_freelist2
);
438 /* The rest of this is supposed to be identical to the SCM_NEWCELL2
440 if (SCM_IMP (scm_freelist2
.cells
))
441 new = scm_gc_for_newcell (&scm_freelist2
);
444 new = scm_freelist2
.cells
;
445 scm_freelist2
.cells
= SCM_CDR (scm_freelist2
.cells
);
446 SCM_SETCAR (new, scm_tc16_allocated
);
447 scm_cells_allocated
+= 2;
453 #endif /* GUILE_NEW_GC_SCHEME */
454 #endif /* GUILE_DEBUG_FREELIST */
458 /* {Scheme Interface to GC}
461 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
463 "Returns an association list of statistics about Guile's current use of storage. ")
464 #define FUNC_NAME s_scm_gc_stats
469 long int local_scm_mtrigger
;
470 long int local_scm_mallocated
;
471 long int local_scm_heap_size
;
472 long int local_scm_cells_allocated
;
473 long int local_scm_gc_time_taken
;
481 for (i
= scm_n_heap_segs
; i
--; )
482 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
483 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
485 if (scm_n_heap_segs
!= n
)
490 local_scm_mtrigger
= scm_mtrigger
;
491 local_scm_mallocated
= scm_mallocated
;
492 #ifdef GUILE_NEW_GC_SCHEME
493 local_scm_heap_size
= scm_master_freelist
.heap_size
; /*fixme*/
495 local_scm_heap_size
= scm_freelist
.heap_size
; /*fixme*/
497 local_scm_cells_allocated
= scm_cells_allocated
;
498 local_scm_gc_time_taken
= scm_gc_time_taken
;
500 answer
= scm_listify (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
501 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
502 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
503 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
504 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
505 scm_cons (sym_heap_segments
, heap_segs
),
514 scm_gc_start (const char *what
)
516 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ());
517 /* scm_gc_cells_collected = 0; */
518 scm_gc_malloc_collected
= 0;
519 scm_gc_ports_collected
= 0;
525 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt
;
526 scm_gc_time_taken
+= scm_gc_rt
;
527 scm_system_async_mark (scm_gc_async
);
531 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
533 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
534 "returned by this function for @var{obj}")
535 #define FUNC_NAME s_scm_object_address
537 return scm_ulong2num ((unsigned long) obj
);
542 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
544 "Scans all of SCM objects and reclaims for further use those that are\n"
545 "no longer accessible.")
546 #define FUNC_NAME s_scm_gc
551 return SCM_UNSPECIFIED
;
557 /* {C Interface For When GC is Triggered}
560 #ifdef GUILE_NEW_GC_SCHEME
562 /* When we get POSIX threads support, the master will be global and
563 common while the freelist will be individual for each thread. */
566 scm_gc_for_newcell (scm_freelist_t
*master
, SCM
*freelist
)
570 if (master
->triggeredp
)
572 else if (SCM_NULLP (master
->clusters
))
573 alloc_some_heap (master
);
574 else if (SCM_NULLP (SCM_CDR (master
->clusters
)))
575 /* we are satisfied; GC instead of alloc next time around */
576 master
->triggeredp
= 1;
578 cell
= SCM_CAR (master
->clusters
);
579 master
->clusters
= SCM_CDR (master
->clusters
);
580 *freelist
= SCM_CDR (cell
);
581 SCM_SETCAR (cell
, scm_tc16_allocated
);
585 #else /* GUILE_NEW_GC_SCHEME */
588 scm_gc_for_alloc (scm_freelist_t
*freelistp
)
592 #ifdef GUILE_DEBUG_FREELIST
593 fprintf (stderr
, "Collected: %d, min_yield: %d\n",
594 freelistp
->collected
, MIN_GC_YIELD (freelistp
));
596 if ((freelistp
->collected
< MIN_GC_YIELD (freelistp
))
597 || SCM_IMP (freelistp
->cells
))
598 alloc_some_heap (freelistp
);
604 scm_gc_for_newcell (scm_freelist_t
*freelistp
)
607 scm_gc_for_alloc (freelistp
);
608 fl
= freelistp
->cells
;
609 freelistp
->cells
= SCM_CDR (fl
);
610 SCM_SETCAR (fl
, scm_tc16_allocated
);
614 #endif /* GUILE_NEW_GC_SCHEME */
617 scm_igc (const char *what
)
622 /* During the critical section, only the current thread may run. */
623 SCM_THREAD_CRITICAL_SECTION_START
;
626 /* fprintf (stderr, "gc: %s\n", what); */
630 if (!scm_stack_base
|| scm_block_gc
)
636 if (scm_mallocated
< 0)
637 /* The byte count of allocated objects has underflowed. This is
638 probably because you forgot to report the sizes of objects you
639 have allocated, by calling scm_done_malloc or some such. When
640 the GC freed them, it subtracted their size from
641 scm_mallocated, which underflowed. */
644 if (scm_gc_heap_lock
)
645 /* We've invoked the collector while a GC is already in progress.
646 That should never happen. */
651 scm_weak_vectors
= SCM_EOL
;
653 scm_guardian_gc_init ();
655 /* unprotect any struct types with no instances */
661 pos
= &scm_type_obj_list
;
662 type_list
= scm_type_obj_list
;
663 while (type_list
!= SCM_EOL
)
664 if (SCM_VELTS (SCM_CAR (type_list
))[scm_struct_i_refcnt
])
666 pos
= SCM_CDRLOC (type_list
);
667 type_list
= SCM_CDR (type_list
);
671 *pos
= SCM_CDR (type_list
);
672 type_list
= SCM_CDR (type_list
);
677 /* flush dead entries from the continuation stack */
682 elts
= SCM_VELTS (scm_continuation_stack
);
683 bound
= SCM_LENGTH (scm_continuation_stack
);
684 x
= SCM_INUM (scm_continuation_stack_ptr
);
687 elts
[x
] = SCM_BOOL_F
;
694 /* Protect from the C stack. This must be the first marking
695 * done because it provides information about what objects
696 * are "in-use" by the C code. "in-use" objects are those
697 * for which the values from SCM_LENGTH and SCM_CHARS must remain
698 * usable. This requirement is stricter than a liveness
699 * requirement -- in particular, it constrains the implementation
700 * of scm_vector_set_length_x.
702 SCM_FLUSH_REGISTER_WINDOWS
;
703 /* This assumes that all registers are saved into the jmp_buf */
704 setjmp (scm_save_regs_gc_mark
);
705 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
706 ( (scm_sizet
) (sizeof (SCM_STACKITEM
) - 1 +
707 sizeof scm_save_regs_gc_mark
)
708 / sizeof (SCM_STACKITEM
)));
711 /* stack_len is long rather than scm_sizet in order to guarantee that
712 &stack_len is long aligned */
713 #ifdef SCM_STACK_GROWS_UP
715 long stack_len
= (SCM_STACKITEM
*) (&stack_len
) - scm_stack_base
;
717 long stack_len
= scm_stack_size (scm_stack_base
);
719 scm_mark_locations (scm_stack_base
, (scm_sizet
) stack_len
);
722 long stack_len
= scm_stack_base
- (SCM_STACKITEM
*) (&stack_len
);
724 long stack_len
= scm_stack_size (scm_stack_base
);
726 scm_mark_locations ((scm_stack_base
- stack_len
), (scm_sizet
) stack_len
);
730 #else /* USE_THREADS */
732 /* Mark every thread's stack and registers */
733 scm_threads_mark_stacks ();
735 #endif /* USE_THREADS */
737 /* FIXME: insert a phase to un-protect string-data preserved
738 * in scm_vector_set_length_x.
741 j
= SCM_NUM_PROTECTS
;
743 scm_gc_mark (scm_sys_protects
[j
]);
745 /* FIXME: we should have a means to register C functions to be run
746 * in different phases of GC
748 scm_mark_subr_table ();
751 scm_gc_mark (scm_root
->handle
);
754 scm_mark_weak_vector_spines ();
756 scm_guardian_zombify ();
764 SCM_THREAD_CRITICAL_SECTION_END
;
774 /* Mark an object precisely.
789 if (SCM_NCELLP (ptr
))
790 scm_wta (ptr
, "rogue pointer in heap", NULL
);
792 switch (SCM_TYP7 (ptr
))
794 case scm_tcs_cons_nimcar
:
795 if (SCM_GCMARKP (ptr
))
798 if (SCM_IMP (SCM_CDR (ptr
))) /* SCM_IMP works even with a GC mark */
803 scm_gc_mark (SCM_CAR (ptr
));
804 ptr
= SCM_GCCDR (ptr
);
806 case scm_tcs_cons_imcar
:
807 if (SCM_GCMARKP (ptr
))
810 ptr
= SCM_GCCDR (ptr
);
813 if (SCM_GCMARKP (ptr
))
816 scm_gc_mark (SCM_CELL_WORD (ptr
, 2));
817 ptr
= SCM_GCCDR (ptr
);
819 case scm_tcs_cons_gloc
:
820 if (SCM_GCMARKP (ptr
))
825 vcell
= SCM_CAR (ptr
) - 1L;
826 switch (SCM_UNPACK (SCM_CDR (vcell
)))
830 ptr
= SCM_GCCDR (ptr
);
842 vtable_data
= (SCM
*)vcell
;
843 layout
= vtable_data
[scm_vtable_index_layout
];
844 len
= SCM_LENGTH (layout
);
845 fields_desc
= SCM_CHARS (layout
);
846 /* We're using SCM_GCCDR here like STRUCT_DATA, except
847 that it removes the mark */
848 mem
= (SCM
*)SCM_GCCDR (ptr
);
850 if (SCM_UNPACK (vtable_data
[scm_struct_i_flags
]) & SCM_STRUCTF_ENTITY
)
852 scm_gc_mark (mem
[scm_struct_i_procedure
]);
853 scm_gc_mark (mem
[scm_struct_i_setter
]);
857 for (x
= 0; x
< len
- 2; x
+= 2, ++mem
)
858 if (fields_desc
[x
] == 'p')
860 if (fields_desc
[x
] == 'p')
863 if (SCM_LAYOUT_TAILP (fields_desc
[x
+ 1]))
864 for (j
= (long int) *mem
; x
; --x
)
865 scm_gc_mark (*++mem
);
870 if (!SCM_CDR (vcell
))
872 SCM_SETGCMARK (vcell
);
873 ptr
= vtable_data
[scm_vtable_index_vtable
];
880 case scm_tcs_closures
:
881 if (SCM_GCMARKP (ptr
))
884 if (SCM_IMP (SCM_CDR (ptr
)))
886 ptr
= SCM_CLOSCAR (ptr
);
889 scm_gc_mark (SCM_CLOSCAR (ptr
));
890 ptr
= SCM_GCCDR (ptr
);
893 case scm_tc7_lvector
:
897 if (SCM_GC8MARKP (ptr
))
899 SCM_SETGC8MARK (ptr
);
900 i
= SCM_LENGTH (ptr
);
904 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
905 scm_gc_mark (SCM_VELTS (ptr
)[i
]);
906 ptr
= SCM_VELTS (ptr
)[0];
911 SCM_SETGC8MARK (ptr
);
913 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr
),
916 (sizeof (SCM_STACKITEM
) + -1 +
917 sizeof (scm_contregs
)) /
918 sizeof (SCM_STACKITEM
)));
929 #ifdef HAVE_LONG_LONGS
934 SCM_SETGC8MARK (ptr
);
937 case scm_tc7_substring
:
938 if (SCM_GC8MARKP(ptr
))
940 SCM_SETGC8MARK (ptr
);
945 if (SCM_GC8MARKP(ptr
))
947 SCM_WVECT_GC_CHAIN (ptr
) = scm_weak_vectors
;
948 scm_weak_vectors
= ptr
;
949 SCM_SETGC8MARK (ptr
);
950 if (SCM_IS_WHVEC_ANY (ptr
))
957 len
= SCM_LENGTH (ptr
);
958 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
959 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
961 for (x
= 0; x
< len
; ++x
)
964 alist
= SCM_VELTS (ptr
)[x
];
966 /* mark everything on the alist except the keys or
967 * values, according to weak_values and weak_keys. */
968 while ( SCM_CONSP (alist
)
969 && !SCM_GCMARKP (alist
)
970 && SCM_CONSP (SCM_CAR (alist
)))
975 kvpair
= SCM_CAR (alist
);
976 next_alist
= SCM_CDR (alist
);
979 * SCM_SETGCMARK (alist);
980 * SCM_SETGCMARK (kvpair);
982 * It may be that either the key or value is protected by
983 * an escaped reference to part of the spine of this alist.
984 * If we mark the spine here, and only mark one or neither of the
985 * key and value, they may never be properly marked.
986 * This leads to a horrible situation in which an alist containing
987 * freelist cells is exported.
989 * So only mark the spines of these arrays last of all marking.
990 * If somebody confuses us by constructing a weak vector
991 * with a circular alist then we are hosed, but at least we
992 * won't prematurely drop table entries.
995 scm_gc_mark (SCM_CAR (kvpair
));
997 scm_gc_mark (SCM_GCCDR (kvpair
));
1000 if (SCM_NIMP (alist
))
1001 scm_gc_mark (alist
);
1006 case scm_tc7_msymbol
:
1007 if (SCM_GC8MARKP(ptr
))
1009 SCM_SETGC8MARK (ptr
);
1010 scm_gc_mark (SCM_SYMBOL_FUNC (ptr
));
1011 ptr
= SCM_SYMBOL_PROPS (ptr
);
1013 case scm_tc7_ssymbol
:
1014 if (SCM_GC8MARKP(ptr
))
1016 SCM_SETGC8MARK (ptr
);
1021 i
= SCM_PTOBNUM (ptr
);
1022 if (!(i
< scm_numptob
))
1024 if (SCM_GC8MARKP (ptr
))
1026 SCM_SETGC8MARK (ptr
);
1027 if (SCM_PTAB_ENTRY(ptr
))
1028 scm_gc_mark (SCM_PTAB_ENTRY(ptr
)->file_name
);
1029 if (scm_ptobs
[i
].mark
)
1031 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
1038 if (SCM_GC8MARKP (ptr
))
1040 SCM_SETGC8MARK (ptr
);
1041 switch (SCM_GCTYP16 (ptr
))
1042 { /* should be faster than going through scm_smobs */
1043 case scm_tc_free_cell
:
1044 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1045 case scm_tc16_allocated
:
1048 case scm_tc16_complex
:
1051 i
= SCM_SMOBNUM (ptr
);
1052 if (!(i
< scm_numsmob
))
1054 if (scm_smobs
[i
].mark
)
1056 ptr
= (scm_smobs
[i
].mark
) (ptr
);
1064 def
:scm_wta (ptr
, "unknown type in ", "gc_mark");
1069 /* Mark a Region Conservatively
1073 scm_mark_locations (SCM_STACKITEM x
[], scm_sizet n
)
1075 register long m
= n
;
1077 register SCM_CELLPTR ptr
;
1080 if (SCM_CELLP (*(SCM
**) (& x
[m
])))
1082 ptr
= (SCM_CELLPTR
) SCM2PTR ((*(SCM
**) & x
[m
]));
1084 j
= scm_n_heap_segs
- 1;
1085 if ( SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1086 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1093 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
1095 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1103 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
1107 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
1112 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1116 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1122 if ( !scm_heap_table
[seg_id
].valid
1123 || scm_heap_table
[seg_id
].valid (ptr
,
1124 &scm_heap_table
[seg_id
]))
1125 scm_gc_mark (*(SCM
*) & x
[m
]);
1134 /* The following is a C predicate which determines if an SCM value can be
1135 regarded as a pointer to a cell on the heap. The code is duplicated
1136 from scm_mark_locations. */
1140 scm_cellp (SCM value
)
1143 register SCM_CELLPTR ptr
;
1145 if SCM_CELLP (*(SCM
**) (& value
))
1147 ptr
= (SCM_CELLPTR
) SCM2PTR ((*(SCM
**) & value
));
1149 j
= scm_n_heap_segs
- 1;
1150 if ( SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1151 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1158 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
1160 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1168 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
1172 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
1177 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1181 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1187 if ( !scm_heap_table
[seg_id
].valid
1188 || scm_heap_table
[seg_id
].valid (ptr
,
1189 &scm_heap_table
[seg_id
]))
1201 scm_mark_weak_vector_spines ()
1205 for (w
= scm_weak_vectors
; w
!= SCM_EOL
; w
= SCM_WVECT_GC_CHAIN (w
))
1207 if (SCM_IS_WHVEC_ANY (w
))
1215 ptr
= SCM_VELTS (w
);
1217 for (j
= 0; j
< n
; ++j
)
1222 while ( SCM_CONSP (alist
)
1223 && !SCM_GCMARKP (alist
)
1224 && SCM_CONSP (SCM_CAR (alist
)))
1226 SCM_SETGCMARK (alist
);
1227 SCM_SETGCMARK (SCM_CAR (alist
));
1228 alist
= SCM_GCCDR (alist
);
1240 register SCM_CELLPTR ptr
;
1241 #ifdef SCM_POINTERS_MUNGED
1242 register SCM scmptr
;
1245 #define scmptr (SCM)ptr
1247 register SCM nfreelist
;
1248 register scm_freelist_t
*hp_freelist
;
1251 #ifdef GUILE_NEW_GC_SCHEME
1259 #ifdef GUILE_NEW_GC_SCHEME
1260 /* Reset all free list pointers. We'll reconstruct them completely
1262 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1264 scm_heap_table
[i
].freelistp
->cells
= SCM_EOL
;
1265 scm_heap_table
[i
].freelistp
->n_objects
1266 = scm_heap_table
[i
].freelistp
->gc_trigger
;
1267 scm_heap_table
[i
].freelistp
->clusters
= SCM_EOL
;
1268 scm_heap_table
[i
].freelistp
->clustertail
1269 = &scm_heap_table
[i
].freelistp
->clusters
;
1270 scm_heap_table
[i
].freelistp
->triggeredp
= 0;
1273 /* Reset all free list pointers. We'll reconstruct them completely
1275 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1276 scm_heap_table
[i
].freelistp
->cells
= SCM_EOL
;
1279 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1281 register scm_sizet n
= 0;
1282 register scm_sizet j
;
1284 /* Unmarked cells go onto the front of the freelist this heap
1285 segment points to. Rather than updating the real freelist
1286 pointer as we go along, we accumulate the new head in
1287 nfreelist. Then, if it turns out that the entire segment is
1288 free, we free (i.e., malloc's free) the whole segment, and
1289 simply don't assign nfreelist back into the real freelist. */
1290 hp_freelist
= scm_heap_table
[i
].freelistp
;
1291 nfreelist
= hp_freelist
->cells
;
1292 #ifdef GUILE_NEW_GC_SCHEME
1293 n_objects
= hp_freelist
->n_objects
;
1295 span
= scm_heap_table
[i
].span
;
1296 hp_freelist
->collected
= 0;
1298 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0]);
1299 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1]) - ptr
;
1300 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
1302 #ifdef SCM_POINTERS_MUNGED
1303 scmptr
= PTR2SCM (ptr
);
1305 switch SCM_TYP7 (scmptr
)
1307 case scm_tcs_cons_gloc
:
1308 if (SCM_GCMARKP (scmptr
))
1310 if (SCM_CDR (SCM_CAR (scmptr
) - 1) == (SCM
)1)
1311 SCM_SETCDR (SCM_CAR (scmptr
) - 1, (SCM
) 0);
1316 vcell
= SCM_CAR (scmptr
) - 1L;
1318 if ((SCM_CDR (vcell
) == 0) || (SCM_UNPACK (SCM_CDR (vcell
)) == 1))
1320 scm_struct_free_t free
1321 = (scm_struct_free_t
) ((SCM
*) vcell
)[scm_struct_i_free
];
1322 m
+= free ((SCM
*) vcell
, (SCM
*) SCM_GCCDR (scmptr
));
1326 case scm_tcs_cons_imcar
:
1327 case scm_tcs_cons_nimcar
:
1328 case scm_tcs_closures
:
1330 if (SCM_GCMARKP (scmptr
))
1334 if (SCM_GC8MARKP (scmptr
))
1340 m
+= (2 + SCM_LENGTH (scmptr
)) * sizeof (SCM
);
1341 scm_must_free ((char *)(SCM_VELTS (scmptr
) - 2));
1345 case scm_tc7_vector
:
1346 case scm_tc7_lvector
:
1350 if (SCM_GC8MARKP (scmptr
))
1353 m
+= (SCM_LENGTH (scmptr
) * sizeof (SCM
));
1355 scm_must_free (SCM_CHARS (scmptr
));
1356 /* SCM_SETCHARS(scmptr, 0);*/
1360 if SCM_GC8MARKP (scmptr
)
1362 m
+= sizeof (long) * ((SCM_HUGE_LENGTH (scmptr
) + SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
1364 case scm_tc7_byvect
:
1365 if SCM_GC8MARKP (scmptr
)
1367 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (char);
1371 if SCM_GC8MARKP (scmptr
)
1373 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long);
1376 if SCM_GC8MARKP (scmptr
)
1378 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (short);
1380 #ifdef HAVE_LONG_LONGS
1381 case scm_tc7_llvect
:
1382 if SCM_GC8MARKP (scmptr
)
1384 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long_long
);
1388 if SCM_GC8MARKP (scmptr
)
1390 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (float);
1393 if SCM_GC8MARKP (scmptr
)
1395 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (double);
1398 if SCM_GC8MARKP (scmptr
)
1400 m
+= SCM_HUGE_LENGTH (scmptr
) * 2 * sizeof (double);
1403 case scm_tc7_substring
:
1404 if (SCM_GC8MARKP (scmptr
))
1407 case scm_tc7_string
:
1408 if (SCM_GC8MARKP (scmptr
))
1410 m
+= SCM_HUGE_LENGTH (scmptr
) + 1;
1412 case scm_tc7_msymbol
:
1413 if (SCM_GC8MARKP (scmptr
))
1415 m
+= ( SCM_LENGTH (scmptr
)
1417 + sizeof (SCM
) * ((SCM
*)SCM_CHARS (scmptr
) - SCM_SLOTS(scmptr
)));
1418 scm_must_free ((char *)SCM_SLOTS (scmptr
));
1420 case scm_tc7_contin
:
1421 if SCM_GC8MARKP (scmptr
)
1423 m
+= SCM_LENGTH (scmptr
) * sizeof (SCM_STACKITEM
) + sizeof (scm_contregs
);
1424 if (SCM_VELTS (scmptr
))
1426 case scm_tc7_ssymbol
:
1427 if SCM_GC8MARKP(scmptr
)
1433 if SCM_GC8MARKP (scmptr
)
1435 if SCM_OPENP (scmptr
)
1437 int k
= SCM_PTOBNUM (scmptr
);
1438 if (!(k
< scm_numptob
))
1440 /* Keep "revealed" ports alive. */
1441 if (scm_revealed_count (scmptr
) > 0)
1443 /* Yes, I really do mean scm_ptobs[k].free */
1444 /* rather than ftobs[k].close. .close */
1445 /* is for explicit CLOSE-PORT by user */
1446 m
+= (scm_ptobs
[k
].free
) (scmptr
);
1447 SCM_SETSTREAM (scmptr
, 0);
1448 scm_remove_from_port_table (scmptr
);
1449 scm_gc_ports_collected
++;
1450 SCM_SETAND_CAR (scmptr
, ~SCM_OPN
);
1454 switch SCM_GCTYP16 (scmptr
)
1456 case scm_tc_free_cell
:
1458 if SCM_GC8MARKP (scmptr
)
1463 if SCM_GC8MARKP (scmptr
)
1465 m
+= (SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
/ SCM_CHAR_BIT
);
1467 #endif /* def SCM_BIGDIG */
1468 case scm_tc16_complex
:
1469 if SCM_GC8MARKP (scmptr
)
1471 m
+= 2 * sizeof (double);
1474 if SCM_GC8MARKP (scmptr
)
1479 k
= SCM_SMOBNUM (scmptr
);
1480 if (!(k
< scm_numsmob
))
1482 m
+= (scm_smobs
[k
].free
) ((SCM
) scmptr
);
1488 sweeperr
:scm_wta (scmptr
, "unknown type in ", "gc_sweep");
1491 if (SCM_CAR (scmptr
) == (SCM
) scm_tc_free_cell
)
1494 #ifndef GUILE_NEW_GC_SCHEME
1497 if (--n_objects
< 0)
1499 SCM_SETCAR (scmptr
, nfreelist
);
1500 *hp_freelist
->clustertail
= scmptr
;
1501 hp_freelist
->clustertail
= SCM_CDRLOC (scmptr
);
1503 nfreelist
= SCM_EOL
;
1504 n_objects
= hp_freelist
->gc_trigger
;
1505 n
+= span
* (n_objects
+ 1);
1510 /* Stick the new cell on the front of nfreelist. It's
1511 critical that we mark this cell as freed; otherwise, the
1512 conservative collector might trace it as some other type
1514 SCM_SETCAR (scmptr
, scm_tc_free_cell
);
1515 SCM_SETCDR (scmptr
, nfreelist
);
1521 SCM_CLRGC8MARK (scmptr
);
1524 SCM_CLRGCMARK (scmptr
);
1526 #ifdef GC_FREE_SEGMENTS
1531 hp_freelist
->heap_size
-= seg_size
;
1532 free ((char *) scm_heap_table
[i
].bounds
[0]);
1533 scm_heap_table
[i
].bounds
[0] = 0;
1534 for (j
= i
+ 1; j
< scm_n_heap_segs
; j
++)
1535 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1536 scm_n_heap_segs
-= 1;
1537 i
--; /* We need to scan the segment just moved. */
1540 #endif /* ifdef GC_FREE_SEGMENTS */
1542 /* Update the real freelist pointer to point to the head of
1543 the list of free cells we've built for this segment. */
1544 hp_freelist
->cells
= nfreelist
;
1545 #ifdef GUILE_NEW_GC_SCHEME
1546 hp_freelist
->n_objects
= n_objects
;
1550 #ifdef GUILE_NEW_GC_SCHEME
1551 j
= span
* (hp_freelist
->gc_trigger
- n_objects
);
1552 /* sum up---if this is last turn for this freelist */
1553 hp_freelist
->collected
+= n
+ j
;
1554 n
-= j
; /* compensate for the sum up */
1556 hp_freelist
->collected
+= n
;
1558 scm_cells_allocated
+= hp_freelist
->heap_size
- hp_freelist
->collected
;
1560 #ifdef GUILE_DEBUG_FREELIST
1561 scm_check_freelist (hp_freelist
);
1562 scm_map_free_list ();
1566 #ifdef GUILE_NEW_GC_SCHEME
1567 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1569 scm_freelist_t
*hp_freelist
= scm_heap_table
[i
].freelistp
;
1570 if (hp_freelist
->gc_trigger
- hp_freelist
->n_objects
> 1)
1572 SCM c
= hp_freelist
->cells
;
1573 SCM_SETCAR (c
, SCM_CDR (c
));
1574 SCM_SETCDR (c
, SCM_EOL
);
1575 *hp_freelist
->clustertail
= c
;
1576 hp_freelist
->n_objects
= hp_freelist
->gc_trigger
;
1579 *hp_freelist
->clustertail
= SCM_EOL
;
1581 fprintf (stderr
, "%d:%d: ",
1582 i
, scm_ilength (hp_freelist
->clusters
));
1584 SCM ls
, c
= hp_freelist
->clusters
;
1586 while (SCM_NNULLP (c
))
1589 for (n
= 0; SCM_NNULLP (ls
); ls
= SCM_CDR (ls
))
1591 fprintf (stderr
, "%d ", n
);
1594 fprintf (stderr
, "\n");
1600 /* Scan weak vectors. */
1603 for (w
= scm_weak_vectors
; w
!= SCM_EOL
; w
= SCM_WVECT_GC_CHAIN (w
))
1605 if (!SCM_IS_WHVEC_ANY (w
))
1609 ptr
= SCM_VELTS (w
);
1611 for (j
= 0; j
< n
; ++j
)
1612 if (SCM_FREEP (ptr
[j
]))
1613 ptr
[j
] = SCM_BOOL_F
;
1615 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1618 register long n
= SCM_LENGTH (w
);
1621 ptr
= SCM_VELTS (w
);
1623 for (j
= 0; j
< n
; ++j
)
1630 weak_keys
= SCM_IS_WHVEC (obj
) || SCM_IS_WHVEC_B (obj
);
1631 weak_values
= SCM_IS_WHVEC_V (obj
) || SCM_IS_WHVEC_B (obj
);
1636 while ( SCM_CONSP (alist
)
1637 && SCM_CONSP (SCM_CAR (alist
)))
1642 key
= SCM_CAAR (alist
);
1643 value
= SCM_CDAR (alist
);
1644 if ( (weak_keys
&& SCM_FREEP (key
))
1645 || (weak_values
&& SCM_FREEP (value
)))
1647 *fixup
= SCM_CDR (alist
);
1650 fixup
= SCM_CDRLOC (alist
);
1651 alist
= SCM_CDR (alist
);
1657 scm_mallocated
-= m
;
1658 scm_gc_malloc_collected
= m
;
1664 /* {Front end to malloc}
1666 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1668 * These functions provide services comparable to malloc, realloc, and
1669 * free. They are for allocating malloced parts of scheme objects.
1670 * The primary purpose of the front end is to impose calls to gc.
1674 * Return newly malloced storage or throw an error.
1676 * The parameter WHAT is a string for error reporting.
1677 * If the threshold scm_mtrigger will be passed by this
1678 * allocation, or if the first call to malloc fails,
1679 * garbage collect -- on the presumption that some objects
1680 * using malloced storage may be collected.
1682 * The limit scm_mtrigger may be raised by this allocation.
/* scm_must_malloc -- allocate SIZE bytes for a malloced part of a
 * scheme object, or report an error via scm_wta.  WHAT is a string
 * naming the caller for error reporting (see the block comment
 * above).  NOTE(review): this file is a lossy line sample; braces,
 * declarations and some statements are missing between the numbered
 * fragments below, so comments describe only the visible lines. */
1685 scm_must_malloc (scm_sizet size
, const char *what
)
/* nm is the projected total of malloced bytes after this allocation. */
1688 unsigned long nm
= scm_mallocated
+ size
;
/* Fast path: still at or under the GC trigger -- try a plain malloc. */
1690 if (nm
<= scm_mtrigger
)
1692 SCM_SYSCALL (ptr
= malloc (size
));
1695 scm_mallocated
= nm
;
/* Slow path: recompute and retry the allocation (presumably after a
 * garbage collection -- the intervening call is sampled out). */
1702 nm
= scm_mallocated
+ size
;
1703 SCM_SYSCALL (ptr
= malloc (size
));
1706 scm_mallocated
= nm
;
/* Once allocation comes within SCM_MTRIGGER_HYSTERESIS of the
 * trigger, raise scm_mtrigger by half (per the header comment,
 * "the limit scm_mtrigger may be raised by this allocation"). */
1707 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1708 if (nm
> scm_mtrigger
)
1709 scm_mtrigger
= nm
+ nm
/ 2;
1711 scm_mtrigger
+= scm_mtrigger
/ 2;
/* Both attempts failed: signal an allocation error and bail out. */
1716 scm_wta (SCM_MAKINUM (size
), (char *) SCM_NALLOC
, what
);
1717 return 0; /* never reached */
1722 * is similar to scm_must_malloc.
/* scm_must_realloc -- resize a malloced block like realloc, with the
 * same GC-trigger discipline as scm_must_malloc (per the comment
 * above: "is similar to scm_must_malloc").  NOTE(review): the rest
 * of the parameter list (apparently old_size, size, what, judging by
 * the uses below) is in sampled-out lines. */
1725 scm_must_realloc (void *where
,
/* Projected malloced-byte total after the resize. */
1731 scm_sizet nm
= scm_mallocated
+ size
- old_size
;
/* Fast path: under the GC trigger -- plain realloc. */
1733 if (nm
<= scm_mtrigger
)
1735 SCM_SYSCALL (ptr
= realloc (where
, size
));
1738 scm_mallocated
= nm
;
/* Slow path: retry (presumably after a GC -- the call is sampled
 * out), then maybe raise the trigger by half, as in scm_must_malloc. */
1745 nm
= scm_mallocated
+ size
- old_size
;
1746 SCM_SYSCALL (ptr
= realloc (where
, size
));
1749 scm_mallocated
= nm
;
1750 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1751 if (nm
> scm_mtrigger
)
1752 scm_mtrigger
= nm
+ nm
/ 2;
1754 scm_mtrigger
+= scm_mtrigger
/ 2;
/* Both attempts failed: signal an allocation error. */
1759 scm_wta (SCM_MAKINUM (size
), (char *) SCM_NALLOC
, what
);
1760 return 0; /* never reached */
/* scm_must_free -- release storage obtained through scm_must_malloc
 * or scm_must_realloc.  Only the error path for freeing an already
 * free (null?) pointer is visible in this sample. */
1764 scm_must_free (void *obj
)
1769 scm_wta (SCM_INUM0
, "already free", "");
1772 /* Announce that there has been some malloc done that will be freed
1773 * during gc. A typical use is for a smob that uses some malloced
1774 * memory but cannot get it from scm_must_malloc (for whatever
1775 * reason). When a new object of this smob is created you call
1776 * scm_done_malloc with the size of the object. When your smob free
1777 * function is called, be sure to include this size in the return
/* scm_done_malloc -- account for SIZE bytes malloced outside of
 * scm_must_malloc (see the comment above).  Crossing scm_mtrigger
 * forces a GC; afterwards, if still within the hysteresis band,
 * the trigger is raised by half. */
1781 scm_done_malloc (long size
)
1783 scm_mallocated
+= size
;
/* Over the trigger: collect, blaming "foreign mallocs". */
1785 if (scm_mallocated
> scm_mtrigger
)
1787 scm_igc ("foreign mallocs");
/* Still within SCM_MTRIGGER_HYSTERESIS of the trigger: raise it. */
1788 if (scm_mallocated
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
)
1790 if (scm_mallocated
> scm_mtrigger
)
1791 scm_mtrigger
= scm_mallocated
+ scm_mallocated
/ 2;
1793 scm_mtrigger
+= scm_mtrigger
/ 2;
1803 * Each heap segment is an array of objects of a particular size.
1804 * Every segment has an associated (possibly shared) freelist.
1805 * A table of segment records is kept that records the upper and
1806 * lower extents of the segment; this is used during the conservative
1807 * phase of gc to identify probable gc roots (because they point
1808 * into valid segments at reasonable offsets). */
1811 * is true if the first segment was smaller than INIT_HEAP_SEG.
1812 * If scm_expmem is set to one, subsequent segment allocations will
1813 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1818 * is the lowest base address of any heap segment.
/* Lowest base address of any heap segment (per the comment above:
 * "is the lowest base address of any heap segment"). */
1820 SCM_CELLPTR scm_heap_org
;
/* Table of heap-segment records and its current length; grown by
 * alloc_some_heap (realloc) and reordered/shrunk during sweep. */
1822 struct scm_heap_seg_data
* scm_heap_table
= 0;
1823 int scm_n_heap_segs
= 0;
1826 * initializes a new heap segment and returns the number of objects it contains.
1828 * The segment origin, segment size in bytes, and the span of objects
1829 * in cells are input parameters. The freelist is both input and output.
1831 * This function presumes that the scm_heap_table has already been expanded
1832 * to accommodate a new segment record.
/* init_heap_seg -- set up a new heap segment and hand its cells to
 * FREELISTP (see the comment above this function).  Visible steps:
 * round SIZE down to a whole number of SPAN-cell objects, insert a
 * sorted record into scm_heap_table, then either partition the cells
 * into clusters (GUILE_NEW_GC_SCHEME) or chain them as free cells
 * onto the freelist.  NOTE(review): this is a lossy line sample --
 * braces, several declarations and loop bodies are missing between
 * the numbered fragments. */
1837 init_heap_seg (SCM_CELLPTR seg_org
, scm_sizet size
, scm_freelist_t
*freelistp
)
1839 register SCM_CELLPTR ptr
;
1840 #ifdef SCM_POINTERS_MUNGED
1841 register SCM scmptr
;
1846 SCM_CELLPTR seg_end
;
1849 int span
= freelistp
->span
;
1851 if (seg_org
== NULL
)
/* Round SIZE down to a multiple of span cells. */
1856 size
= (size
/ sizeof (scm_cell
) / span
) * span
* sizeof (scm_cell
);
1858 /* Compute the ceiling on valid object pointers w/in this segment.
1860 seg_end
= CELL_DN ((char *) ptr
+ size
);
1862 /* Find the right place and insert the segment record.
/* Linear scan: find the insertion index keeping records sorted by
 * their base address (bounds[0]). */
1865 for (new_seg_index
= 0;
1866 ( (new_seg_index
< scm_n_heap_segs
)
1867 && SCM_PTR_LE (scm_heap_table
[new_seg_index
].bounds
[0], seg_org
));
/* Shift later records up to open a slot at new_seg_index. */
1873 for (i
= scm_n_heap_segs
; i
> new_seg_index
; --i
)
1874 scm_heap_table
[i
] = scm_heap_table
[i
- 1];
/* Fill in the new segment record. */
1879 scm_heap_table
[new_seg_index
].valid
= 0;
1880 scm_heap_table
[new_seg_index
].span
= span
;
1881 scm_heap_table
[new_seg_index
].freelistp
= freelistp
;
1882 scm_heap_table
[new_seg_index
].bounds
[0] = (SCM_CELLPTR
)ptr
;
1883 scm_heap_table
[new_seg_index
].bounds
[1] = (SCM_CELLPTR
)seg_end
;
1886 /* Compute the least valid object pointer w/in this segment
1888 ptr
= CELL_UP (ptr
);
1892 n_new_cells
= seg_end
- ptr
;
1894 #ifdef GUILE_NEW_GC_SCHEME
1896 freelistp
->heap_size
+= n_new_cells
;
1898 /* Partition objects in this segment into clusters
1902 SCM
*clusterp
= &clusters
;
1903 int trigger
= span
* freelistp
->gc_trigger
;
/* Cluster loop: chain groups of cells; cluster heads are linked via
 * *clusterp / SCM_CDRLOC. */
1906 while (n_new_cells
> span
)
1908 if (n_new_cells
> trigger
)
1916 *clusterp
= PTR2SCM (ptr
);
1917 SCM_SETCAR (*clusterp
, PTR2SCM (ptr
+ span
));
1918 clusterp
= SCM_CDRLOC (*clusterp
);
/* Mark each cell in the cluster as a free cell linked to the next. */
1922 while (ptr
< seg_end
)
1924 #ifdef SCM_POINTERS_MUNGED
1925 scmptr
= PTR2SCM (ptr
);
1927 SCM_SETCAR (scmptr
, scm_tc_free_cell
);
1928 SCM_SETCDR (scmptr
, PTR2SCM (ptr
+ span
));
/* Terminate the chain at the last cell of the cluster. */
1931 SCM_SETCDR (PTR2SCM (ptr
- span
), SCM_EOL
);
1934 /* Correction for cluster cells + spill */
1935 freelistp
->heap_size
-= c
+ n_new_cells
;
1937 /* Patch up the last cluster pointer in the segment
1938 * to join it to the input freelist.
1940 *clusterp
= freelistp
->clusters
;
1941 freelistp
->clusters
= clusters
;
1944 #else /* GUILE_NEW_GC_SCHEME */
1946 /* Prepend objects in this segment to the freelist.
1948 while (ptr
< seg_end
)
1950 #ifdef SCM_POINTERS_MUNGED
1951 scmptr
= PTR2SCM (ptr
);
1953 SCM_SETCAR (scmptr
, (SCM
) scm_tc_free_cell
);
1954 SCM_SETCDR (scmptr
, PTR2SCM (ptr
+ span
));
1960 /* Patch up the last freelist pointer in the segment
1961 * to join it to the input freelist.
1963 SCM_SETCDR (PTR2SCM (ptr
), freelistp
->cells
);
1964 freelistp
->cells
= PTR2SCM (CELL_UP (seg_org
));
1966 freelistp
->heap_size
+= n_new_cells
;
1968 #endif /* GUILE_NEW_GC_SCHEME */
/* alloc_some_heap -- add one more heap segment for FREELISTP.
 * Grows scm_heap_table by one record, picks a segment size via
 * SCM_EXPHEAP (with SCM_HEAP_SEG_SIZE and one span-cell object as
 * visible bounds), then mallocs "with decaying ambition".
 * NOTE(review): lossy line sample -- the step that shrinks LEN
 * between allocation attempts, and several declarations, are in
 * sampled-out lines. */
1978 alloc_some_heap (scm_freelist_t
*freelistp
)
1980 struct scm_heap_seg_data
* tmptable
;
1984 /* Critical code sections (such as the garbage collector)
1985 * aren't supposed to add heap segments.
1987 if (scm_gc_heap_lock
)
1988 scm_wta (SCM_UNDEFINED
, "need larger initial", "heap");
1990 /* Expand the heap tables to have room for the new segment.
1991 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
1992 * only if the allocation of the segment itself succeeds.
1994 len
= (1 + scm_n_heap_segs
) * sizeof (struct scm_heap_seg_data
);
1996 SCM_SYSCALL (tmptable
= ((struct scm_heap_seg_data
*)
1997 realloc ((char *)scm_heap_table
, len
)));
/* realloc failure path: report and (presumably) abort the grow. */
1999 scm_wta (SCM_UNDEFINED
, "could not grow", "hplims");
2001 scm_heap_table
= tmptable
;
2004 /* Pick a size for the new heap segment.
2005 * The rule for picking the size of a segment is explained in
2010 len
= (scm_sizet
) SCM_EXPHEAP (freelistp
->heap_size
* sizeof (scm_cell
));
2011 if ((scm_sizet
) SCM_EXPHEAP (freelistp
->heap_size
* sizeof (scm_cell
))
2016 len
= SCM_HEAP_SEG_SIZE
;
/* The smallest acceptable segment holds a single span-cell object. */
2021 smallest
= (freelistp
->span
* sizeof (scm_cell
));
2023 len
= (freelistp
->span
* sizeof (scm_cell
));
2025 /* Allocate with decaying ambition. */
2026 while ((len
>= SCM_MIN_HEAP_SEG_SIZE
)
2027 && (len
>= smallest
))
2029 SCM_SYSCALL (ptr
= (SCM_CELLPTR
) malloc (len
));
2032 init_heap_seg (ptr
, len
, freelistp
);
/* All attempts failed. */
2039 scm_wta (SCM_UNDEFINED
, "could not grow", "heap");
/* unhash-name Scheme primitive.  Walks every heap segment between
 * its recorded bounds; the visible test selects cells whose car is
 * NAME (or any cell, when NAME is #t) with a cdr that is neither 0
 * nor 1.  NOTE(review): what is done to the matching cells, and the
 * return value, are in sampled-out lines. */
2044 SCM_DEFINE (scm_unhash_name
, "unhash-name", 1, 0, 0,
2047 #define FUNC_NAME s_scm_unhash_name
2051 SCM_VALIDATE_SYMBOL (1,name
);
2053 bound
= scm_n_heap_segs
;
/* Iterate over all heap segments. */
2054 for (x
= 0; x
< bound
; ++x
)
2058 p
= (SCM_CELLPTR
)scm_heap_table
[x
].bounds
[0];
2059 pbound
= (SCM_CELLPTR
)scm_heap_table
[x
].bounds
[1];
/* Low-3-bit tag test on the cell's car -- presumably selects the
 * cell type of interest here; confirm against the tag definitions. */
2064 if (1 == (7 & (int)incar
))
2067 if ( ((name
== SCM_BOOL_T
) || (SCM_CAR (incar
) == name
))
2068 && (SCM_CDR (incar
) != 0)
2069 && (SCM_UNPACK (SCM_CDR (incar
)) != 1))
2084 /* {GC Protection Helper Functions}
/* scm_remember -- GC-protection helper (see section comment above);
 * only the signature survives in this lossy sample. */
2089 scm_remember (SCM
*ptr
)
2094 These crazy functions prevent garbage collection
2095 of arguments after the first argument by
2096 ensuring they remain live throughout the
2097 function because they are used in the last
2098 line of the code block.
2099 It'd be better to have a nice compiler hint to
2100 aid the conservative stack-scanning GC. --03/09/00 gjb */
/* Variadic function that returns its first argument; per the comment
 * above, it exists to keep later arguments live for the conservative
 * stack-scanning GC.  Body is sampled out. */
2102 scm_return_first (SCM elt
, ...)
/* int-returning variant of scm_return_first (same GC-liveness
 * purpose, per the comment above).  Body is sampled out. */
2108 scm_return_first_int (int i
, ...)
/* Protect OBJ permanently by consing it onto the scm_permobjs list.
 * NOTE(review): that scm_permobjs is treated as a GC root is
 * inferred from this use -- confirm where the mark phase scans it. */
2115 scm_permanent_object (SCM obj
)
2118 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
2124 /* Protect OBJ from the garbage collector. OBJ will not be freed,
2125 even if all other references are dropped, until someone applies
2126 scm_unprotect_object to it. This function returns OBJ.
2128 Calls to scm_protect_object nest. For every object OBJ, there is a
2129 counter which scm_protect_object(OBJ) increments and
2130 scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
2131 an object's counter is greater than zero, the garbage collector
2134 Of course, that's not how it's implemented. scm_protect_object and
2135 scm_unprotect_object just maintain a list of references to things.
2136 Since the GC knows about this list, all objects it mentions stay
2137 alive. scm_protect_object adds its argument to the list;
2138 scm_unprotect_object removes the first occurrence of its argument
/* Protect OBJ from collection by consing it onto the scm_protects
 * list (see the long comment above: calls nest; each call adds one
 * reference that scm_unprotect_object later removes). */
2141 scm_protect_object (SCM obj
)
2143 scm_protects
= scm_cons (obj
, scm_protects
);
2149 /* Remove any protection for OBJ established by a prior call to
2150 scm_protect_object. This function returns OBJ.
2152 See scm_protect_object for more information. */
/* Remove the first occurrence of OBJ from the scm_protects list,
 * undoing one prior scm_protect_object call (see comment above). */
2154 scm_unprotect_object (SCM obj
)
/* tail_ptr walks the list one cdr-slot at a time so the match can be
 * spliced out in place. */
2156 SCM
*tail_ptr
= &scm_protects
;
2158 while (SCM_CONSP (*tail_ptr
))
2159 if (SCM_CAR (*tail_ptr
) == obj
)
/* Found: splice this cell out of the list. */
2161 *tail_ptr
= SCM_CDR (*tail_ptr
);
/* Not this cell: advance to the next cdr slot. */
2165 tail_ptr
= SCM_CDRLOC (*tail_ptr
);
2172 /* called on process termination. */
/* Process-termination handler (see comment above): flushes all open
 * ports.  The #error line belongs to a configuration branch for
 * systems with no known exit-hook mechanism; the surrounding #ifdefs
 * are in sampled-out lines. */
2178 extern int on_exit (void (*procp
) (), int arg
);
2181 cleanup (int status
, void *arg
)
2183 #error Dont know how to setup a cleanup handler on your system.
2188 scm_flush_all_ports ();
/* Create the first heap segment for FREELISTP.  A zero request
 * defaults to SCM_INIT_HEAP_SIZE; if that init_heap_seg/malloc
 * attempt fails, retry once with SCM_HEAP_SEG_SIZE.  NOTE(review):
 * the final failure/return paths are in sampled-out lines. */
2193 make_initial_segment (scm_sizet init_heap_size
, scm_freelist_t
*freelistp
)
2195 if (0L == init_heap_size
)
2196 init_heap_size
= SCM_INIT_HEAP_SIZE
;
2197 if (!init_heap_seg ((SCM_CELLPTR
) malloc (init_heap_size
),
/* First attempt failed: fall back to the standard segment size. */
2201 init_heap_size
= SCM_HEAP_SEG_SIZE
;
2202 if (!init_heap_seg ((SCM_CELLPTR
) malloc (init_heap_size
),
2214 #ifdef GUILE_NEW_GC_SCHEME
2216 scm_init_storage (scm_sizet init_heap_size
, int gc_trigger
,
2217 scm_sizet init_heap2_size
, int gc_trigger2
)
2220 scm_init_storage (scm_sizet init_heap_size
, scm_sizet init_heap2_size
)
2225 j
= SCM_NUM_PROTECTS
;
2227 scm_sys_protects
[--j
] = SCM_BOOL_F
;
2230 #ifdef GUILE_NEW_GC_SCHEME
2231 scm_freelist
= SCM_EOL
;
2232 scm_master_freelist
.clusters
= SCM_EOL
;
2233 scm_master_freelist
.triggeredp
= 0;
2234 scm_master_freelist
.gc_trigger
2235 = gc_trigger
? gc_trigger
: SCM_GC_TRIGGER
;
2236 scm_master_freelist
.span
= 1;
2237 scm_master_freelist
.collected
= 0;
2238 scm_master_freelist
.heap_size
= 0;
2240 scm_freelist
.cells
= SCM_EOL
;
2241 scm_freelist
.span
= 1;
2242 scm_freelist
.collected
= 0;
2243 scm_freelist
.heap_size
= 0;
2246 #ifdef GUILE_NEW_GC_SCHEME
2247 scm_freelist2
= SCM_EOL
;
2248 scm_master_freelist2
.clusters
= SCM_EOL
;
2249 scm_master_freelist2
.triggeredp
= 0;
2250 scm_master_freelist2
.gc_trigger
2251 = gc_trigger2
? gc_trigger2
: SCM_GC_TRIGGER2
;
2252 scm_master_freelist2
.span
= 2;
2253 scm_master_freelist2
.collected
= 0;
2254 scm_master_freelist2
.heap_size
= 0;
2256 scm_freelist2
.cells
= SCM_EOL
;
2257 scm_freelist2
.span
= 2;
2258 scm_freelist2
.collected
= 0;
2259 scm_freelist2
.heap_size
= 0;
2264 j
= SCM_HEAP_SEG_SIZE
;
2265 scm_mtrigger
= SCM_INIT_MALLOC_LIMIT
;
2266 scm_heap_table
= ((struct scm_heap_seg_data
*)
2267 scm_must_malloc (sizeof (struct scm_heap_seg_data
) * 2, "hplims"));
2269 #ifdef GUILE_NEW_GC_SCHEME
2270 if (make_initial_segment (init_heap_size
, &scm_master_freelist
) ||
2271 make_initial_segment (init_heap2_size
, &scm_master_freelist2
))
2274 if (make_initial_segment (init_heap_size
, &scm_freelist
) ||
2275 make_initial_segment (init_heap2_size
, &scm_freelist2
))
2279 scm_heap_org
= CELL_UP (scm_heap_table
[0].bounds
[0]);
2281 /* scm_hplims[0] can change. do not remove scm_heap_org */
2282 scm_weak_vectors
= SCM_EOL
;
2284 /* Initialise the list of ports. */
2285 scm_port_table
= (scm_port
**)
2286 malloc (sizeof (scm_port
*) * scm_port_table_room
);
2287 if (!scm_port_table
)
2294 on_exit (cleanup
, 0);
2298 scm_undefineds
= scm_cons (SCM_UNDEFINED
, SCM_EOL
);
2299 SCM_SETCDR (scm_undefineds
, scm_undefineds
);
2301 scm_listofnull
= scm_cons (SCM_EOL
, SCM_EOL
);
2302 scm_nullstr
= scm_makstr (0L, 0);
2303 scm_nullvect
= scm_make_vector (SCM_INUM0
, SCM_UNDEFINED
);
2304 scm_symhash
= scm_make_vector ((SCM
) SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
);
2305 scm_weak_symhash
= scm_make_weak_key_hash_table ((SCM
) SCM_MAKINUM (scm_symhash_dim
));
2306 scm_symhash_vars
= scm_make_vector ((SCM
) SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
);
2307 scm_stand_in_procs
= SCM_EOL
;
2308 scm_permobjs
= SCM_EOL
;
2309 scm_protects
= SCM_EOL
;
2310 scm_asyncs
= SCM_EOL
;
2311 scm_sysintern ("most-positive-fixnum", (SCM
) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM
));
2312 scm_sysintern ("most-negative-fixnum", (SCM
) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM
));
2314 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD
));