1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
45 /* #define DEBUGINFO */
49 #include "libguile/_scm.h"
50 #include "libguile/stime.h"
51 #include "libguile/stackchk.h"
52 #include "libguile/struct.h"
53 #include "libguile/smob.h"
54 #include "libguile/unif.h"
55 #include "libguile/async.h"
56 #include "libguile/ports.h"
57 #include "libguile/root.h"
58 #include "libguile/strings.h"
59 #include "libguile/vectors.h"
60 #include "libguile/weaks.h"
61 #include "libguile/hashtab.h"
63 #include "libguile/validate.h"
64 #include "libguile/gc.h"
66 #ifdef GUILE_DEBUG_MALLOC
67 #include "libguile/debug-malloc.h"
80 #define var_start(x, y) va_start(x, y)
83 #define var_start(x, y) va_start(x)
87 /* {heap tuning parameters}
89 * These are parameters for controlling memory allocation. The heap
90 * is the area out of which scm_cons, and object headers are allocated.
92 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
93 * 64 bit machine. The units of the _SIZE parameters are bytes.
94 * Cons pairs and object headers occupy one heap cell.
96 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
97 * allocated initially the heap will grow by half its current size
98 * each subsequent time more heap is needed.
100 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
101 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
102 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
103 * is in scm_init_storage() and alloc_some_heap() in sys.c
105 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
106 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
108 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
111 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
114 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
115 * reclaimed by a GC triggered by must_malloc. If less than this is
116 * reclaimed, the trigger threshold is raised. [I don't know what a
117 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
118 * work around an oscillation that caused almost constant GC.]
122 * Heap size 45000 and 40% min yield gives quick startup and no extra
123 * heap allocation. Having higher values on min yield may lead to
124 * large heaps, especially if code behaviour is varying its
125 * maximum consumption between different freelists.
127 #define SCM_INIT_HEAP_SIZE_1 (45000L * sizeof (scm_cell))
128 #define SCM_CLUSTER_SIZE_1 2000L
129 #define SCM_MIN_YIELD_1 40
131 #define SCM_INIT_HEAP_SIZE_2 (2500L * 2 * sizeof (scm_cell))
132 #define SCM_CLUSTER_SIZE_2 1000L
133 /* The following value may seem large, but note that if we get to GC at
134 * all, this means that we have a numerically intensive application
136 #define SCM_MIN_YIELD_2 40
138 #define SCM_MAX_SEGMENT_SIZE 2097000L /* a little less (adm) than 2 Mb */
140 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
142 # define SCM_HEAP_SEG_SIZE 32768L
145 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
147 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
150 /* Make heap grow with factor 1.5 */
151 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
152 #define SCM_INIT_MALLOC_LIMIT 100000
153 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
155 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
156 bounds for allocated storage */
159 /*in 386 protected mode we must only adjust the offset */
160 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
161 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
164 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
165 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
167 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
168 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
171 #define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
172 #define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
173 #define SCM_HEAP_SIZE \
174 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
175 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
182 typedef struct scm_freelist_t
{
183 /* collected cells */
185 /* number of cells left to collect before cluster is full */
186 unsigned int left_to_collect
;
187 /* number of clusters which have been allocated */
188 unsigned int clusters_allocated
;
189 /* a list of freelists, each of size cluster_size,
190 * except the last one which may be shorter
194 /* this is the number of objects in each cluster, including the spine cell */
196 /* indicates that we should grow heap instead of GC:ing
199 /* minimum yield on this list in order not to grow the heap
202 /* defines min_yield as percent of total heap size
204 int min_yield_fraction
;
205 /* number of cells per object on this list */
207 /* number of collected cells during last GC */
209 /* number of collected cells during penultimate GC */
211 /* total number of cells in heap segments
212 * belonging to this list.
217 SCM scm_freelist
= SCM_EOL
;
218 scm_freelist_t scm_master_freelist
= {
219 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_1
, 0, 0, 0, 1, 0, 0
221 SCM scm_freelist2
= SCM_EOL
;
222 scm_freelist_t scm_master_freelist2
= {
223 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_2
, 0, 0, 0, 2, 0, 0
227 * is the number of bytes of must_malloc allocation needed to trigger gc.
229 unsigned long scm_mtrigger
;
233 * If set, don't expand the heap. Set only during gc, during which no allocation
234 * is supposed to take place anyway.
236 int scm_gc_heap_lock
= 0;
239 * Don't pause for collection if this is set -- just
243 int scm_block_gc
= 1;
245 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
246 * collection (GC) more space is allocated for the heap.
248 #define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
250 /* During collection, this accumulates objects holding
253 SCM scm_weak_vectors
;
255 /* GC Statistics Keeping
257 unsigned long scm_cells_allocated
= 0;
258 long scm_mallocated
= 0;
259 unsigned long scm_gc_cells_collected
;
260 unsigned long scm_gc_yield
;
261 static unsigned long scm_gc_yield_1
= 0; /* previous GC yield */
262 unsigned long scm_gc_malloc_collected
;
263 unsigned long scm_gc_ports_collected
;
264 unsigned long scm_gc_rt
;
265 unsigned long scm_gc_time_taken
= 0;
267 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
268 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
269 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
270 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
271 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
272 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
274 typedef struct scm_heap_seg_data_t
276 /* lower and upper bounds of the segment */
277 SCM_CELLPTR bounds
[2];
279 /* address of the head-of-freelist pointer for this segment's cells.
280 All segments usually point to the same one, scm_freelist. */
281 scm_freelist_t
*freelist
;
283 /* number of cells per object in this segment */
285 } scm_heap_seg_data_t
;
289 static scm_sizet
init_heap_seg (SCM_CELLPTR
, scm_sizet
, scm_freelist_t
*);
290 static void alloc_some_heap (scm_freelist_t
*);
294 /* Debugging functions. */
296 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
298 /* Return the number of the heap segment containing CELL. */
304 for (i
= 0; i
< scm_n_heap_segs
; i
++)
305 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], SCM2PTR (cell
))
306 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], SCM2PTR (cell
)))
308 fprintf (stderr
, "which_seg: can't find segment containing cell %lx\n",
315 map_free_list (scm_freelist_t
*master
, SCM freelist
)
317 int last_seg
= -1, count
= 0;
320 for (f
= freelist
; SCM_NIMP (f
); f
= SCM_CDR (f
))
322 int this_seg
= which_seg (f
);
324 if (this_seg
!= last_seg
)
327 fprintf (stderr
, " %5d %d-cells in segment %d\n",
328 count
, master
->span
, last_seg
);
335 fprintf (stderr
, " %5d %d-cells in segment %d\n",
336 count
, master
->span
, last_seg
);
339 SCM_DEFINE (scm_map_free_list
, "map-free-list", 0, 0, 0,
341 "Print debugging information about the free-list.\n"
342 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
343 #define FUNC_NAME s_scm_map_free_list
346 fprintf (stderr
, "%d segments total (%d:%d",
348 scm_heap_table
[0].span
,
349 scm_heap_table
[0].bounds
[1] - scm_heap_table
[0].bounds
[0]);
350 for (i
= 1; i
< scm_n_heap_segs
; i
++)
351 fprintf (stderr
, ", %d:%d",
352 scm_heap_table
[i
].span
,
353 scm_heap_table
[i
].bounds
[1] - scm_heap_table
[i
].bounds
[0]);
354 fprintf (stderr
, ")\n");
355 map_free_list (&scm_master_freelist
, scm_freelist
);
356 map_free_list (&scm_master_freelist2
, scm_freelist2
);
359 return SCM_UNSPECIFIED
;
363 static int last_cluster
;
364 static int last_size
;
367 free_list_length (char *title
, int i
, SCM freelist
)
371 for (ls
= freelist
; SCM_NNULLP (ls
); ls
= SCM_CDR (ls
))
372 if (SCM_CELL_TYPE (ls
) == scm_tc_free_cell
)
376 fprintf (stderr
, "bad cell in %s at position %d\n", title
, n
);
383 if (last_cluster
== i
- 1)
384 fprintf (stderr
, "\t%d\n", last_size
);
386 fprintf (stderr
, "-%d\t%d\n", i
- 1, last_size
);
389 fprintf (stderr
, "%s %d", title
, i
);
391 fprintf (stderr
, "%s\t%d\n", title
, n
);
399 free_list_lengths (char *title
, scm_freelist_t
*master
, SCM freelist
)
402 int i
= 0, len
, n
= 0;
403 fprintf (stderr
, "%s\n\n", title
);
404 n
+= free_list_length ("free list", -1, freelist
);
405 for (clusters
= master
->clusters
;
406 SCM_NNULLP (clusters
);
407 clusters
= SCM_CDR (clusters
))
409 len
= free_list_length ("cluster", i
++, SCM_CAR (clusters
));
412 if (last_cluster
== i
- 1)
413 fprintf (stderr
, "\t%d\n", last_size
);
415 fprintf (stderr
, "-%d\t%d\n", i
- 1, last_size
);
416 fprintf (stderr
, "\ntotal %d objects\n\n", n
);
419 SCM_DEFINE (scm_free_list_length
, "free-list-length", 0, 0, 0,
421 "Print debugging information about the free-list.\n"
422 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
423 #define FUNC_NAME s_scm_free_list_length
425 free_list_lengths ("1-cells", &scm_master_freelist
, scm_freelist
);
426 free_list_lengths ("2-cells", &scm_master_freelist2
, scm_freelist2
);
427 return SCM_UNSPECIFIED
;
433 #ifdef GUILE_DEBUG_FREELIST
435 /* Number of calls to SCM_NEWCELL since startup. */
436 static unsigned long scm_newcell_count
;
437 static unsigned long scm_newcell2_count
;
439 /* Search freelist for anything that isn't marked as a free cell.
440 Abort if we find something. */
442 scm_check_freelist (SCM freelist
)
447 for (f
= freelist
; SCM_NIMP (f
); f
= SCM_CDR (f
), i
++)
448 if (SCM_CAR (f
) != (SCM
) scm_tc_free_cell
)
450 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %d'th elt\n",
451 scm_newcell_count
, i
);
457 static int scm_debug_check_freelist
= 0;
459 SCM_DEFINE (scm_gc_set_debug_check_freelist_x
, "gc-set-debug-check-freelist!", 1, 0, 0,
461 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
462 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
463 "compile-time flag was selected.\n")
464 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
466 SCM_VALIDATE_BOOL_COPY (1, flag
, scm_debug_check_freelist
);
467 return SCM_UNSPECIFIED
;
473 scm_debug_newcell (void)
478 if (scm_debug_check_freelist
)
480 scm_check_freelist (scm_freelist
);
484 /* The rest of this is supposed to be identical to the SCM_NEWCELL
486 if (SCM_IMP (scm_freelist
))
487 new = scm_gc_for_newcell (&scm_master_freelist
, &scm_freelist
);
491 scm_freelist
= SCM_CDR (scm_freelist
);
492 SCM_SETCAR (new, scm_tc16_allocated
);
499 scm_debug_newcell2 (void)
503 scm_newcell2_count
++;
504 if (scm_debug_check_freelist
)
506 scm_check_freelist (scm_freelist2
);
510 /* The rest of this is supposed to be identical to the SCM_NEWCELL
512 if (SCM_IMP (scm_freelist2
))
513 new = scm_gc_for_newcell (&scm_master_freelist2
, &scm_freelist2
);
517 scm_freelist2
= SCM_CDR (scm_freelist2
);
518 SCM_SETCAR (new, scm_tc16_allocated
);
524 #endif /* GUILE_DEBUG_FREELIST */
529 master_cells_allocated (scm_freelist_t
*master
)
531 int objects
= master
->clusters_allocated
* (master
->cluster_size
- 1);
532 if (SCM_NULLP (master
->clusters
))
533 objects
-= master
->left_to_collect
;
534 return master
->span
* objects
;
538 freelist_length (SCM freelist
)
541 for (n
= 0; SCM_NNULLP (freelist
); freelist
= SCM_CDR (freelist
))
547 compute_cells_allocated ()
549 return (scm_cells_allocated
550 + master_cells_allocated (&scm_master_freelist
)
551 + master_cells_allocated (&scm_master_freelist2
)
552 - scm_master_freelist
.span
* freelist_length (scm_freelist
)
553 - scm_master_freelist2
.span
* freelist_length (scm_freelist2
));
556 /* {Scheme Interface to GC}
559 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
561 "Returns an association list of statistics about Guile's current use of storage. ")
562 #define FUNC_NAME s_scm_gc_stats
567 long int local_scm_mtrigger
;
568 long int local_scm_mallocated
;
569 long int local_scm_heap_size
;
570 long int local_scm_cells_allocated
;
571 long int local_scm_gc_time_taken
;
579 for (i
= scm_n_heap_segs
; i
--; )
580 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
581 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
583 if (scm_n_heap_segs
!= n
)
587 /* Below, we cons to produce the resulting list. We want a snapshot of
588 * the heap situation before consing.
590 local_scm_mtrigger
= scm_mtrigger
;
591 local_scm_mallocated
= scm_mallocated
;
592 local_scm_heap_size
= SCM_HEAP_SIZE
;
593 local_scm_cells_allocated
= compute_cells_allocated ();
594 local_scm_gc_time_taken
= scm_gc_time_taken
;
596 answer
= scm_listify (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
597 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
598 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
599 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
600 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
601 scm_cons (sym_heap_segments
, heap_segs
),
610 scm_gc_start (const char *what
)
612 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ());
613 scm_gc_cells_collected
= 0;
614 scm_gc_yield_1
= scm_gc_yield
;
615 scm_gc_yield
= (scm_cells_allocated
616 + master_cells_allocated (&scm_master_freelist
)
617 + master_cells_allocated (&scm_master_freelist2
));
618 scm_gc_malloc_collected
= 0;
619 scm_gc_ports_collected
= 0;
625 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt
;
626 scm_gc_time_taken
+= scm_gc_rt
;
627 scm_system_async_mark (scm_gc_async
);
631 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
633 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
634 "returned by this function for @var{obj}")
635 #define FUNC_NAME s_scm_object_address
637 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj
));
642 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
644 "Scans all of SCM objects and reclaims for further use those that are\n"
645 "no longer accessible.")
646 #define FUNC_NAME s_scm_gc
651 return SCM_UNSPECIFIED
;
657 /* {C Interface For When GC is Triggered}
661 adjust_min_yield (scm_freelist_t
*freelist
)
663 /* min yield is adjusted upwards so that next predicted total yield
664 * (allocated cells actually freed by GC) becomes
665 * `min_yield_fraction' of total heap size. Note, however, that
666 * the absolute value of min_yield will correspond to `collected'
667 * on one master (the one which currently is triggering GC).
669 * The reason why we look at total yield instead of cells collected
670 * on one list is that we want to take other freelists into account.
671 * On this freelist, we know that (local) yield = collected cells,
672 * but that's probably not the case on the other lists.
674 * (We might consider computing a better prediction, for example
675 * by computing an average over multiple GC:s.)
677 if (freelist
->min_yield_fraction
)
679 /* Pick largest of last two yields. */
680 int delta
= ((SCM_HEAP_SIZE
* freelist
->min_yield_fraction
/ 100)
681 - (long) SCM_MAX (scm_gc_yield_1
, scm_gc_yield
));
683 fprintf (stderr
, " after GC = %d, delta = %d\n",
688 freelist
->min_yield
+= delta
;
692 /* When we get POSIX threads support, the master will be global and
693 * common while the freelist will be individual for each thread.
697 scm_gc_for_newcell (scm_freelist_t
*master
, SCM
*freelist
)
703 if (SCM_NULLP (master
->clusters
))
705 if (master
->grow_heap_p
)
707 master
->grow_heap_p
= 0;
708 alloc_some_heap (master
);
713 fprintf (stderr
, "allocated = %d, ",
715 + master_cells_allocated (&scm_master_freelist
)
716 + master_cells_allocated (&scm_master_freelist2
));
719 adjust_min_yield (master
);
722 cell
= SCM_CAR (master
->clusters
);
723 master
->clusters
= SCM_CDR (master
->clusters
);
724 ++master
->clusters_allocated
;
726 while (SCM_NULLP (cell
));
728 *freelist
= SCM_CDR (cell
);
729 SCM_SET_CELL_TYPE (cell
, scm_tc16_allocated
);
734 /* This is a support routine which can be used to reserve a cluster
735 * for some special use, such as debugging. It won't be useful until
736 * free cells are preserved between garbage collections.
740 scm_alloc_cluster (scm_freelist_t
*master
)
743 cell
= scm_gc_for_newcell (master
, &freelist
);
744 SCM_SETCDR (cell
, freelist
);
749 SCM scm_after_gc_hook
;
751 scm_c_hook_t scm_before_gc_c_hook
;
752 scm_c_hook_t scm_before_mark_c_hook
;
753 scm_c_hook_t scm_before_sweep_c_hook
;
754 scm_c_hook_t scm_after_sweep_c_hook
;
755 scm_c_hook_t scm_after_gc_c_hook
;
758 scm_igc (const char *what
)
762 scm_c_hook_run (&scm_before_gc_c_hook
, 0);
765 SCM_NULLP (scm_freelist
)
767 : (SCM_NULLP (scm_freelist2
) ? "o" : "m"));
770 /* During the critical section, only the current thread may run. */
771 SCM_THREAD_CRITICAL_SECTION_START
;
774 /* fprintf (stderr, "gc: %s\n", what); */
778 if (!scm_stack_base
|| scm_block_gc
)
784 if (scm_mallocated
< 0)
785 /* The byte count of allocated objects has underflowed. This is
786 probably because you forgot to report the sizes of objects you
787 have allocated, by calling scm_done_malloc or some such. When
788 the GC freed them, it subtracted their size from
789 scm_mallocated, which underflowed. */
792 if (scm_gc_heap_lock
)
793 /* We've invoked the collector while a GC is already in progress.
794 That should never happen. */
799 /* unprotect any struct types with no instances */
805 pos
= &scm_type_obj_list
;
806 type_list
= scm_type_obj_list
;
807 while (type_list
!= SCM_EOL
)
808 if (SCM_VELTS (SCM_CAR (type_list
))[scm_struct_i_refcnt
])
810 pos
= SCM_CDRLOC (type_list
);
811 type_list
= SCM_CDR (type_list
);
815 *pos
= SCM_CDR (type_list
);
816 type_list
= SCM_CDR (type_list
);
821 /* flush dead entries from the continuation stack */
826 elts
= SCM_VELTS (scm_continuation_stack
);
827 bound
= SCM_LENGTH (scm_continuation_stack
);
828 x
= SCM_INUM (scm_continuation_stack_ptr
);
831 elts
[x
] = SCM_BOOL_F
;
836 scm_c_hook_run (&scm_before_mark_c_hook
, 0);
840 /* Protect from the C stack. This must be the first marking
841 * done because it provides information about what objects
842 * are "in-use" by the C code. "in-use" objects are those
843 * for which the values from SCM_LENGTH and SCM_CHARS must remain
844 * usable. This requirement is stricter than a liveness
845 * requirement -- in particular, it constrains the implementation
846 * of scm_vector_set_length_x.
848 SCM_FLUSH_REGISTER_WINDOWS
;
849 /* This assumes that all registers are saved into the jmp_buf */
850 setjmp (scm_save_regs_gc_mark
);
851 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
852 ( (scm_sizet
) (sizeof (SCM_STACKITEM
) - 1 +
853 sizeof scm_save_regs_gc_mark
)
854 / sizeof (SCM_STACKITEM
)));
857 scm_sizet stack_len
= scm_stack_size (scm_stack_base
);
858 #ifdef SCM_STACK_GROWS_UP
859 scm_mark_locations (scm_stack_base
, stack_len
);
861 scm_mark_locations (scm_stack_base
- stack_len
, stack_len
);
865 #else /* USE_THREADS */
867 /* Mark every thread's stack and registers */
868 scm_threads_mark_stacks ();
870 #endif /* USE_THREADS */
872 /* FIXME: insert a phase to un-protect string-data preserved
873 * in scm_vector_set_length_x.
876 j
= SCM_NUM_PROTECTS
;
878 scm_gc_mark (scm_sys_protects
[j
]);
880 /* FIXME: we should have a means to register C functions to be run
881 * in different phases of GC
883 scm_mark_subr_table ();
886 scm_gc_mark (scm_root
->handle
);
889 scm_c_hook_run (&scm_before_sweep_c_hook
, 0);
893 scm_c_hook_run (&scm_after_sweep_c_hook
, 0);
899 SCM_THREAD_CRITICAL_SECTION_END
;
901 scm_c_hook_run (&scm_after_gc_c_hook
, 0);
910 /* Mark an object precisely.
925 if (SCM_NCELLP (ptr
))
926 scm_wta (ptr
, "rogue pointer in heap", NULL
);
928 switch (SCM_TYP7 (ptr
))
930 case scm_tcs_cons_nimcar
:
931 if (SCM_GCMARKP (ptr
))
934 if (SCM_IMP (SCM_CDR (ptr
))) /* SCM_IMP works even with a GC mark */
939 scm_gc_mark (SCM_CAR (ptr
));
940 ptr
= SCM_GCCDR (ptr
);
942 case scm_tcs_cons_imcar
:
943 if (SCM_GCMARKP (ptr
))
946 ptr
= SCM_GCCDR (ptr
);
949 if (SCM_GCMARKP (ptr
))
952 scm_gc_mark (SCM_CELL_OBJECT_2 (ptr
));
953 ptr
= SCM_GCCDR (ptr
);
955 case scm_tcs_cons_gloc
:
956 if (SCM_GCMARKP (ptr
))
960 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
961 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
962 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
963 * pointer to a struct vtable data region. The fact that these are
964 * accessed in the same way restricts the possibilities to change the
965 * data layout of structs or heap cells.
967 scm_bits_t word0
= SCM_CELL_WORD_0 (ptr
) - scm_tc3_cons_gloc
;
968 scm_bits_t
* vtable_data
= (scm_bits_t
*) word0
; /* access as struct */
969 switch (vtable_data
[scm_vtable_index_vcell
])
974 SCM gloc_car
= SCM_PACK (word0
);
975 scm_gc_mark (gloc_car
);
976 ptr
= SCM_GCCDR (ptr
);
982 /* ptr is a struct */
983 SCM layout
= SCM_PACK (vtable_data
[scm_vtable_index_layout
]);
984 int len
= SCM_LENGTH (layout
);
985 char * fields_desc
= SCM_CHARS (layout
);
986 /* We're using SCM_GCCDR here like STRUCT_DATA, except
987 that it removes the mark */
988 scm_bits_t
* struct_data
= (scm_bits_t
*) SCM_UNPACK (SCM_GCCDR (ptr
));
990 if (vtable_data
[scm_struct_i_flags
] & SCM_STRUCTF_ENTITY
)
992 scm_gc_mark (SCM_PACK (struct_data
[scm_struct_i_procedure
]));
993 scm_gc_mark (SCM_PACK (struct_data
[scm_struct_i_setter
]));
999 for (x
= 0; x
< len
- 2; x
+= 2, ++struct_data
)
1000 if (fields_desc
[x
] == 'p')
1001 scm_gc_mark (SCM_PACK (*struct_data
));
1002 if (fields_desc
[x
] == 'p')
1004 if (SCM_LAYOUT_TAILP (fields_desc
[x
+ 1]))
1005 for (x
= *struct_data
; x
; --x
)
1006 scm_gc_mark (SCM_PACK (*++struct_data
));
1008 scm_gc_mark (SCM_PACK (*struct_data
));
1011 if (vtable_data
[scm_vtable_index_vcell
] == 0)
1013 vtable_data
[scm_vtable_index_vcell
] = 1;
1014 ptr
= SCM_PACK (vtable_data
[scm_vtable_index_vtable
]);
1021 case scm_tcs_closures
:
1022 if (SCM_GCMARKP (ptr
))
1024 SCM_SETGCMARK (ptr
);
1025 if (SCM_IMP (SCM_CDR (ptr
)))
1027 ptr
= SCM_CLOSCAR (ptr
);
1030 scm_gc_mark (SCM_CLOSCAR (ptr
));
1031 ptr
= SCM_GCCDR (ptr
);
1033 case scm_tc7_vector
:
1034 case scm_tc7_lvector
:
1038 if (SCM_GC8MARKP (ptr
))
1040 SCM_SETGC8MARK (ptr
);
1041 i
= SCM_LENGTH (ptr
);
1045 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
1046 scm_gc_mark (SCM_VELTS (ptr
)[i
]);
1047 ptr
= SCM_VELTS (ptr
)[0];
1049 case scm_tc7_contin
:
1052 SCM_SETGC8MARK (ptr
);
1053 if (SCM_VELTS (ptr
))
1054 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr
),
1057 (sizeof (SCM_STACKITEM
) + -1 +
1058 sizeof (scm_contregs
)) /
1059 sizeof (SCM_STACKITEM
)));
1063 case scm_tc7_byvect
:
1070 #ifdef HAVE_LONG_LONGS
1071 case scm_tc7_llvect
:
1074 case scm_tc7_string
:
1075 SCM_SETGC8MARK (ptr
);
1078 case scm_tc7_substring
:
1079 if (SCM_GC8MARKP(ptr
))
1081 SCM_SETGC8MARK (ptr
);
1082 ptr
= SCM_CDR (ptr
);
1086 if (SCM_GC8MARKP(ptr
))
1088 SCM_WVECT_GC_CHAIN (ptr
) = scm_weak_vectors
;
1089 scm_weak_vectors
= ptr
;
1090 SCM_SETGC8MARK (ptr
);
1091 if (SCM_IS_WHVEC_ANY (ptr
))
1098 len
= SCM_LENGTH (ptr
);
1099 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
1100 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
1102 for (x
= 0; x
< len
; ++x
)
1105 alist
= SCM_VELTS (ptr
)[x
];
1107 /* mark everything on the alist except the keys or
1108 * values, according to weak_values and weak_keys. */
1109 while ( SCM_CONSP (alist
)
1110 && !SCM_GCMARKP (alist
)
1111 && SCM_CONSP (SCM_CAR (alist
)))
1116 kvpair
= SCM_CAR (alist
);
1117 next_alist
= SCM_CDR (alist
);
1120 * SCM_SETGCMARK (alist);
1121 * SCM_SETGCMARK (kvpair);
1123 * It may be that either the key or value is protected by
1124 * an escaped reference to part of the spine of this alist.
1125 * If we mark the spine here, and only mark one or neither of the
1126 * key and value, they may never be properly marked.
1127 * This leads to a horrible situation in which an alist containing
1128 * freelist cells is exported.
1130 * So only mark the spines of these arrays last of all marking.
1131 * If somebody confuses us by constructing a weak vector
1132 * with a circular alist then we are hosed, but at least we
1133 * won't prematurely drop table entries.
1136 scm_gc_mark (SCM_CAR (kvpair
));
1138 scm_gc_mark (SCM_GCCDR (kvpair
));
1141 if (SCM_NIMP (alist
))
1142 scm_gc_mark (alist
);
1147 case scm_tc7_msymbol
:
1148 if (SCM_GC8MARKP(ptr
))
1150 SCM_SETGC8MARK (ptr
);
1151 scm_gc_mark (SCM_SYMBOL_FUNC (ptr
));
1152 ptr
= SCM_SYMBOL_PROPS (ptr
);
1154 case scm_tc7_ssymbol
:
1155 if (SCM_GC8MARKP(ptr
))
1157 SCM_SETGC8MARK (ptr
);
1162 i
= SCM_PTOBNUM (ptr
);
1163 if (!(i
< scm_numptob
))
1165 if (SCM_GC8MARKP (ptr
))
1167 SCM_SETGC8MARK (ptr
);
1168 if (SCM_PTAB_ENTRY(ptr
))
1169 scm_gc_mark (SCM_PTAB_ENTRY(ptr
)->file_name
);
1170 if (scm_ptobs
[i
].mark
)
1172 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
1179 if (SCM_GC8MARKP (ptr
))
1181 SCM_SETGC8MARK (ptr
);
1182 switch (SCM_GCTYP16 (ptr
))
1183 { /* should be faster than going through scm_smobs */
1184 case scm_tc_free_cell
:
1185 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1186 case scm_tc16_allocated
:
1189 case scm_tc16_complex
:
1192 i
= SCM_SMOBNUM (ptr
);
1193 if (!(i
< scm_numsmob
))
1195 if (scm_smobs
[i
].mark
)
1197 ptr
= (scm_smobs
[i
].mark
) (ptr
);
1205 def
:scm_wta (ptr
, "unknown type in ", "gc_mark");
1210 /* Mark a Region Conservatively
1214 scm_mark_locations (SCM_STACKITEM x
[], scm_sizet n
)
1216 register long m
= n
;
1218 register SCM_CELLPTR ptr
;
1221 if (SCM_CELLP (* (SCM
*) &x
[m
]))
1223 ptr
= SCM2PTR (* (SCM
*) &x
[m
]);
1225 j
= scm_n_heap_segs
- 1;
1226 if ( SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1227 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1234 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
1236 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1244 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
1248 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
1253 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1257 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1263 if (scm_heap_table
[seg_id
].span
== 1
1264 || SCM_DOUBLE_CELLP (* (SCM
*) &x
[m
]))
1265 scm_gc_mark (* (SCM
*) &x
[m
]);
1274 /* The function scm_cellp determines whether an SCM value can be regarded as a
1275 * pointer to a cell on the heap. Binary search is used in order to determine
1276 * the heap segment that contains the cell.
1279 scm_cellp (SCM value
)
1281 if (SCM_CELLP (value
)) {
1282 scm_cell
* ptr
= SCM2PTR (value
);
1284 unsigned int j
= scm_n_heap_segs
- 1;
1287 int k
= (i
+ j
) / 2;
1288 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
)) {
1290 } else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
)) {
1295 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1296 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
)
1297 && (scm_heap_table
[i
].span
== 1 || SCM_DOUBLE_CELLP (value
))) {
1309 gc_sweep_freelist_start (scm_freelist_t
*freelist
)
1311 freelist
->cells
= SCM_EOL
;
1312 freelist
->left_to_collect
= freelist
->cluster_size
;
1313 freelist
->clusters_allocated
= 0;
1314 freelist
->clusters
= SCM_EOL
;
1315 freelist
->clustertail
= &freelist
->clusters
;
1316 freelist
->collected_1
= freelist
->collected
;
1317 freelist
->collected
= 0;
1321 gc_sweep_freelist_finish (scm_freelist_t
*freelist
)
1324 *freelist
->clustertail
= freelist
->cells
;
1325 if (SCM_NNULLP (freelist
->cells
))
1327 SCM c
= freelist
->cells
;
1328 SCM_SETCAR (c
, SCM_CDR (c
));
1329 SCM_SETCDR (c
, SCM_EOL
);
1330 freelist
->collected
+=
1331 freelist
->span
* (freelist
->cluster_size
- freelist
->left_to_collect
);
1333 scm_gc_cells_collected
+= freelist
->collected
;
1335 /* Although freelist->min_yield is used to test freelist->collected
1336 * (which is the local GC yield for freelist), it is adjusted so
1337 * that *total* yield is freelist->min_yield_fraction of total heap
1338 * size. This means that a too low yield is compensated by more
1339 * heap on the list which is currently doing most work, which is
1340 * just what we want.
1342 collected
= SCM_MAX (freelist
->collected_1
, freelist
->collected
);
1343 freelist
->grow_heap_p
= (collected
< freelist
->min_yield
);
1349 register SCM_CELLPTR ptr
;
1350 register SCM nfreelist
;
1351 register scm_freelist_t
*freelist
;
1359 gc_sweep_freelist_start (&scm_master_freelist
);
1360 gc_sweep_freelist_start (&scm_master_freelist2
);
1362 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1364 register unsigned int left_to_collect
;
1365 register scm_sizet j
;
1367 /* Unmarked cells go onto the front of the freelist this heap
1368 segment points to. Rather than updating the real freelist
1369 pointer as we go along, we accumulate the new head in
1370 nfreelist. Then, if it turns out that the entire segment is
1371 free, we free (i.e., malloc's free) the whole segment, and
1372 simply don't assign nfreelist back into the real freelist. */
1373 freelist
= scm_heap_table
[i
].freelist
;
1374 nfreelist
= freelist
->cells
;
1375 left_to_collect
= freelist
->left_to_collect
;
1376 span
= scm_heap_table
[i
].span
;
1378 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0], span
);
1379 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1], span
) - ptr
;
1380 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
1382 SCM scmptr
= PTR2SCM (ptr
);
1384 switch SCM_TYP7 (scmptr
)
1386 case scm_tcs_cons_gloc
:
1388 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1389 * struct or a gloc. See the corresponding comment in
1392 scm_bits_t word0
= SCM_CELL_WORD_0 (scmptr
) - scm_tc3_cons_gloc
;
1393 scm_bits_t
* vtable_data
= (scm_bits_t
*) word0
; /* access as struct */
1394 if (SCM_GCMARKP (scmptr
))
1396 if (vtable_data
[scm_vtable_index_vcell
] == 1)
1397 vtable_data
[scm_vtable_index_vcell
] = 0;
1402 if (vtable_data
[scm_vtable_index_vcell
] == 0
1403 || vtable_data
[scm_vtable_index_vcell
] == 1)
1405 scm_struct_free_t free
1406 = (scm_struct_free_t
) vtable_data
[scm_struct_i_free
];
1407 m
+= free (vtable_data
, (scm_bits_t
*) SCM_UNPACK (SCM_GCCDR (scmptr
)));
1412 case scm_tcs_cons_imcar
:
1413 case scm_tcs_cons_nimcar
:
1414 case scm_tcs_closures
:
1416 if (SCM_GCMARKP (scmptr
))
1420 if (SCM_GC8MARKP (scmptr
))
1426 m
+= (2 + SCM_LENGTH (scmptr
)) * sizeof (SCM
);
1427 scm_must_free ((char *)(SCM_VELTS (scmptr
) - 2));
1431 case scm_tc7_vector
:
1432 case scm_tc7_lvector
:
1436 if (SCM_GC8MARKP (scmptr
))
1439 m
+= (SCM_LENGTH (scmptr
) * sizeof (SCM
));
1441 scm_must_free (SCM_CHARS (scmptr
));
1442 /* SCM_SETCHARS(scmptr, 0);*/
1446 if SCM_GC8MARKP (scmptr
)
1448 m
+= sizeof (long) * ((SCM_HUGE_LENGTH (scmptr
) + SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
1450 case scm_tc7_byvect
:
1451 if SCM_GC8MARKP (scmptr
)
1453 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (char);
1457 if SCM_GC8MARKP (scmptr
)
1459 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long);
1462 if SCM_GC8MARKP (scmptr
)
1464 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (short);
1466 #ifdef HAVE_LONG_LONGS
1467 case scm_tc7_llvect
:
1468 if SCM_GC8MARKP (scmptr
)
1470 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long_long
);
1474 if SCM_GC8MARKP (scmptr
)
1476 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (float);
1479 if SCM_GC8MARKP (scmptr
)
1481 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (double);
1484 if SCM_GC8MARKP (scmptr
)
1486 m
+= SCM_HUGE_LENGTH (scmptr
) * 2 * sizeof (double);
1489 case scm_tc7_substring
:
1490 if (SCM_GC8MARKP (scmptr
))
1493 case scm_tc7_string
:
1494 if (SCM_GC8MARKP (scmptr
))
1496 m
+= SCM_HUGE_LENGTH (scmptr
) + 1;
1498 case scm_tc7_msymbol
:
1499 if (SCM_GC8MARKP (scmptr
))
1501 m
+= (SCM_LENGTH (scmptr
) + 1
1502 + (SCM_CHARS (scmptr
) - (char *) SCM_SLOTS (scmptr
)));
1503 scm_must_free ((char *)SCM_SLOTS (scmptr
));
1505 case scm_tc7_contin
:
1506 if SCM_GC8MARKP (scmptr
)
1508 m
+= SCM_LENGTH (scmptr
) * sizeof (SCM_STACKITEM
) + sizeof (scm_contregs
);
1509 if (SCM_VELTS (scmptr
))
1511 case scm_tc7_ssymbol
:
1512 if SCM_GC8MARKP(scmptr
)
1518 if SCM_GC8MARKP (scmptr
)
1520 if SCM_OPENP (scmptr
)
1522 int k
= SCM_PTOBNUM (scmptr
);
1523 if (!(k
< scm_numptob
))
1525 /* Keep "revealed" ports alive. */
1526 if (scm_revealed_count (scmptr
) > 0)
1528 /* Yes, I really do mean scm_ptobs[k].free */
1529 /* rather than ftobs[k].close. .close */
1530 /* is for explicit CLOSE-PORT by user */
1531 m
+= (scm_ptobs
[k
].free
) (scmptr
);
1532 SCM_SETSTREAM (scmptr
, 0);
1533 scm_remove_from_port_table (scmptr
);
1534 scm_gc_ports_collected
++;
1535 SCM_SETAND_CAR (scmptr
, ~SCM_OPN
);
1539 switch SCM_GCTYP16 (scmptr
)
1541 case scm_tc_free_cell
:
1543 if SCM_GC8MARKP (scmptr
)
1548 if SCM_GC8MARKP (scmptr
)
1550 m
+= (SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
/ SCM_CHAR_BIT
);
1552 #endif /* def SCM_BIGDIG */
1553 case scm_tc16_complex
:
1554 if SCM_GC8MARKP (scmptr
)
1556 m
+= 2 * sizeof (double);
1559 if SCM_GC8MARKP (scmptr
)
1564 k
= SCM_SMOBNUM (scmptr
);
1565 if (!(k
< scm_numsmob
))
1567 m
+= (scm_smobs
[k
].free
) (scmptr
);
1573 sweeperr
:scm_wta (scmptr
, "unknown type in ", "gc_sweep");
1576 if (SCM_CAR (scmptr
) == (SCM
) scm_tc_free_cell
)
1579 if (!--left_to_collect
)
1581 SCM_SETCAR (scmptr
, nfreelist
);
1582 *freelist
->clustertail
= scmptr
;
1583 freelist
->clustertail
= SCM_CDRLOC (scmptr
);
1585 nfreelist
= SCM_EOL
;
1586 freelist
->collected
+= span
* freelist
->cluster_size
;
1587 left_to_collect
= freelist
->cluster_size
;
1591 /* Stick the new cell on the front of nfreelist. It's
1592 critical that we mark this cell as freed; otherwise, the
1593 conservative collector might trace it as some other type
1595 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
1596 SCM_SETCDR (scmptr
, nfreelist
);
1602 SCM_CLRGC8MARK (scmptr
);
1605 SCM_CLRGCMARK (scmptr
);
1607 #ifdef GC_FREE_SEGMENTS
1612 freelist
->heap_size
-= seg_size
;
1613 free ((char *) scm_heap_table
[i
].bounds
[0]);
1614 scm_heap_table
[i
].bounds
[0] = 0;
1615 for (j
= i
+ 1; j
< scm_n_heap_segs
; j
++)
1616 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1617 scm_n_heap_segs
-= 1;
1618 i
--; /* We need to scan the segment just moved. */
1621 #endif /* ifdef GC_FREE_SEGMENTS */
1623 /* Update the real freelist pointer to point to the head of
1624 the list of free cells we've built for this segment. */
1625 freelist
->cells
= nfreelist
;
1626 freelist
->left_to_collect
= left_to_collect
;
1629 #ifdef GUILE_DEBUG_FREELIST
1630 scm_check_freelist (freelist
== &scm_master_freelist
1633 scm_map_free_list ();
1637 gc_sweep_freelist_finish (&scm_master_freelist
);
1638 gc_sweep_freelist_finish (&scm_master_freelist2
);
1640 /* When we move to POSIX threads private freelists should probably
1641 be GC-protected instead. */
1642 scm_freelist
= SCM_EOL
;
1643 scm_freelist2
= SCM_EOL
;
1645 scm_cells_allocated
= (SCM_HEAP_SIZE
- scm_gc_cells_collected
);
1646 scm_gc_yield
-= scm_cells_allocated
;
1647 scm_mallocated
-= m
;
1648 scm_gc_malloc_collected
= m
;
1654 /* {Front end to malloc}
1656 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1658 * These functions provide services comperable to malloc, realloc, and
1659 * free. They are for allocating malloced parts of scheme objects.
1660 * The primary purpose of the front end is to impose calls to gc.
1665 * Return newly malloced storage or throw an error.
1667 * The parameter WHAT is a string for error reporting.
1668 * If the threshold scm_mtrigger will be passed by this
1669 * allocation, or if the first call to malloc fails,
1670 * garbage collect -- on the presumption that some objects
1671 * using malloced storage may be collected.
1673 * The limit scm_mtrigger may be raised by this allocation.
1676 scm_must_malloc (scm_sizet size
, const char *what
)
1679 unsigned long nm
= scm_mallocated
+ size
;
1681 if (nm
<= scm_mtrigger
)
1683 SCM_SYSCALL (ptr
= malloc (size
));
1686 scm_mallocated
= nm
;
1687 #ifdef GUILE_DEBUG_MALLOC
1688 scm_malloc_register (ptr
, what
);
1696 nm
= scm_mallocated
+ size
;
1697 SCM_SYSCALL (ptr
= malloc (size
));
1700 scm_mallocated
= nm
;
1701 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1702 if (nm
> scm_mtrigger
)
1703 scm_mtrigger
= nm
+ nm
/ 2;
1705 scm_mtrigger
+= scm_mtrigger
/ 2;
1707 #ifdef GUILE_DEBUG_MALLOC
1708 scm_malloc_register (ptr
, what
);
1714 scm_wta (SCM_MAKINUM (size
), (char *) SCM_NALLOC
, what
);
1715 return 0; /* never reached */
1720 * is similar to scm_must_malloc.
1723 scm_must_realloc (void *where
,
1729 scm_sizet nm
= scm_mallocated
+ size
- old_size
;
1731 if (nm
<= scm_mtrigger
)
1733 SCM_SYSCALL (ptr
= realloc (where
, size
));
1736 scm_mallocated
= nm
;
1737 #ifdef GUILE_DEBUG_MALLOC
1738 scm_malloc_reregister (where
, ptr
, what
);
1746 nm
= scm_mallocated
+ size
- old_size
;
1747 SCM_SYSCALL (ptr
= realloc (where
, size
));
1750 scm_mallocated
= nm
;
1751 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1752 if (nm
> scm_mtrigger
)
1753 scm_mtrigger
= nm
+ nm
/ 2;
1755 scm_mtrigger
+= scm_mtrigger
/ 2;
1757 #ifdef GUILE_DEBUG_MALLOC
1758 scm_malloc_reregister (where
, ptr
, what
);
1763 scm_wta (SCM_MAKINUM (size
), (char *) SCM_NALLOC
, what
);
1764 return 0; /* never reached */
1768 scm_must_free (void *obj
)
1770 #ifdef GUILE_DEBUG_MALLOC
1771 scm_malloc_unregister (obj
);
1776 scm_wta (SCM_INUM0
, "already free", "");
1779 /* Announce that there has been some malloc done that will be freed
1780 * during gc. A typical use is for a smob that uses some malloced
1781 * memory but can not get it from scm_must_malloc (for whatever
1782 * reason). When a new object of this smob is created you call
1783 * scm_done_malloc with the size of the object. When your smob free
1784 * function is called, be sure to include this size in the return
1788 scm_done_malloc (long size
)
1790 scm_mallocated
+= size
;
1792 if (scm_mallocated
> scm_mtrigger
)
1794 scm_igc ("foreign mallocs");
1795 if (scm_mallocated
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
)
1797 if (scm_mallocated
> scm_mtrigger
)
1798 scm_mtrigger
= scm_mallocated
+ scm_mallocated
/ 2;
1800 scm_mtrigger
+= scm_mtrigger
/ 2;
1810 * Each heap segment is an array of objects of a particular size.
1811 * Every segment has an associated (possibly shared) freelist.
1812 * A table of segment records is kept that records the upper and
1813 * lower extents of the segment; this is used during the conservative
1814 * phase of gc to identify probably gc roots (because they point
1815 * into valid segments at reasonable offsets). */
1818 * is true if the first segment was smaller than INIT_HEAP_SEG.
1819 * If scm_expmem is set to one, subsequent segment allocations will
1820 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1824 scm_sizet scm_max_segment_size
;
1827 * is the lowest base address of any heap segment.
1829 SCM_CELLPTR scm_heap_org
;
1831 scm_heap_seg_data_t
* scm_heap_table
= 0;
1832 int scm_n_heap_segs
= 0;
1835 * initializes a new heap segment and return the number of objects it contains.
1837 * The segment origin, segment size in bytes, and the span of objects
1838 * in cells are input parameters. The freelist is both input and output.
1840 * This function presume that the scm_heap_table has already been expanded
1841 * to accomodate a new segment record.
1846 init_heap_seg (SCM_CELLPTR seg_org
, scm_sizet size
, scm_freelist_t
*freelist
)
1848 register SCM_CELLPTR ptr
;
1849 SCM_CELLPTR seg_end
;
1852 int span
= freelist
->span
;
1854 if (seg_org
== NULL
)
1857 ptr
= CELL_UP (seg_org
, span
);
1859 /* Compute the ceiling on valid object pointers w/in this segment.
1861 seg_end
= CELL_DN ((char *) seg_org
+ size
, span
);
1863 /* Find the right place and insert the segment record.
1866 for (new_seg_index
= 0;
1867 ( (new_seg_index
< scm_n_heap_segs
)
1868 && SCM_PTR_LE (scm_heap_table
[new_seg_index
].bounds
[0], seg_org
));
1874 for (i
= scm_n_heap_segs
; i
> new_seg_index
; --i
)
1875 scm_heap_table
[i
] = scm_heap_table
[i
- 1];
1880 scm_heap_table
[new_seg_index
].span
= span
;
1881 scm_heap_table
[new_seg_index
].freelist
= freelist
;
1882 scm_heap_table
[new_seg_index
].bounds
[0] = ptr
;
1883 scm_heap_table
[new_seg_index
].bounds
[1] = seg_end
;
1886 /* Compute the least valid object pointer w/in this segment
1888 ptr
= CELL_UP (ptr
, span
);
1892 n_new_cells
= seg_end
- ptr
;
1894 freelist
->heap_size
+= n_new_cells
;
1896 /* Partition objects in this segment into clusters */
1899 SCM
*clusterp
= &clusters
;
1900 int n_cluster_cells
= span
* freelist
->cluster_size
;
1902 while (n_new_cells
> span
) /* at least one spine + one freecell */
1904 /* Determine end of cluster
1906 if (n_new_cells
>= n_cluster_cells
)
1908 seg_end
= ptr
+ n_cluster_cells
;
1909 n_new_cells
-= n_cluster_cells
;
1912 /* [cmm] looks like the segment size doesn't divide cleanly by
1913 cluster size. bad cmm! */
1916 /* Allocate cluster spine
1918 *clusterp
= PTR2SCM (ptr
);
1919 SCM_SETCAR (*clusterp
, PTR2SCM (ptr
+ span
));
1920 clusterp
= SCM_CDRLOC (*clusterp
);
1923 while (ptr
< seg_end
)
1925 SCM scmptr
= PTR2SCM (ptr
);
1927 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
1928 SCM_SETCDR (scmptr
, PTR2SCM (ptr
+ span
));
1932 SCM_SETCDR (PTR2SCM (ptr
- span
), SCM_EOL
);
1935 /* Patch up the last cluster pointer in the segment
1936 * to join it to the input freelist.
1938 *clusterp
= freelist
->clusters
;
1939 freelist
->clusters
= clusters
;
1943 fprintf (stderr
, "H");
1949 round_to_cluster_size (scm_freelist_t
*freelist
, scm_sizet len
)
1951 scm_sizet cluster_size_in_bytes
= CLUSTER_SIZE_IN_BYTES (freelist
);
1954 (len
+ cluster_size_in_bytes
- 1) / cluster_size_in_bytes
* cluster_size_in_bytes
1955 + ALIGNMENT_SLACK (freelist
);
1959 alloc_some_heap (scm_freelist_t
*freelist
)
1961 scm_heap_seg_data_t
* tmptable
;
1965 /* Critical code sections (such as the garbage collector)
1966 * aren't supposed to add heap segments.
1968 if (scm_gc_heap_lock
)
1969 scm_wta (SCM_UNDEFINED
, "need larger initial", "heap");
1971 /* Expand the heap tables to have room for the new segment.
1972 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
1973 * only if the allocation of the segment itself succeeds.
1975 len
= (1 + scm_n_heap_segs
) * sizeof (scm_heap_seg_data_t
);
1977 SCM_SYSCALL (tmptable
= ((scm_heap_seg_data_t
*)
1978 realloc ((char *)scm_heap_table
, len
)));
1980 scm_wta (SCM_UNDEFINED
, "could not grow", "hplims");
1982 scm_heap_table
= tmptable
;
1985 /* Pick a size for the new heap segment.
1986 * The rule for picking the size of a segment is explained in
1990 /* Assure that the new segment is predicted to be large enough.
1992 * New yield should at least equal GC fraction of new heap size, i.e.
1994 * y + dh > f * (h + dh)
1997 * f : min yield fraction
1999 * dh : size of new heap segment
2001 * This gives dh > (f * h - y) / (1 - f)
2003 int f
= freelist
->min_yield_fraction
;
2004 long h
= SCM_HEAP_SIZE
;
2005 long min_cells
= (f
* h
- 100 * (long) scm_gc_yield
) / (99 - f
);
2006 len
= SCM_EXPHEAP (freelist
->heap_size
);
2008 fprintf (stderr
, "(%d < %d)", len
, min_cells
);
2010 if (len
< min_cells
)
2011 len
= min_cells
+ freelist
->cluster_size
;
2012 len
*= sizeof (scm_cell
);
2013 /* force new sampling */
2014 freelist
->collected
= LONG_MAX
;
2017 if (len
> scm_max_segment_size
)
2018 len
= scm_max_segment_size
;
2023 smallest
= CLUSTER_SIZE_IN_BYTES (freelist
);
2028 /* Allocate with decaying ambition. */
2029 while ((len
>= SCM_MIN_HEAP_SEG_SIZE
)
2030 && (len
>= smallest
))
2032 scm_sizet rounded_len
= round_to_cluster_size (freelist
, len
);
2033 SCM_SYSCALL (ptr
= (SCM_CELLPTR
) malloc (rounded_len
));
2036 init_heap_seg (ptr
, rounded_len
, freelist
);
2043 scm_wta (SCM_UNDEFINED
, "could not grow", "heap");
2047 SCM_DEFINE (scm_unhash_name
, "unhash-name", 1, 0, 0,
2050 #define FUNC_NAME s_scm_unhash_name
2054 SCM_VALIDATE_SYMBOL (1,name
);
2056 bound
= scm_n_heap_segs
;
2057 for (x
= 0; x
< bound
; ++x
)
2061 p
= scm_heap_table
[x
].bounds
[0];
2062 pbound
= scm_heap_table
[x
].bounds
[1];
2065 SCM cell
= PTR2SCM (p
);
2066 if (SCM_TYP3 (cell
) == scm_tc3_cons_gloc
)
2068 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2069 * struct cell. See the corresponding comment in scm_gc_mark.
2071 scm_bits_t word0
= SCM_CELL_WORD_0 (cell
) - scm_tc3_cons_gloc
;
2072 SCM gloc_car
= SCM_PACK (word0
); /* access as gloc */
2073 SCM vcell
= SCM_CELL_OBJECT_1 (gloc_car
);
2074 if ((SCM_EQ_P (name
, SCM_BOOL_T
) || SCM_EQ_P (SCM_CAR (gloc_car
), name
))
2075 && (SCM_UNPACK (vcell
) != 0) && (SCM_UNPACK (vcell
) != 1))
2077 SCM_SET_CELL_OBJECT_0 (cell
, name
);
2090 /* {GC Protection Helper Functions}
2095 scm_remember (SCM
*ptr
)
2100 These crazy functions prevent garbage collection
2101 of arguments after the first argument by
2102 ensuring they remain live throughout the
2103 function because they are used in the last
2104 line of the code block.
2105 It'd be better to have a nice compiler hint to
2106 aid the conservative stack-scanning GC. --03/09/00 gjb */
2108 scm_return_first (SCM elt
, ...)
2114 scm_return_first_int (int i
, ...)
2121 scm_permanent_object (SCM obj
)
2124 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
2130 /* Protect OBJ from the garbage collector. OBJ will not be freed,
2131 even if all other references are dropped, until someone applies
2132 scm_unprotect_object to it. This function returns OBJ.
2134 Calls to scm_protect_object nest. For every object OBJ, there is a
2135 counter which scm_protect_object(OBJ) increments and
2136 scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
2137 an object's counter is greater than zero, the garbage collector
2138 will not free it. */
2141 scm_protect_object (SCM obj
)
2145 /* This critical section barrier will be replaced by a mutex. */
2148 handle
= scm_hashq_get_handle (scm_protects
, obj
);
2150 if (SCM_IMP (handle
))
2151 scm_hashq_create_handle_x (scm_protects
, obj
, SCM_MAKINUM (1));
2153 SCM_SETCDR (handle
, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle
)) + 1));
2161 /* Remove any protection for OBJ established by a prior call to
2162 scm_protect_object. This function returns OBJ.
2164 See scm_protect_object for more information. */
2166 scm_unprotect_object (SCM obj
)
2170 /* This critical section barrier will be replaced by a mutex. */
2173 handle
= scm_hashq_get_handle (scm_protects
, obj
);
2175 if (SCM_NIMP (handle
))
2177 int count
= SCM_INUM (SCM_CDR (handle
)) - 1;
2179 scm_hashq_remove_x (scm_protects
, obj
);
2181 SCM_SETCDR (handle
, SCM_MAKINUM (count
));
2191 /* called on process termination. */
2197 extern int on_exit (void (*procp
) (), int arg
);
2200 cleanup (int status
, void *arg
)
2202 #error Dont know how to setup a cleanup handler on your system.
2207 scm_flush_all_ports ();
2212 make_initial_segment (scm_sizet init_heap_size
, scm_freelist_t
*freelist
)
2214 scm_sizet rounded_size
= round_to_cluster_size (freelist
, init_heap_size
);
2215 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2219 rounded_size
= round_to_cluster_size (freelist
, SCM_HEAP_SEG_SIZE
);
2220 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2228 if (freelist
->min_yield_fraction
)
2229 freelist
->min_yield
= (freelist
->heap_size
* freelist
->min_yield_fraction
2231 freelist
->grow_heap_p
= (freelist
->heap_size
< freelist
->min_yield
);
2238 init_freelist (scm_freelist_t
*freelist
,
2243 freelist
->clusters
= SCM_EOL
;
2244 freelist
->cluster_size
= cluster_size
+ 1;
2245 freelist
->left_to_collect
= 0;
2246 freelist
->clusters_allocated
= 0;
2247 freelist
->min_yield
= 0;
2248 freelist
->min_yield_fraction
= min_yield
;
2249 freelist
->span
= span
;
2250 freelist
->collected
= 0;
2251 freelist
->collected_1
= 0;
2252 freelist
->heap_size
= 0;
2256 scm_init_storage (scm_sizet init_heap_size_1
, int gc_trigger_1
,
2257 scm_sizet init_heap_size_2
, int gc_trigger_2
,
2258 scm_sizet max_segment_size
)
2262 if (!init_heap_size_1
)
2263 init_heap_size_1
= SCM_INIT_HEAP_SIZE_1
;
2264 if (!init_heap_size_2
)
2265 init_heap_size_2
= SCM_INIT_HEAP_SIZE_2
;
2267 j
= SCM_NUM_PROTECTS
;
2269 scm_sys_protects
[--j
] = SCM_BOOL_F
;
2272 scm_freelist
= SCM_EOL
;
2273 scm_freelist2
= SCM_EOL
;
2274 init_freelist (&scm_master_freelist
,
2275 1, SCM_CLUSTER_SIZE_1
,
2276 gc_trigger_1
? gc_trigger_1
: SCM_MIN_YIELD_1
);
2277 init_freelist (&scm_master_freelist2
,
2278 2, SCM_CLUSTER_SIZE_2
,
2279 gc_trigger_2
? gc_trigger_2
: SCM_MIN_YIELD_2
);
2280 scm_max_segment_size
2281 = max_segment_size
? max_segment_size
: SCM_MAX_SEGMENT_SIZE
;
2285 j
= SCM_HEAP_SEG_SIZE
;
2286 scm_mtrigger
= SCM_INIT_MALLOC_LIMIT
;
2287 scm_heap_table
= ((scm_heap_seg_data_t
*)
2288 scm_must_malloc (sizeof (scm_heap_seg_data_t
) * 2, "hplims"));
2290 if (make_initial_segment (init_heap_size_1
, &scm_master_freelist
) ||
2291 make_initial_segment (init_heap_size_2
, &scm_master_freelist2
))
2294 /* scm_hplims[0] can change. do not remove scm_heap_org */
2295 scm_heap_org
= CELL_UP (scm_heap_table
[0].bounds
[0], 1);
2297 scm_c_hook_init (&scm_before_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2298 scm_c_hook_init (&scm_before_mark_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2299 scm_c_hook_init (&scm_before_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2300 scm_c_hook_init (&scm_after_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2301 scm_c_hook_init (&scm_after_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2303 /* Initialise the list of ports. */
2304 scm_port_table
= (scm_port
**)
2305 malloc (sizeof (scm_port
*) * scm_port_table_room
);
2306 if (!scm_port_table
)
2313 on_exit (cleanup
, 0);
2317 scm_undefineds
= scm_cons (SCM_UNDEFINED
, SCM_EOL
);
2318 SCM_SETCDR (scm_undefineds
, scm_undefineds
);
2320 scm_listofnull
= scm_cons (SCM_EOL
, SCM_EOL
);
2321 scm_nullstr
= scm_makstr (0L, 0);
2322 scm_nullvect
= scm_make_vector (SCM_INUM0
, SCM_UNDEFINED
);
2323 scm_symhash
= scm_make_vector (SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
);
2324 scm_weak_symhash
= scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim
));
2325 scm_symhash_vars
= scm_make_vector (SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
);
2326 scm_stand_in_procs
= SCM_EOL
;
2327 scm_permobjs
= SCM_EOL
;
2328 scm_protects
= scm_make_vector (SCM_MAKINUM (31), SCM_EOL
);
2329 scm_asyncs
= SCM_EOL
;
2330 scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM
));
2331 scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM
));
2333 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD
));
2342 scm_after_gc_hook
= scm_create_hook ("after-gc-hook", 0);
2343 #include "libguile/gc.x"