1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
45 /* #define DEBUGINFO */
49 #include "libguile/_scm.h"
50 #include "libguile/stime.h"
51 #include "libguile/stackchk.h"
52 #include "libguile/struct.h"
53 #include "libguile/smob.h"
54 #include "libguile/unif.h"
55 #include "libguile/async.h"
56 #include "libguile/ports.h"
57 #include "libguile/root.h"
58 #include "libguile/strings.h"
59 #include "libguile/vectors.h"
60 #include "libguile/weaks.h"
61 #include "libguile/hashtab.h"
63 #include "libguile/validate.h"
64 #include "libguile/gc.h"
66 #ifdef GUILE_DEBUG_MALLOC
67 #include "libguile/debug-malloc.h"
80 #define var_start(x, y) va_start(x, y)
83 #define var_start(x, y) va_start(x)
87 /* {heap tuning parameters}
89 * These are parameters for controlling memory allocation. The heap
90 * is the area out of which scm_cons, and object headers are allocated.
92 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
93 * 64 bit machine. The units of the _SIZE parameters are bytes.
94 * Cons pairs and object headers occupy one heap cell.
96 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
97 * allocated initially the heap will grow by half its current size
98 * each subsequent time more heap is needed.
100 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
101 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
102 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
103 * is in scm_init_storage() and alloc_some_heap() in sys.c
105 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
106 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
108 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
111 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
114 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
115 * reclaimed by a GC triggered by must_malloc. If less than this is
116 * reclaimed, the trigger threshold is raised. [I don't know what a
117 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
118 * work around a oscillation that caused almost constant GC.]
122 * Heap size 45000 and 40% min yield gives quick startup and no extra
123 * heap allocation. Having higher values on min yield may lead to
124 * large heaps, especially if code behaviour is varying its
125 * maximum consumption between different freelists.
127 int scm_default_init_heap_size_1
= (45000L * sizeof (scm_cell
));
128 int scm_default_min_yield_1
= 40;
129 #define SCM_CLUSTER_SIZE_1 2000L
131 int scm_default_init_heap_size_2
= (2500L * 2 * sizeof (scm_cell
));
132 /* The following value may seem large, but note that if we get to GC at
133 * all, this means that we have a numerically intensive application
135 int scm_default_min_yield_2
= 40;
136 #define SCM_CLUSTER_SIZE_2 1000L
138 int scm_default_max_segment_size
= 2097000L;/* a little less (adm) than 2 Mb */
140 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
142 # define SCM_HEAP_SEG_SIZE 32768L
145 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
147 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
150 /* Make heap grow with factor 1.5 */
151 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
152 #define SCM_INIT_MALLOC_LIMIT 100000
153 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
155 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
156 bounds for allocated storage */
159 /*in 386 protected mode we must only adjust the offset */
160 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
161 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
164 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
165 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
167 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
168 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
171 #define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
172 #define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
173 #define SCM_HEAP_SIZE \
174 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
175 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
182 typedef struct scm_freelist_t
{
183 /* collected cells */
185 /* number of cells left to collect before cluster is full */
186 unsigned int left_to_collect
;
187 /* number of clusters which have been allocated */
188 unsigned int clusters_allocated
;
189 /* a list of freelists, each of size cluster_size,
190 * except the last one which may be shorter
194 /* this is the number of objects in each cluster, including the spine cell */
196 /* indicates that we should grow heap instead of GC:ing
199 /* minimum yield on this list in order not to grow the heap
202 /* defines min_yield as percent of total heap size
204 int min_yield_fraction
;
205 /* number of cells per object on this list */
207 /* number of collected cells during last GC */
209 /* number of collected cells during penultimate GC */
211 /* total number of cells in heap segments
212 * belonging to this list.
217 SCM scm_freelist
= SCM_EOL
;
218 scm_freelist_t scm_master_freelist
= {
219 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_1
, 0, 0, 0, 1, 0, 0
221 SCM scm_freelist2
= SCM_EOL
;
222 scm_freelist_t scm_master_freelist2
= {
223 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_2
, 0, 0, 0, 2, 0, 0
227 * is the number of bytes of must_malloc allocation needed to trigger gc.
229 unsigned long scm_mtrigger
;
233 * If set, don't expand the heap. Set only during gc, during which no allocation
234 * is supposed to take place anyway.
236 int scm_gc_heap_lock
= 0;
239 * Don't pause for collection if this is set -- just
243 int scm_block_gc
= 1;
245 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
246 * collection (GC) more space is allocated for the heap.
248 #define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
250 /* During collection, this accumulates objects holding
253 SCM scm_weak_vectors
;
255 /* GC Statistics Keeping
257 unsigned long scm_cells_allocated
= 0;
258 long scm_mallocated
= 0;
259 unsigned long scm_gc_cells_collected
;
260 unsigned long scm_gc_yield
;
261 static unsigned long scm_gc_yield_1
= 0; /* previous GC yield */
262 unsigned long scm_gc_malloc_collected
;
263 unsigned long scm_gc_ports_collected
;
264 unsigned long scm_gc_rt
;
265 unsigned long scm_gc_time_taken
= 0;
267 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
268 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
269 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
270 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
271 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
272 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
274 typedef struct scm_heap_seg_data_t
276 /* lower and upper bounds of the segment */
277 SCM_CELLPTR bounds
[2];
279 /* address of the head-of-freelist pointer for this segment's cells.
280 All segments usually point to the same one, scm_freelist. */
281 scm_freelist_t
*freelist
;
283 /* number of cells per object in this segment */
285 } scm_heap_seg_data_t
;
289 static scm_sizet
init_heap_seg (SCM_CELLPTR
, scm_sizet
, scm_freelist_t
*);
290 static void alloc_some_heap (scm_freelist_t
*);
294 /* Debugging functions. */
296 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
298 /* Return the number of the heap segment containing CELL. */
304 for (i
= 0; i
< scm_n_heap_segs
; i
++)
305 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], SCM2PTR (cell
))
306 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], SCM2PTR (cell
)))
308 fprintf (stderr
, "which_seg: can't find segment containing cell %lx\n",
315 map_free_list (scm_freelist_t
*master
, SCM freelist
)
317 int last_seg
= -1, count
= 0;
320 for (f
= freelist
; SCM_NIMP (f
); f
= SCM_CDR (f
))
322 int this_seg
= which_seg (f
);
324 if (this_seg
!= last_seg
)
327 fprintf (stderr
, " %5d %d-cells in segment %d\n",
328 count
, master
->span
, last_seg
);
335 fprintf (stderr
, " %5d %d-cells in segment %d\n",
336 count
, master
->span
, last_seg
);
339 SCM_DEFINE (scm_map_free_list
, "map-free-list", 0, 0, 0,
341 "Print debugging information about the free-list.\n"
342 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
343 #define FUNC_NAME s_scm_map_free_list
346 fprintf (stderr
, "%d segments total (%d:%d",
348 scm_heap_table
[0].span
,
349 scm_heap_table
[0].bounds
[1] - scm_heap_table
[0].bounds
[0]);
350 for (i
= 1; i
< scm_n_heap_segs
; i
++)
351 fprintf (stderr
, ", %d:%d",
352 scm_heap_table
[i
].span
,
353 scm_heap_table
[i
].bounds
[1] - scm_heap_table
[i
].bounds
[0]);
354 fprintf (stderr
, ")\n");
355 map_free_list (&scm_master_freelist
, scm_freelist
);
356 map_free_list (&scm_master_freelist2
, scm_freelist2
);
359 return SCM_UNSPECIFIED
;
363 static int last_cluster
;
364 static int last_size
;
367 free_list_length (char *title
, int i
, SCM freelist
)
371 for (ls
= freelist
; SCM_NNULLP (ls
); ls
= SCM_CDR (ls
))
372 if (SCM_CELL_TYPE (ls
) == scm_tc_free_cell
)
376 fprintf (stderr
, "bad cell in %s at position %d\n", title
, n
);
383 if (last_cluster
== i
- 1)
384 fprintf (stderr
, "\t%d\n", last_size
);
386 fprintf (stderr
, "-%d\t%d\n", i
- 1, last_size
);
389 fprintf (stderr
, "%s %d", title
, i
);
391 fprintf (stderr
, "%s\t%d\n", title
, n
);
399 free_list_lengths (char *title
, scm_freelist_t
*master
, SCM freelist
)
402 int i
= 0, len
, n
= 0;
403 fprintf (stderr
, "%s\n\n", title
);
404 n
+= free_list_length ("free list", -1, freelist
);
405 for (clusters
= master
->clusters
;
406 SCM_NNULLP (clusters
);
407 clusters
= SCM_CDR (clusters
))
409 len
= free_list_length ("cluster", i
++, SCM_CAR (clusters
));
412 if (last_cluster
== i
- 1)
413 fprintf (stderr
, "\t%d\n", last_size
);
415 fprintf (stderr
, "-%d\t%d\n", i
- 1, last_size
);
416 fprintf (stderr
, "\ntotal %d objects\n\n", n
);
419 SCM_DEFINE (scm_free_list_length
, "free-list-length", 0, 0, 0,
421 "Print debugging information about the free-list.\n"
422 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
423 #define FUNC_NAME s_scm_free_list_length
425 free_list_lengths ("1-cells", &scm_master_freelist
, scm_freelist
);
426 free_list_lengths ("2-cells", &scm_master_freelist2
, scm_freelist2
);
427 return SCM_UNSPECIFIED
;
433 #ifdef GUILE_DEBUG_FREELIST
435 /* Number of calls to SCM_NEWCELL since startup. */
436 static unsigned long scm_newcell_count
;
437 static unsigned long scm_newcell2_count
;
439 /* Search freelist for anything that isn't marked as a free cell.
440 Abort if we find something. */
442 scm_check_freelist (SCM freelist
)
447 for (f
= freelist
; SCM_NIMP (f
); f
= SCM_CDR (f
), i
++)
448 if (SCM_CAR (f
) != (SCM
) scm_tc_free_cell
)
450 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %d'th elt\n",
451 scm_newcell_count
, i
);
457 static int scm_debug_check_freelist
= 0;
459 SCM_DEFINE (scm_gc_set_debug_check_freelist_x
, "gc-set-debug-check-freelist!", 1, 0, 0,
461 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
462 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
463 "compile-time flag was selected.\n")
464 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
466 SCM_VALIDATE_BOOL_COPY (1, flag
, scm_debug_check_freelist
);
467 return SCM_UNSPECIFIED
;
473 scm_debug_newcell (void)
478 if (scm_debug_check_freelist
)
480 scm_check_freelist (scm_freelist
);
484 /* The rest of this is supposed to be identical to the SCM_NEWCELL
486 if (SCM_IMP (scm_freelist
))
487 new = scm_gc_for_newcell (&scm_master_freelist
, &scm_freelist
);
491 scm_freelist
= SCM_CDR (scm_freelist
);
492 SCM_SETCAR (new, scm_tc16_allocated
);
499 scm_debug_newcell2 (void)
503 scm_newcell2_count
++;
504 if (scm_debug_check_freelist
)
506 scm_check_freelist (scm_freelist2
);
510 /* The rest of this is supposed to be identical to the SCM_NEWCELL
512 if (SCM_IMP (scm_freelist2
))
513 new = scm_gc_for_newcell (&scm_master_freelist2
, &scm_freelist2
);
517 scm_freelist2
= SCM_CDR (scm_freelist2
);
518 SCM_SETCAR (new, scm_tc16_allocated
);
524 #endif /* GUILE_DEBUG_FREELIST */
529 master_cells_allocated (scm_freelist_t
*master
)
531 int objects
= master
->clusters_allocated
* (master
->cluster_size
- 1);
532 if (SCM_NULLP (master
->clusters
))
533 objects
-= master
->left_to_collect
;
534 return master
->span
* objects
;
538 freelist_length (SCM freelist
)
541 for (n
= 0; SCM_NNULLP (freelist
); freelist
= SCM_CDR (freelist
))
547 compute_cells_allocated ()
549 return (scm_cells_allocated
550 + master_cells_allocated (&scm_master_freelist
)
551 + master_cells_allocated (&scm_master_freelist2
)
552 - scm_master_freelist
.span
* freelist_length (scm_freelist
)
553 - scm_master_freelist2
.span
* freelist_length (scm_freelist2
));
556 /* {Scheme Interface to GC}
559 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
561 "Returns an association list of statistics about Guile's current use of storage. ")
562 #define FUNC_NAME s_scm_gc_stats
567 long int local_scm_mtrigger
;
568 long int local_scm_mallocated
;
569 long int local_scm_heap_size
;
570 long int local_scm_cells_allocated
;
571 long int local_scm_gc_time_taken
;
581 for (i
= scm_n_heap_segs
; i
--; )
582 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
583 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
585 if (scm_n_heap_segs
!= n
)
590 /* Below, we cons to produce the resulting list. We want a snapshot of
591 * the heap situation before consing.
593 local_scm_mtrigger
= scm_mtrigger
;
594 local_scm_mallocated
= scm_mallocated
;
595 local_scm_heap_size
= SCM_HEAP_SIZE
;
596 local_scm_cells_allocated
= compute_cells_allocated ();
597 local_scm_gc_time_taken
= scm_gc_time_taken
;
599 answer
= scm_listify (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
600 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
601 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
602 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
603 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
604 scm_cons (sym_heap_segments
, heap_segs
),
613 scm_gc_start (const char *what
)
615 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ());
616 scm_gc_cells_collected
= 0;
617 scm_gc_yield_1
= scm_gc_yield
;
618 scm_gc_yield
= (scm_cells_allocated
619 + master_cells_allocated (&scm_master_freelist
)
620 + master_cells_allocated (&scm_master_freelist2
));
621 scm_gc_malloc_collected
= 0;
622 scm_gc_ports_collected
= 0;
629 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt
;
630 scm_gc_time_taken
+= scm_gc_rt
;
634 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
636 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
637 "returned by this function for @var{obj}")
638 #define FUNC_NAME s_scm_object_address
640 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj
));
645 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
647 "Scans all of SCM objects and reclaims for further use those that are\n"
648 "no longer accessible.")
649 #define FUNC_NAME s_scm_gc
654 return SCM_UNSPECIFIED
;
660 /* {C Interface For When GC is Triggered}
664 adjust_min_yield (scm_freelist_t
*freelist
)
666 /* min yield is adjusted upwards so that next predicted total yield
667 * (allocated cells actually freed by GC) becomes
668 * `min_yield_fraction' of total heap size. Note, however, that
669 * the absolute value of min_yield will correspond to `collected'
670 * on one master (the one which currently is triggering GC).
672 * The reason why we look at total yield instead of cells collected
673 * on one list is that we want to take other freelists into account.
674 * On this freelist, we know that (local) yield = collected cells,
675 * but that's probably not the case on the other lists.
677 * (We might consider computing a better prediction, for example
678 * by computing an average over multiple GC:s.)
680 if (freelist
->min_yield_fraction
)
682 /* Pick largest of last two yields. */
683 int delta
= ((SCM_HEAP_SIZE
* freelist
->min_yield_fraction
/ 100)
684 - (long) SCM_MAX (scm_gc_yield_1
, scm_gc_yield
));
686 fprintf (stderr
, " after GC = %d, delta = %d\n",
691 freelist
->min_yield
+= delta
;
695 /* When we get POSIX threads support, the master will be global and
696 * common while the freelist will be individual for each thread.
700 scm_gc_for_newcell (scm_freelist_t
*master
, SCM
*freelist
)
706 if (SCM_NULLP (master
->clusters
))
708 if (master
->grow_heap_p
)
710 master
->grow_heap_p
= 0;
711 alloc_some_heap (master
);
716 fprintf (stderr
, "allocated = %d, ",
718 + master_cells_allocated (&scm_master_freelist
)
719 + master_cells_allocated (&scm_master_freelist2
));
722 adjust_min_yield (master
);
725 cell
= SCM_CAR (master
->clusters
);
726 master
->clusters
= SCM_CDR (master
->clusters
);
727 ++master
->clusters_allocated
;
729 while (SCM_NULLP (cell
));
731 *freelist
= SCM_CDR (cell
);
732 SCM_SET_CELL_TYPE (cell
, scm_tc16_allocated
);
737 /* This is a support routine which can be used to reserve a cluster
738 * for some special use, such as debugging. It won't be useful until
739 * free cells are preserved between garbage collections.
743 scm_alloc_cluster (scm_freelist_t
*master
)
746 cell
= scm_gc_for_newcell (master
, &freelist
);
747 SCM_SETCDR (cell
, freelist
);
753 scm_c_hook_t scm_before_gc_c_hook
;
754 scm_c_hook_t scm_before_mark_c_hook
;
755 scm_c_hook_t scm_before_sweep_c_hook
;
756 scm_c_hook_t scm_after_sweep_c_hook
;
757 scm_c_hook_t scm_after_gc_c_hook
;
760 scm_igc (const char *what
)
764 scm_c_hook_run (&scm_before_gc_c_hook
, 0);
767 SCM_NULLP (scm_freelist
)
769 : (SCM_NULLP (scm_freelist2
) ? "o" : "m"));
772 /* During the critical section, only the current thread may run. */
773 SCM_THREAD_CRITICAL_SECTION_START
;
776 /* fprintf (stderr, "gc: %s\n", what); */
780 if (!scm_stack_base
|| scm_block_gc
)
786 if (scm_mallocated
< 0)
787 /* The byte count of allocated objects has underflowed. This is
788 probably because you forgot to report the sizes of objects you
789 have allocated, by calling scm_done_malloc or some such. When
790 the GC freed them, it subtracted their size from
791 scm_mallocated, which underflowed. */
794 if (scm_gc_heap_lock
)
795 /* We've invoked the collector while a GC is already in progress.
796 That should never happen. */
801 /* flush dead entries from the continuation stack */
806 elts
= SCM_VELTS (scm_continuation_stack
);
807 bound
= SCM_LENGTH (scm_continuation_stack
);
808 x
= SCM_INUM (scm_continuation_stack_ptr
);
811 elts
[x
] = SCM_BOOL_F
;
816 scm_c_hook_run (&scm_before_mark_c_hook
, 0);
820 /* Protect from the C stack. This must be the first marking
821 * done because it provides information about what objects
822 * are "in-use" by the C code. "in-use" objects are those
823 * for which the values from SCM_LENGTH and SCM_CHARS must remain
824 * usable. This requirement is stricter than a liveness
825 * requirement -- in particular, it constrains the implementation
826 * of scm_vector_set_length_x.
828 SCM_FLUSH_REGISTER_WINDOWS
;
829 /* This assumes that all registers are saved into the jmp_buf */
830 setjmp (scm_save_regs_gc_mark
);
831 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
832 ( (scm_sizet
) (sizeof (SCM_STACKITEM
) - 1 +
833 sizeof scm_save_regs_gc_mark
)
834 / sizeof (SCM_STACKITEM
)));
837 scm_sizet stack_len
= scm_stack_size (scm_stack_base
);
838 #ifdef SCM_STACK_GROWS_UP
839 scm_mark_locations (scm_stack_base
, stack_len
);
841 scm_mark_locations (scm_stack_base
- stack_len
, stack_len
);
845 #else /* USE_THREADS */
847 /* Mark every thread's stack and registers */
848 scm_threads_mark_stacks ();
850 #endif /* USE_THREADS */
852 /* FIXME: insert a phase to un-protect string-data preserved
853 * in scm_vector_set_length_x.
856 j
= SCM_NUM_PROTECTS
;
858 scm_gc_mark (scm_sys_protects
[j
]);
860 /* FIXME: we should have a means to register C functions to be run
861 * in different phases of GC
863 scm_mark_subr_table ();
866 scm_gc_mark (scm_root
->handle
);
869 scm_c_hook_run (&scm_before_sweep_c_hook
, 0);
873 scm_c_hook_run (&scm_after_sweep_c_hook
, 0);
879 SCM_THREAD_CRITICAL_SECTION_END
;
881 scm_c_hook_run (&scm_after_gc_c_hook
, 0);
891 /* Mark an object precisely.
906 if (SCM_NCELLP (ptr
))
907 scm_wta (ptr
, "rogue pointer in heap", NULL
);
909 switch (SCM_TYP7 (ptr
))
911 case scm_tcs_cons_nimcar
:
912 if (SCM_GCMARKP (ptr
))
915 if (SCM_IMP (SCM_CDR (ptr
))) /* SCM_IMP works even with a GC mark */
920 scm_gc_mark (SCM_CAR (ptr
));
921 ptr
= SCM_GCCDR (ptr
);
923 case scm_tcs_cons_imcar
:
924 if (SCM_GCMARKP (ptr
))
927 ptr
= SCM_GCCDR (ptr
);
930 if (SCM_GCMARKP (ptr
))
933 scm_gc_mark (SCM_CELL_OBJECT_2 (ptr
));
934 ptr
= SCM_GCCDR (ptr
);
936 case scm_tcs_cons_gloc
:
937 if (SCM_GCMARKP (ptr
))
941 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
942 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
943 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
944 * pointer to a struct vtable data region. The fact that these are
945 * accessed in the same way restricts the possibilites to change the
946 * data layout of structs or heap cells.
948 scm_bits_t word0
= SCM_CELL_WORD_0 (ptr
) - scm_tc3_cons_gloc
;
949 scm_bits_t
* vtable_data
= (scm_bits_t
*) word0
; /* access as struct */
950 switch (vtable_data
[scm_vtable_index_vcell
])
955 SCM gloc_car
= SCM_PACK (word0
);
956 scm_gc_mark (gloc_car
);
957 ptr
= SCM_GCCDR (ptr
);
963 /* ptr is a struct */
964 SCM layout
= SCM_PACK (vtable_data
[scm_vtable_index_layout
]);
965 int len
= SCM_LENGTH (layout
);
966 char * fields_desc
= SCM_CHARS (layout
);
967 /* We're using SCM_GCCDR here like STRUCT_DATA, except
968 that it removes the mark */
969 scm_bits_t
* struct_data
= (scm_bits_t
*) SCM_UNPACK (SCM_GCCDR (ptr
));
971 if (vtable_data
[scm_struct_i_flags
] & SCM_STRUCTF_ENTITY
)
973 scm_gc_mark (SCM_PACK (struct_data
[scm_struct_i_procedure
]));
974 scm_gc_mark (SCM_PACK (struct_data
[scm_struct_i_setter
]));
980 for (x
= 0; x
< len
- 2; x
+= 2, ++struct_data
)
981 if (fields_desc
[x
] == 'p')
982 scm_gc_mark (SCM_PACK (*struct_data
));
983 if (fields_desc
[x
] == 'p')
985 if (SCM_LAYOUT_TAILP (fields_desc
[x
+ 1]))
986 for (x
= *struct_data
; x
; --x
)
987 scm_gc_mark (SCM_PACK (*++struct_data
));
989 scm_gc_mark (SCM_PACK (*struct_data
));
992 if (vtable_data
[scm_vtable_index_vcell
] == 0)
994 vtable_data
[scm_vtable_index_vcell
] = 1;
995 ptr
= SCM_PACK (vtable_data
[scm_vtable_index_vtable
]);
1002 case scm_tcs_closures
:
1003 if (SCM_GCMARKP (ptr
))
1005 SCM_SETGCMARK (ptr
);
1006 if (SCM_IMP (SCM_CDR (ptr
)))
1008 ptr
= SCM_CLOSCAR (ptr
);
1011 scm_gc_mark (SCM_CLOSCAR (ptr
));
1012 ptr
= SCM_GCCDR (ptr
);
1014 case scm_tc7_vector
:
1015 case scm_tc7_lvector
:
1019 if (SCM_GC8MARKP (ptr
))
1021 SCM_SETGC8MARK (ptr
);
1022 i
= SCM_LENGTH (ptr
);
1026 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
1027 scm_gc_mark (SCM_VELTS (ptr
)[i
]);
1028 ptr
= SCM_VELTS (ptr
)[0];
1030 case scm_tc7_contin
:
1033 SCM_SETGC8MARK (ptr
);
1034 if (SCM_VELTS (ptr
))
1035 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr
),
1038 (sizeof (SCM_STACKITEM
) + -1 +
1039 sizeof (scm_contregs
)) /
1040 sizeof (SCM_STACKITEM
)));
1044 case scm_tc7_byvect
:
1051 #ifdef HAVE_LONG_LONGS
1052 case scm_tc7_llvect
:
1055 case scm_tc7_string
:
1056 SCM_SETGC8MARK (ptr
);
1059 case scm_tc7_substring
:
1060 if (SCM_GC8MARKP(ptr
))
1062 SCM_SETGC8MARK (ptr
);
1063 ptr
= SCM_CDR (ptr
);
1067 if (SCM_GC8MARKP(ptr
))
1069 SCM_WVECT_GC_CHAIN (ptr
) = scm_weak_vectors
;
1070 scm_weak_vectors
= ptr
;
1071 SCM_SETGC8MARK (ptr
);
1072 if (SCM_IS_WHVEC_ANY (ptr
))
1079 len
= SCM_LENGTH (ptr
);
1080 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
1081 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
1083 for (x
= 0; x
< len
; ++x
)
1086 alist
= SCM_VELTS (ptr
)[x
];
1088 /* mark everything on the alist except the keys or
1089 * values, according to weak_values and weak_keys. */
1090 while ( SCM_CONSP (alist
)
1091 && !SCM_GCMARKP (alist
)
1092 && SCM_CONSP (SCM_CAR (alist
)))
1097 kvpair
= SCM_CAR (alist
);
1098 next_alist
= SCM_CDR (alist
);
1101 * SCM_SETGCMARK (alist);
1102 * SCM_SETGCMARK (kvpair);
1104 * It may be that either the key or value is protected by
1105 * an escaped reference to part of the spine of this alist.
1106 * If we mark the spine here, and only mark one or neither of the
1107 * key and value, they may never be properly marked.
1108 * This leads to a horrible situation in which an alist containing
1109 * freelist cells is exported.
1111 * So only mark the spines of these arrays last of all marking.
1112 * If somebody confuses us by constructing a weak vector
1113 * with a circular alist then we are hosed, but at least we
1114 * won't prematurely drop table entries.
1117 scm_gc_mark (SCM_CAR (kvpair
));
1119 scm_gc_mark (SCM_GCCDR (kvpair
));
1122 if (SCM_NIMP (alist
))
1123 scm_gc_mark (alist
);
1128 case scm_tc7_msymbol
:
1129 if (SCM_GC8MARKP(ptr
))
1131 SCM_SETGC8MARK (ptr
);
1132 scm_gc_mark (SCM_SYMBOL_FUNC (ptr
));
1133 ptr
= SCM_SYMBOL_PROPS (ptr
);
1135 case scm_tc7_ssymbol
:
1136 if (SCM_GC8MARKP(ptr
))
1138 SCM_SETGC8MARK (ptr
);
1143 i
= SCM_PTOBNUM (ptr
);
1144 if (!(i
< scm_numptob
))
1146 if (SCM_GC8MARKP (ptr
))
1148 SCM_SETGC8MARK (ptr
);
1149 if (SCM_PTAB_ENTRY(ptr
))
1150 scm_gc_mark (SCM_PTAB_ENTRY(ptr
)->file_name
);
1151 if (scm_ptobs
[i
].mark
)
1153 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
1160 if (SCM_GC8MARKP (ptr
))
1162 SCM_SETGC8MARK (ptr
);
1163 switch (SCM_GCTYP16 (ptr
))
1164 { /* should be faster than going through scm_smobs */
1165 case scm_tc_free_cell
:
1166 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1167 case scm_tc16_allocated
:
1170 case scm_tc16_complex
:
1173 i
= SCM_SMOBNUM (ptr
);
1174 if (!(i
< scm_numsmob
))
1176 if (scm_smobs
[i
].mark
)
1178 ptr
= (scm_smobs
[i
].mark
) (ptr
);
1186 def
:scm_wta (ptr
, "unknown type in ", "gc_mark");
1191 /* Mark a Region Conservatively
1195 scm_mark_locations (SCM_STACKITEM x
[], scm_sizet n
)
1197 register long m
= n
;
1199 register SCM_CELLPTR ptr
;
1202 if (SCM_CELLP (* (SCM
*) &x
[m
]))
1204 ptr
= SCM2PTR (* (SCM
*) &x
[m
]);
1206 j
= scm_n_heap_segs
- 1;
1207 if ( SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1208 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1215 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
1217 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1225 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
1229 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
1234 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1238 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1244 if (scm_heap_table
[seg_id
].span
== 1
1245 || SCM_DOUBLE_CELLP (* (SCM
*) &x
[m
]))
1246 scm_gc_mark (* (SCM
*) &x
[m
]);
1255 /* The function scm_cellp determines whether an SCM value can be regarded as a
1256 * pointer to a cell on the heap. Binary search is used in order to determine
1257 * the heap segment that contains the cell.
1260 scm_cellp (SCM value
)
1262 if (SCM_CELLP (value
)) {
1263 scm_cell
* ptr
= SCM2PTR (value
);
1265 unsigned int j
= scm_n_heap_segs
- 1;
1268 int k
= (i
+ j
) / 2;
1269 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
)) {
1271 } else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
)) {
1276 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1277 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
)
1278 && (scm_heap_table
[i
].span
== 1 || SCM_DOUBLE_CELLP (value
))) {
1290 gc_sweep_freelist_start (scm_freelist_t
*freelist
)
1292 freelist
->cells
= SCM_EOL
;
1293 freelist
->left_to_collect
= freelist
->cluster_size
;
1294 freelist
->clusters_allocated
= 0;
1295 freelist
->clusters
= SCM_EOL
;
1296 freelist
->clustertail
= &freelist
->clusters
;
1297 freelist
->collected_1
= freelist
->collected
;
1298 freelist
->collected
= 0;
1302 gc_sweep_freelist_finish (scm_freelist_t
*freelist
)
1305 *freelist
->clustertail
= freelist
->cells
;
1306 if (SCM_NNULLP (freelist
->cells
))
1308 SCM c
= freelist
->cells
;
1309 SCM_SETCAR (c
, SCM_CDR (c
));
1310 SCM_SETCDR (c
, SCM_EOL
);
1311 freelist
->collected
+=
1312 freelist
->span
* (freelist
->cluster_size
- freelist
->left_to_collect
);
1314 scm_gc_cells_collected
+= freelist
->collected
;
1316 /* Although freelist->min_yield is used to test freelist->collected
1317 * (which is the local GC yield for freelist), it is adjusted so
1318 * that *total* yield is freelist->min_yield_fraction of total heap
1319 * size. This means that a too low yield is compensated by more
1320 * heap on the list which is currently doing most work, which is
1321 * just what we want.
1323 collected
= SCM_MAX (freelist
->collected_1
, freelist
->collected
);
1324 freelist
->grow_heap_p
= (collected
< freelist
->min_yield
);
1330 register SCM_CELLPTR ptr
;
1331 register SCM nfreelist
;
1332 register scm_freelist_t
*freelist
;
1340 gc_sweep_freelist_start (&scm_master_freelist
);
1341 gc_sweep_freelist_start (&scm_master_freelist2
);
1343 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1345 register unsigned int left_to_collect
;
1346 register scm_sizet j
;
1348 /* Unmarked cells go onto the front of the freelist this heap
1349 segment points to. Rather than updating the real freelist
1350 pointer as we go along, we accumulate the new head in
1351 nfreelist. Then, if it turns out that the entire segment is
1352 free, we free (i.e., malloc's free) the whole segment, and
1353 simply don't assign nfreelist back into the real freelist. */
1354 freelist
= scm_heap_table
[i
].freelist
;
1355 nfreelist
= freelist
->cells
;
1356 left_to_collect
= freelist
->left_to_collect
;
1357 span
= scm_heap_table
[i
].span
;
1359 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0], span
);
1360 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1], span
) - ptr
;
1361 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
1363 SCM scmptr
= PTR2SCM (ptr
);
1365 switch SCM_TYP7 (scmptr
)
1367 case scm_tcs_cons_gloc
:
1369 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1370 * struct or a gloc. See the corresponding comment in
1373 scm_bits_t word0
= SCM_CELL_WORD_0 (scmptr
) - scm_tc3_cons_gloc
;
1374 scm_bits_t
* vtable_data
= (scm_bits_t
*) word0
; /* access as struct */
1375 if (SCM_GCMARKP (scmptr
))
1377 if (vtable_data
[scm_vtable_index_vcell
] == 1)
1378 vtable_data
[scm_vtable_index_vcell
] = 0;
1383 if (vtable_data
[scm_vtable_index_vcell
] == 0
1384 || vtable_data
[scm_vtable_index_vcell
] == 1)
1386 scm_struct_free_t free
1387 = (scm_struct_free_t
) vtable_data
[scm_struct_i_free
];
1388 m
+= free (vtable_data
, (scm_bits_t
*) SCM_UNPACK (SCM_GCCDR (scmptr
)));
1393 case scm_tcs_cons_imcar
:
1394 case scm_tcs_cons_nimcar
:
1395 case scm_tcs_closures
:
1397 if (SCM_GCMARKP (scmptr
))
1401 if (SCM_GC8MARKP (scmptr
))
1407 m
+= (2 + SCM_LENGTH (scmptr
)) * sizeof (SCM
);
1408 scm_must_free ((char *)(SCM_VELTS (scmptr
) - 2));
1412 case scm_tc7_vector
:
1413 case scm_tc7_lvector
:
1417 if (SCM_GC8MARKP (scmptr
))
1420 m
+= (SCM_LENGTH (scmptr
) * sizeof (SCM
));
1422 scm_must_free (SCM_CHARS (scmptr
));
1423 /* SCM_SETCHARS(scmptr, 0);*/
1427 if SCM_GC8MARKP (scmptr
)
1429 m
+= sizeof (long) * ((SCM_HUGE_LENGTH (scmptr
) + SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
1431 case scm_tc7_byvect
:
1432 if SCM_GC8MARKP (scmptr
)
1434 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (char);
1438 if SCM_GC8MARKP (scmptr
)
1440 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long);
1443 if SCM_GC8MARKP (scmptr
)
1445 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (short);
1447 #ifdef HAVE_LONG_LONGS
1448 case scm_tc7_llvect
:
1449 if SCM_GC8MARKP (scmptr
)
1451 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long_long
);
1455 if SCM_GC8MARKP (scmptr
)
1457 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (float);
1460 if SCM_GC8MARKP (scmptr
)
1462 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (double);
1465 if SCM_GC8MARKP (scmptr
)
1467 m
+= SCM_HUGE_LENGTH (scmptr
) * 2 * sizeof (double);
1470 case scm_tc7_substring
:
1471 if (SCM_GC8MARKP (scmptr
))
1474 case scm_tc7_string
:
1475 if (SCM_GC8MARKP (scmptr
))
1477 m
+= SCM_HUGE_LENGTH (scmptr
) + 1;
1479 case scm_tc7_msymbol
:
1480 if (SCM_GC8MARKP (scmptr
))
1482 m
+= (SCM_LENGTH (scmptr
) + 1
1483 + (SCM_CHARS (scmptr
) - (char *) SCM_SLOTS (scmptr
)));
1484 scm_must_free ((char *)SCM_SLOTS (scmptr
));
1486 case scm_tc7_contin
:
1487 if SCM_GC8MARKP (scmptr
)
1489 m
+= SCM_LENGTH (scmptr
) * sizeof (SCM_STACKITEM
) + sizeof (scm_contregs
);
1490 if (SCM_VELTS (scmptr
))
1492 case scm_tc7_ssymbol
:
1493 if SCM_GC8MARKP(scmptr
)
1499 if SCM_GC8MARKP (scmptr
)
1501 if SCM_OPENP (scmptr
)
1503 int k
= SCM_PTOBNUM (scmptr
);
1504 if (!(k
< scm_numptob
))
1506 /* Keep "revealed" ports alive. */
1507 if (scm_revealed_count (scmptr
) > 0)
1509 /* Yes, I really do mean scm_ptobs[k].free */
1510 /* rather than ftobs[k].close. .close */
1511 /* is for explicit CLOSE-PORT by user */
1512 m
+= (scm_ptobs
[k
].free
) (scmptr
);
1513 SCM_SETSTREAM (scmptr
, 0);
1514 scm_remove_from_port_table (scmptr
);
1515 scm_gc_ports_collected
++;
1516 SCM_SETAND_CAR (scmptr
, ~SCM_OPN
);
1520 switch SCM_GCTYP16 (scmptr
)
1522 case scm_tc_free_cell
:
1524 if SCM_GC8MARKP (scmptr
)
1529 if SCM_GC8MARKP (scmptr
)
1531 m
+= (SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
/ SCM_CHAR_BIT
);
1533 #endif /* def SCM_BIGDIG */
1534 case scm_tc16_complex
:
1535 if SCM_GC8MARKP (scmptr
)
1537 m
+= 2 * sizeof (double);
1540 if SCM_GC8MARKP (scmptr
)
1545 k
= SCM_SMOBNUM (scmptr
);
1546 if (!(k
< scm_numsmob
))
1548 m
+= (scm_smobs
[k
].free
) (scmptr
);
1554 sweeperr
:scm_wta (scmptr
, "unknown type in ", "gc_sweep");
1557 if (SCM_CAR (scmptr
) == (SCM
) scm_tc_free_cell
)
1560 if (!--left_to_collect
)
1562 SCM_SETCAR (scmptr
, nfreelist
);
1563 *freelist
->clustertail
= scmptr
;
1564 freelist
->clustertail
= SCM_CDRLOC (scmptr
);
1566 nfreelist
= SCM_EOL
;
1567 freelist
->collected
+= span
* freelist
->cluster_size
;
1568 left_to_collect
= freelist
->cluster_size
;
1572 /* Stick the new cell on the front of nfreelist. It's
1573 critical that we mark this cell as freed; otherwise, the
1574 conservative collector might trace it as some other type
1576 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
1577 SCM_SETCDR (scmptr
, nfreelist
);
1583 SCM_CLRGC8MARK (scmptr
);
1586 SCM_CLRGCMARK (scmptr
);
1588 #ifdef GC_FREE_SEGMENTS
1593 freelist
->heap_size
-= seg_size
;
1594 free ((char *) scm_heap_table
[i
].bounds
[0]);
1595 scm_heap_table
[i
].bounds
[0] = 0;
1596 for (j
= i
+ 1; j
< scm_n_heap_segs
; j
++)
1597 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1598 scm_n_heap_segs
-= 1;
1599 i
--; /* We need to scan the segment just moved. */
1602 #endif /* ifdef GC_FREE_SEGMENTS */
1604 /* Update the real freelist pointer to point to the head of
1605 the list of free cells we've built for this segment. */
1606 freelist
->cells
= nfreelist
;
1607 freelist
->left_to_collect
= left_to_collect
;
1610 #ifdef GUILE_DEBUG_FREELIST
1611 scm_check_freelist (freelist
== &scm_master_freelist
1614 scm_map_free_list ();
1618 gc_sweep_freelist_finish (&scm_master_freelist
);
1619 gc_sweep_freelist_finish (&scm_master_freelist2
);
1621 /* When we move to POSIX threads private freelists should probably
1622 be GC-protected instead. */
1623 scm_freelist
= SCM_EOL
;
1624 scm_freelist2
= SCM_EOL
;
1626 scm_cells_allocated
= (SCM_HEAP_SIZE
- scm_gc_cells_collected
);
1627 scm_gc_yield
-= scm_cells_allocated
;
1628 scm_mallocated
-= m
;
1629 scm_gc_malloc_collected
= m
;
1635 /* {Front end to malloc}
1637 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1639 * These functions provide services comparable to malloc, realloc, and
1640 * free. They are for allocating malloced parts of scheme objects.
1641 * The primary purpose of the front end is to impose calls to gc.
1646 * Return newly malloced storage or throw an error.
1648 * The parameter WHAT is a string for error reporting.
1649 * If the threshold scm_mtrigger will be passed by this
1650 * allocation, or if the first call to malloc fails,
1651 * garbage collect -- on the presumption that some objects
1652 * using malloced storage may be collected.
1654 * The limit scm_mtrigger may be raised by this allocation.
/* Allocate SIZE bytes or signal an error via scm_wta.
 * WHAT is a short string used in error reporting.
 *
 * Strategy: if the projected malloc total stays under scm_mtrigger,
 * try malloc directly.  Otherwise (or if that malloc fails), run a GC
 * on the presumption that some malloc-backed objects can be reclaimed,
 * then retry once, possibly raising scm_mtrigger.
 *
 * NOTE(review): control structure reconstructed — the extraction
 * dropped braces, the `void *' return-type line and the bare
 * `scm_igc'/`if' lines; verify against upstream gc.c.  */
void *
scm_must_malloc (scm_sizet size, const char *what)
{
  void *ptr;
  unsigned long nm = scm_mallocated + size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
	{
	  scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
	  scm_malloc_register (ptr, what);
#endif
	  return ptr;
	}
    }

  /* First attempt refused or failed: collect, then retry once.  */
  scm_igc (what);

  nm = scm_mallocated + size;
  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
	/* Raise the trigger by 50% of whichever is larger, so repeated
	 * near-limit allocations do not GC on every call.  */
	if (nm > scm_mtrigger)
	  scm_mtrigger = nm + nm / 2;
	else
	  scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_register (ptr, what);
#endif
      return ptr;
    }

  scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
  return 0; /* never reached */
}
1701 * is similar to scm_must_malloc.
/* Grow (or shrink) the block WHERE from OLD_SIZE to SIZE bytes, or
 * signal an error via scm_wta.  Mirrors scm_must_malloc: try realloc
 * while under scm_mtrigger, otherwise GC and retry once, possibly
 * raising the trigger.
 *
 * NOTE(review): the middle parameters (old_size, size) and the control
 * structure were reconstructed — extraction dropped those lines;
 * verify against upstream gc.c.  */
void *
scm_must_realloc (void *where,
		  scm_sizet old_size,
		  scm_sizet size,
		  const char *what)
{
  void *ptr;
  scm_sizet nm = scm_mallocated + size - old_size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = realloc (where, size));
      if (NULL != ptr)
	{
	  scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
	  scm_malloc_reregister (where, ptr, what);
#endif
	  return ptr;
	}
    }

  scm_igc (what);

  nm = scm_mallocated + size - old_size;
  SCM_SYSCALL (ptr = realloc (where, size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
	if (nm > scm_mtrigger)
	  scm_mtrigger = nm + nm / 2;
	else
	  scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_reregister (where, ptr, what);
#endif
      return ptr;
    }

  scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
  return 0; /* never reached */
}
/* Free storage obtained from scm_must_malloc/scm_must_realloc.
 * A NULL argument is treated as a double-free and reported via scm_wta.
 *
 * NOTE(review): the `if (obj) free (obj); else' branch was
 * reconstructed — the extraction dropped it.  */
void
scm_must_free (void *obj)
{
#ifdef GUILE_DEBUG_MALLOC
  scm_malloc_unregister (obj);
#endif
  if (obj)
    free (obj);
  else
    scm_wta (SCM_INUM0, "already free", "");
}
1760 /* Announce that there has been some malloc done that will be freed
1761 * during gc. A typical use is for a smob that uses some malloced
1762 * memory but can not get it from scm_must_malloc (for whatever
1763 * reason). When a new object of this smob is created you call
1764 * scm_done_malloc with the size of the object. When your smob free
1765 * function is called, be sure to include this size in the return
/* Record SIZE bytes of malloc done outside scm_must_malloc (e.g. by a
 * smob), so GC pressure accounting stays correct.  Triggers a GC when
 * the running total exceeds scm_mtrigger, and raises the trigger if the
 * collection did not bring the total comfortably below it.  */
void
scm_done_malloc (long size)
{
  scm_mallocated += size;

  if (scm_mallocated > scm_mtrigger)
    {
      scm_igc ("foreign mallocs");
      if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
	{
	  if (scm_mallocated > scm_mtrigger)
	    scm_mtrigger = scm_mallocated + scm_mallocated / 2;
	  else
	    scm_mtrigger += scm_mtrigger / 2;
	}
    }
}
1791 * Each heap segment is an array of objects of a particular size.
1792 * Every segment has an associated (possibly shared) freelist.
1793 * A table of segment records is kept that records the upper and
1794 * lower extents of the segment; this is used during the conservative
1795 * phase of gc to identify probable gc roots (because they point
1796 * into valid segments at reasonable offsets). */
1799 * is true if the first segment was smaller than INIT_HEAP_SEG.
1800 * If scm_expmem is set to one, subsequent segment allocations will
1801 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1805 scm_sizet scm_max_segment_size
;
1808 * is the lowest base address of any heap segment.
1810 SCM_CELLPTR scm_heap_org
;
1812 scm_heap_seg_data_t
* scm_heap_table
= 0;
1813 int scm_n_heap_segs
= 0;
1816 * initializes a new heap segment and returns the number of objects it contains.
1818 * The segment origin, segment size in bytes, and the span of objects
1819 * in cells are input parameters. The freelist is both input and output.
1821 * This function presumes that the scm_heap_table has already been expanded
1822 * to accommodate a new segment record.
/* Initialize a new heap segment starting at SEG_ORG of SIZE bytes and
 * link it into FREELIST.  Inserts a record into scm_heap_table (kept
 * sorted by base address so conservative root scanning can binary- or
 * linearly probe it), then partitions the segment's cells into
 * clusters of freelist->cluster_size cells, each headed by a spine
 * cell, and prepends the cluster chain to freelist->clusters.
 *
 * Returns the segment size on success, 0 if SEG_ORG is NULL.
 *
 * NOTE(review): several dropped lines (declarations, the early return,
 * loop tails and the final `return') were reconstructed from the
 * visible fragments — verify against upstream gc.c before relying on
 * exact token-level fidelity.  */
static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  ptr = CELL_UP (seg_org, span);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = CELL_DN ((char *) seg_org + size, span);

  /* Find the right place and insert the segment record.
   */
  for (new_seg_index = 0;
       ( (new_seg_index < scm_n_heap_segs)
	 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    int i;
    /* Shift later records up to make room for the new one.  */
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  /* Compute the least valid object pointer w/in this segment
   */
  ptr = CELL_UP (ptr, span);

  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;
    int n_cluster_cells = span * freelist->cluster_size;

    while (n_new_cells > span) /* at least one spine + one freecell */
      {
	/* Determine end of cluster
	 */
	if (n_new_cells >= n_cluster_cells)
	  {
	    seg_end = ptr + n_cluster_cells;
	    n_new_cells -= n_cluster_cells;
	  }
	else
	  /* [cmm] looks like the segment size doesn't divide cleanly by
	     cluster size.  bad cmm! */
	  abort ();

	/* Allocate cluster spine
	 */
	*clusterp = PTR2SCM (ptr);
	SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
	clusterp = SCM_CDRLOC (*clusterp);
	ptr += span;

	/* Thread the cluster's cells into a free list.  */
	while (ptr < seg_end)
	  {
	    SCM scmptr = PTR2SCM (ptr);

	    SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
	    SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
	    ptr += span;
	  }

	SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
      }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}
/* Round LEN (bytes) up to a whole number of clusters of FREELIST, plus
 * the alignment slack needed so the segment can be cell-aligned.  */
static scm_sizet
round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
{
  scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);

  return
    (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
    + ALIGNMENT_SLACK (freelist);
}
/* Grow the heap by allocating a new segment for FREELIST.
 *
 * Picks a segment size large enough that the predicted GC yield meets
 * freelist->min_yield_fraction of the new total heap, clamps it to
 * scm_max_segment_size, then tries malloc with "decaying ambition":
 * halving the request until it succeeds or falls below the minimum
 * useful size.  Errors are reported via scm_wta.
 *
 * NOTE(review): dropped lines (the decaying-allocation loop tail, the
 * `else' branches and local declarations) were reconstructed from the
 * visible fragments — verify against upstream gc.c.  */
static void
alloc_some_heap (scm_freelist_t *freelist)
{
  scm_heap_seg_data_t * tmptable;
  SCM_CELLPTR ptr;
  long len;

  /* Critical code sections (such as the garbage collector)
   * aren't supposed to add heap segments.
   */
  if (scm_gc_heap_lock)
    scm_wta (SCM_UNDEFINED, "need larger initial", "heap");

  /* Expand the heap tables to have room for the new segment.
   * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
   * only if the allocation of the segment itself succeeds.
   */
  len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);

  SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
			   realloc ((char *)scm_heap_table, len)));
  if (!tmptable)
    scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
  else
    scm_heap_table = tmptable;

  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained in
   * gc.h
   */
  {
    /* Assure that the new segment is predicted to be large enough.
     *
     * New yield should at least equal GC fraction of new heap size, i.e.
     *
     *   y + dh > f * (h + dh)
     *
     *    y : yield
     *    f : min yield fraction
     *    h : heap size
     *   dh : size of new heap segment
     *
     * This gives dh > (f * h - y) / (1 - f)
     */
    int f = freelist->min_yield_fraction;
    long h = SCM_HEAP_SIZE;
    long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
    len =  SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    fprintf (stderr, "(%d < %d)", len, min_cells);
#endif
    if (len < min_cells)
      len = min_cells + freelist->cluster_size;
    len *= sizeof (scm_cell);
    /* force new sampling */
    freelist->collected = LONG_MAX;
  }

  if (len > scm_max_segment_size)
    len = scm_max_segment_size;

  {
    scm_sizet smallest;

    smallest = CLUSTER_SIZE_IN_BYTES (freelist);

    if (len < smallest)
      len = smallest;

    /* Allocate with decaying ambition. */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
	   && (len >= smallest))
      {
	scm_sizet rounded_len = round_to_cluster_size (freelist, len);
	SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
	if (ptr)
	  {
	    init_heap_seg (ptr, rounded_len, freelist);
	    return;
	  }
	len /= 2;
      }
  }

  scm_wta (SCM_UNDEFINED, "could not grow", "heap");
}
/* Scheme procedure unhash-name: walk every heap segment and, for each
 * gloc cell whose variable name matches NAME (or unconditionally when
 * NAME is #t) and whose vcell is a real binding (neither 0 nor 1),
 * reset the cell's word 0 to NAME — forcing the memoized binding to be
 * looked up again.
 *
 * NOTE(review): the SCM_DEFINE argument list and docstring, interrupt
 * deferral, loop scaffolding and `return name;' were dropped by the
 * extraction and reconstructed here — verify against upstream gc.c.  */
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
	    "")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1,name);
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p  = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
	{
	  SCM cell = PTR2SCM (p);
	  if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
	    {
	      /* Dirk:FIXME:: Again, super ugly code:  cell may be a gloc or a
	       * struct cell.  See the corresponding comment in scm_gc_mark.
	       */
	      scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
	      SCM gloc_car = SCM_PACK (word0); /* access as gloc */
	      SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
	      if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
		  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
		{
		  SCM_SET_CELL_OBJECT_0 (cell, name);
		}
	    }
	  ++p;
	}
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME
2071 /* {GC Protection Helper Functions}
2076 scm_remember (SCM
*ptr
)
2081 These crazy functions prevent garbage collection
2082 of arguments after the first argument by
2083 ensuring they remain live throughout the
2084 function because they are used in the last
2085 line of the code block.
2086 It'd be better to have a nice compiler hint to
2087 aid the conservative stack-scanning GC. --03/09/00 gjb */
2089 scm_return_first (SCM elt
, ...)
2095 scm_return_first_int (int i
, ...)
/* Protect OBJ from GC forever by consing it onto the global
 * scm_permobjs list (itself a GC root).  Returns OBJ.
 *
 * NOTE(review): interrupt-deferral wrappers and the return statement
 * were reconstructed — extraction dropped those lines.  */
SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
2111 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2112 other references are dropped, until the object is unprotected by calling
2113 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
2114 i. e. it is possible to protect the same object several times, but it is
2115 necessary to unprotect the object the same number of times to actually get
2116 the object unprotected. It is an error to unprotect an object more often
2117 than it has been protected before. The function scm_protect_object returns
2121 /* Implementation note: For every object X, there is a counter which
2122 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
/* Protect OBJ from GC.  Protection is counted: each call increments a
 * per-object counter kept in the scm_protects hash table, and
 * scm_unprotect_object decrements it.  Returns OBJ.
 *
 * NOTE(review): surrounding braces, locals and the return were
 * reconstructed — extraction dropped those lines.  */
SCM
scm_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  /* Create the counter entry on first protection (initial count 0),
   * then increment it.  */
  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));

  SCM_REALLOW_INTS;

  return obj;
}
2142 /* Remove any protection for OBJ established by a prior call to
2143 scm_protect_object. This function returns OBJ.
2145 See scm_protect_object for more information. */
/* Undo one level of protection established by scm_protect_object.
 * When the counter reaches zero the object is removed from the
 * scm_protects table and becomes collectable again.  Unprotecting an
 * object that is not protected is a fatal error.  Returns OBJ.
 *
 * NOTE(review): the abort() on the error path, count==0 test and
 * return were reconstructed — extraction dropped those lines.  */
SCM
scm_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_IMP (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
      if (count == 0)
	scm_hashq_remove_x (scm_protects, obj);
      else
	SCM_SETCDR (handle, SCM_MAKINUM (count));
    }

  SCM_REALLOW_INTS;

  return obj;
}
/* called on process termination.  Flushes all open ports so buffered
 * output is not lost on exit.  The signature depends on which exit-hook
 * mechanism the platform provides (atexit vs. on_exit).
 *
 * NOTE(review): the #ifdef scaffolding was reconstructed — the
 * extraction dropped the conditional lines; verify the exact
 * HAVE_ATEXIT/HAVE_ON_EXIT structure against upstream gc.c.  */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  scm_flush_all_ports ();
}
/* Create the initial heap segment for FREELIST, trying
 * INIT_HEAP_SIZE first and falling back to SCM_HEAP_SEG_SIZE.
 * On success sets scm_expmem when the requested size was honored, and
 * primes min_yield/grow_heap_p from the configured yield fraction.
 * Returns 0 on success, 1 if neither allocation succeeded.
 *
 * NOTE(review): the fallback branch, `scm_expmem = 1;' and the returns
 * were reconstructed — extraction dropped those lines.  */
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
		      rounded_size,
		      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
			  rounded_size,
			  freelist))
	return 1;
    }
  else
    scm_expmem = 1;

  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
			   / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}
/* Initialize FREELIST to an empty state.
 *
 * SPAN is the cell span (1 for single cells, 2 for doubles),
 * CLUSTER_SIZE the number of usable cells per cluster (stored +1 to
 * account for the cluster spine cell), and MIN_YIELD the minimum GC
 * yield percentage used by the grow-heap heuristic.
 *
 * NOTE(review): the parameter names span/cluster_size/min_yield are
 * inferred from the field assignments and the visible call sites in
 * scm_init_storage — the extraction dropped the parameter lines.  */
static void
init_freelist (scm_freelist_t *freelist,
	       int span,
	       int cluster_size,
	       int min_yield)
{
  freelist->clusters = SCM_EOL;
  freelist->cluster_size = cluster_size + 1;
  freelist->left_to_collect = 0;
  freelist->clusters_allocated = 0;
  freelist->min_yield = 0;
  freelist->min_yield_fraction = min_yield;
  freelist->span = span;
  freelist->collected = 0;
  freelist->collected_1 = 0;
  freelist->heap_size = 0;
}
/* Initialize the storage subsystem: protect roots, set up the two
 * master freelists (spans 1 and 2), allocate the heap table and the
 * initial heap segments, install GC hooks, create the port table,
 * register the exit-time cleanup handler, and create the fundamental
 * protected Scheme objects and symbol hash tables.
 *
 * Zero trigger/size arguments select the compiled-in defaults.
 * Returns 0 on success, 1 if an initial segment or the port table
 * could not be allocated.
 *
 * NOTE(review): numerous dropped lines (local declarations, the
 * protect-roots loop, `scm_block_gc = 1;', `scm_expmem = 0;', the
 * atexit/on_exit conditionals, SCM_BIGDIG guard and returns) were
 * reconstructed from the visible fragments — verify against upstream
 * gc.c before relying on token-level fidelity.  */
int
scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
		  scm_sizet init_heap_size_2, int gc_trigger_2,
		  scm_sizet max_segment_size)
{
  scm_sizet j;

  if (!init_heap_size_1)
    init_heap_size_1 = scm_default_init_heap_size_1;
  if (!init_heap_size_2)
    init_heap_size_2 = scm_default_init_heap_size_2;

  /* Clear the protected-roots vector before anything can be GC'd.  */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  init_freelist (&scm_master_freelist,
		 1, SCM_CLUSTER_SIZE_1,
		 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
  init_freelist (&scm_master_freelist2,
		 2, SCM_CLUSTER_SIZE_2,
		 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
  scm_max_segment_size
    = max_segment_size ? max_segment_size : scm_default_max_segment_size;

  scm_expmem = 0;

  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  scm_heap_table = ((scm_heap_seg_data_t *)
		    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));

  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports.  */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
  scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
  scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
  scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
  scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
#ifdef SCM_BIGDIG
  scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
#endif

  return 0;
}
2325 SCM scm_after_gc_hook
;
2327 #if (SCM_DEBUG_DEPRECATED == 0)
2328 static SCM scm_gc_vcell
; /* the vcell for gc-thunk. */
2329 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2330 static SCM gc_async
;
2333 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2334 * is run after the gc, as soon as the asynchronous events are handled by the
/* Body of the gc_async: runs the after-gc-hook once the asynchronous
 * events queued during GC are processed.  */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);

#if (SCM_DEBUG_DEPRECATED == 0)

  /* The following code will be removed in Guile 1.5.  */
  if (SCM_NFALSEP (scm_gc_vcell))
    {
      SCM proc = SCM_CDR (scm_gc_vcell);

      /* Call the deprecated gc-thunk binding if one is installed.  */
      if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
	scm_apply (proc, SCM_EOL, SCM_EOL);
    }

#endif /* SCM_DEBUG_DEPRECATED == 0 */

  return SCM_UNSPECIFIED;
}
2359 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2360 * the garbage collection. The only purpose of this function is to mark the
2361 * gc_async (which will eventually lead to the execution of the
/* Hook function installed on scm_after_gc_c_hook: marks gc_async so
 * that gc_async_thunk (and thus the after-gc-hook) runs as soon as
 * asynchronous events are next handled.  All three parameters are the
 * standard scm_c_hook arguments and are unused here.  */
static void *
mark_gc_async (void * hook_data, void *func_data, void *data)
{
  scm_system_async_mark (gc_async);
  return NULL;
}
2377 scm_after_gc_hook
= scm_create_hook ("after-gc-hook", 0);
2379 #if (SCM_DEBUG_DEPRECATED == 0)
2380 scm_gc_vcell
= scm_sysintern ("gc-thunk", SCM_BOOL_F
);
2381 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2382 /* Dirk:FIXME:: We don't really want a binding here. */
2383 after_gc_thunk
= scm_make_gsubr ("%gc-thunk", 0, 0, 0, gc_async_thunk
);
2384 gc_async
= scm_system_async (after_gc_thunk
);
2386 scm_c_hook_add (&scm_after_gc_c_hook
, mark_gc_async
, NULL
, 0);
2388 #include "libguile/gc.x"