/* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice.  */
/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
   gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */

/* #define DEBUGINFO */
#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"

#include "libguile/validate.h"
#include "libguile/gc.h"
#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif
#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start (x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start (x)
#endif
/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation.  The heap
 * is the area out of which scm_cons and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine.  The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of heap.  If this much heap is
 * allocated initially the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed.  SCM_HEAP_SEG_SIZE must fit into type scm_sizet.  This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap segment to accept
 * when more heap is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 *
 * A heap size of 45000 cells and a 40% minimum yield give quick startup
 * and no extra heap allocation.  Higher minimum-yield values may lead to
 * large heaps, especially if the running code varies its maximum
 * consumption between the different freelists.
 */
int scm_default_init_heap_size_1 = (45000L * sizeof (scm_cell));
int scm_default_min_yield_1 = 40;
#define SCM_CLUSTER_SIZE_1 2000L

int scm_default_init_heap_size_2 = (2500L * 2 * sizeof (scm_cell));
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application.
 */
int scm_default_min_yield_2 = 40;
#define SCM_CLUSTER_SIZE_2 1000L

int scm_default_max_segment_size = 2097000L;  /* a little less (adm) than 2 Mb */

#define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
# define SCM_HEAP_SEG_SIZE 32768L
# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))

/* Make heap grow with factor 1.5 */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
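
/* Illustrative note (added, not part of the original tuning code): with
 * SCM_EXPHEAP defined as size / 2, a heap currently holding N cells is
 * extended by a segment of N/2 cells, i.e. it grows by a factor of 1.5
 * per expansion; starting from the 45000-cell default this gives roughly
 * 45000 -> 67500 -> 101250 cells.  Similarly, a GC triggered by
 * must_malloc must reclaim at least SCM_MTRIGGER_HYSTERESIS
 * = 100000/10 = 10000 bytes of malloc storage, otherwise the trigger
 * threshold scm_mtrigger is raised.  */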
/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell-aligned
   inner bounds for allocated storage.  */

/* in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))

# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))

# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
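
/* Illustrative note (added, not from the original source): the last pair
 * of definitions rounds a pointer to a multiple of
 * sizeof(scm_cell)*span bytes.  Assuming 8-byte cells and span == 1,
 * CELL_UP(0x1003, 1) masks (0x1003 + 7) with ~7L and yields 0x1008,
 * while CELL_DN(0x1003, 1) yields 0x1000.  These rounded values are the
 * inner bounds actually used for a freshly malloc'd segment.  */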
#define CLUSTER_SIZE_IN_BYTES(freelist) \
  ((freelist)->cluster_size * (freelist)->span * sizeof (scm_cell))
#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow heap instead of GC:ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;
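
/* Illustrative sketch (an added note based on how these fields are used
 * later in this file, not text from the original): `clusters' is a list
 * whose spine cells are themselves heap cells.  Each spine cell's CAR
 * points at a chain of cluster_size - 1 free cells linked through their
 * CDRs, and its CDR points at the next spine cell:
 *
 *   clusters --> [spine|.]---->[spine|.]----> ... --> SCM_EOL
 *                   |             |
 *                   v             v
 *                 free->free    free->free-> ... ->SCM_EOL
 *
 * scm_gc_for_newcell hands out one such chain at a time to the
 * per-thread freelist.  */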
SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no
 * allocation is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* scm_block_gc
 * Don't pause for collection if this is set -- just expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;
/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_rt;
unsigned long scm_gc_time_taken = 0;
SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_freelist_t *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_heap_seg_data_t;
static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
static void alloc_some_heap (scm_freelist_t *);
/* Debugging functions.  */

#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)

/* Return the number of the heap segment containing CELL.  */
static int
which_seg (SCM cell)
{
  int i;
  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
      return i;
  fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
           (unsigned long) SCM_UNPACK (cell));
  abort ();
}
static void
map_free_list (scm_freelist_t *master, SCM freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, "  %5d %d-cells in segment %d\n",
                     count, master->span, last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, "  %5d %d-cells in segment %d\n",
             count, master->span, last_seg);
}
SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  int i;

  fprintf (stderr, "%d segments total (%d:%d",
           scm_n_heap_segs,
           scm_heap_table[0].span,
           scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
  for (i = 1; i < scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%d",
             scm_heap_table[i].span,
             scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
  fprintf (stderr, ")\n");
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
static int last_cluster;
static int last_size;

static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
    if (SCM_CELL_TYPE (ls) == scm_tc_free_cell)
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %d\n", title, n);
        abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
        {
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%d\n", last_size);
          else
            fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %d", title, i);
      else
        fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}
static void
free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
{
  SCM clusters;
  int i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%d\n", last_size);
  else
    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  fprintf (stderr, "\ntotal %d objects\n\n", n);
}
SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif /* defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST) */
#ifdef GUILE_DEBUG_FREELIST

/* Number of calls to SCM_NEWCELL since startup.  */
static unsigned long scm_newcell_count;
static unsigned long scm_newcell2_count;

/* Search freelist for anything that isn't marked as a free cell.
   Abort if we find something.  */
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
  int i = 0;

  for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
    if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
      {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
                 scm_newcell_count, i);
        abort ();
      }
}

static int scm_debug_check_freelist = 0;

SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
            "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
            "compile-time flag was selected.\n")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_IMP (scm_freelist))
    new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_CDR (scm_freelist);
      SCM_SETCAR (new, scm_tc16_allocated);
    }

  return new;
}

SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_IMP (scm_freelist2))
    new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_CDR (scm_freelist2);
      SCM_SETCAR (new, scm_tc16_allocated);
    }

  return new;
}

#endif /* GUILE_DEBUG_FREELIST */
static int
master_cells_allocated (scm_freelist_t *master)
{
  int objects = master->clusters_allocated * (master->cluster_size - 1);
  if (SCM_NULLP (master->clusters))
    objects -= master->left_to_collect;
  return master->span * objects;
}

static int
freelist_length (SCM freelist)
{
  int n;
  for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
    ++n;
  return n;
}

static int
compute_cells_allocated ()
{
  return (scm_cells_allocated
          + master_cells_allocated (&scm_master_freelist)
          + master_cells_allocated (&scm_master_freelist2)
          - scm_master_freelist.span * freelist_length (scm_freelist)
          - scm_master_freelist2.span * freelist_length (scm_freelist2));
}
/* {Scheme Interface to GC}
 */

SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Returns an association list of statistics about Guile's current use of storage.  ")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;
  SCM heap_segs;
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  SCM answer;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long) scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long) scm_heap_table[i].bounds[0])),
                          heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                        scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                        scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                        scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                        scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                        scm_cons (sym_heap_segments, heap_segs),
                        SCM_UNDEFINED);
  return answer;
}
#undef FUNC_NAME
static void
scm_gc_start (const char *what)
{
  scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
  scm_gc_cells_collected = 0;
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}

static void
scm_gc_end ()
{
  scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
  scm_gc_time_taken += scm_gc_rt;
}
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that for the lifetime of @var{obj} is uniquely\n"
            "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all of SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
/* {C Interface For When GC is Triggered}
 */

static void
adjust_min_yield (scm_freelist_t *freelist)
{
  /* min yield is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   *  by computing an average over multiple GC:s.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick largest of last two yields. */
      int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
                   - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %d, delta = %d\n",
               scm_cells_allocated, delta);
#endif
      freelist->min_yield += delta;
    }
}
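
/* Illustrative arithmetic (an added note, not from the original source):
 * with a total heap of 90000 cells, min_yield_fraction = 40 and a best
 * recent yield of max(scm_gc_yield_1, scm_gc_yield) = 30000 cells, the
 * adjustment above is delta = 90000 * 40 / 100 - 30000 = 6000, so
 * min_yield rises by 6000 cells.  If the next collection then frees
 * fewer cells than min_yield on this freelist, grow_heap_p is set and
 * the heap is expanded instead of being collected again.  */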
/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;

  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p || scm_block_gc)
            {
              master->grow_heap_p = 0;
              alloc_some_heap (master);
            }
          else
            {
              scm_igc ("cells");
#ifdef DEBUGINFO
              fprintf (stderr, "allocated = %d, ",
                       scm_cells_allocated
                       + master_cells_allocated (&scm_master_freelist)
                       + master_cells_allocated (&scm_master_freelist2));
#endif
              adjust_min_yield (master);
            }
        }
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));
  *freelist = SCM_CDR (cell);
  SCM_SET_CELL_TYPE (cell, scm_tc16_allocated);
  return cell;
}
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 */

SCM
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
scm_c_hook_t scm_before_gc_c_hook;
scm_c_hook_t scm_before_mark_c_hook;
scm_c_hook_t scm_before_sweep_c_hook;
scm_c_hook_t scm_after_sweep_c_hook;
scm_c_hook_t scm_after_gc_c_hook;
void
scm_igc (const char *what)
{
  int j;

  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
  /* During the critical section, only the current thread may run. */
  SCM_THREAD_CRITICAL_SECTION_START;

  /* fprintf (stderr, "gc: %s\n", what); */

  scm_gc_start (what);

  if (!scm_stack_base || scm_block_gc)
    {
      scm_gc_end ();
      return;
    }

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed. */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen. */
    abort ();

  ++scm_gc_heap_lock;

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM *elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    while (x < bound)
      {
        elts[x] = SCM_BOOL_F;
        ++x;
      }
  }

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

#ifndef USE_THREADS
  /* Protect from the C stack.  This must be the first marking
   * done because it provides information about what objects
   * are "in-use" by the C code.   "in-use" objects are those
   * for which the values from SCM_LENGTH and SCM_CHARS must remain
   * usable.  This requirement is stricter than a liveness
   * requirement -- in particular, it constrains the implementation
   * of scm_vector_set_length_x.
   */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      (   (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
                                       sizeof scm_save_regs_gc_mark)
                          / sizeof (SCM_STACKITEM)));

  {
    scm_sizet stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  /* FIXME: insert a phase to un-protect string-data preserved
   * in scm_vector_set_length_x.
   */

  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

  scm_gc_mark (scm_root->handle);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  scm_gc_end ();

  SCM_THREAD_CRITICAL_SECTION_END;

  scm_c_hook_run (&scm_after_gc_c_hook, 0);
}
/* Mark an object precisely.
 */
void
scm_gc_mark (SCM ptr)
{
  register long i;

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:
  if (SCM_NCELLP (ptr))
    scm_wta (ptr, "rogue pointer in heap", NULL);

  switch (SCM_TYP7 (ptr))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr)))  /* SCM_IMP works even with a GC mark */
        {
          ptr = SCM_CAR (ptr);
          goto gc_mark_nimp;
        }
      scm_gc_mark (SCM_CAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_pws:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      {
        /* Dirk:FIXME:: The following code is super ugly:  ptr may be a struct
         * or a gloc.  If it is a gloc, the cell word #0 of ptr is a pointer
         * to a heap cell.  If it is a struct, the cell word #0 of ptr is a
         * pointer to a struct vtable data region.  The fact that these are
         * accessed in the same way restricts the possibilities to change the
         * data layout of structs or heap cells.
         */
        scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
        scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
        switch (vtable_data[scm_vtable_index_vcell])
          {
          default:
            {
              /* ptr is a gloc */
              SCM gloc_car = SCM_PACK (word0);
              scm_gc_mark (gloc_car);
              ptr = SCM_GCCDR (ptr);
              goto gc_mark_loop;
            }
          case 1:
          case 0:
            {
              /* ptr is a struct */
              SCM layout = SCM_PACK (vtable_data[scm_vtable_index_layout]);
              int len = SCM_LENGTH (layout);
              char * fields_desc = SCM_CHARS (layout);
              /* We're using SCM_GCCDR here like STRUCT_DATA, except
                 that it removes the mark */
              scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));

              if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
                {
                  scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
                  scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
                }
              {
                int x;

                for (x = 0; x < len - 2; x += 2, ++struct_data)
                  if (fields_desc[x] == 'p')
                    scm_gc_mark (SCM_PACK (*struct_data));
                if (fields_desc[x] == 'p')
                  {
                    if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                      for (x = *struct_data; x; --x)
                        scm_gc_mark (SCM_PACK (*++struct_data));
                    else
                      scm_gc_mark (SCM_PACK (*struct_data));
                  }
              }
              if (vtable_data[scm_vtable_index_vcell] == 0)
                {
                  vtable_data[scm_vtable_index_vcell] = 1;
                  ptr = SCM_PACK (vtable_data[scm_vtable_index_vtable]);
                  goto gc_mark_loop;
                }
            }
          }
      }
      break;
    case scm_tcs_closures:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto gc_mark_nimp;
        }
      scm_gc_mark (SCM_CLOSCAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
    case scm_tc7_lvector:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      i = SCM_LENGTH (ptr);
      if (i == 0)
        break;
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          scm_gc_mark (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
    case scm_tc7_contin:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      if (SCM_VELTS (ptr))
        scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
                            (scm_sizet)
                            (SCM_LENGTH (ptr) +
                             (sizeof (SCM_STACKITEM) + -1 +
                              sizeof (scm_contregs)) /
                             sizeof (SCM_STACKITEM)));
      break;
    case scm_tc7_byvect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
    case scm_tc7_string:
      SCM_SETGC8MARK (ptr);
      break;

    case scm_tc7_substring:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;

    case scm_tc7_wvect:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      SCM_SETGC8MARK (ptr);
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          int x;
          int len;
          int weak_keys;
          int weak_values;

          len = SCM_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (   SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);

                  /* Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    scm_gc_mark (SCM_CAR (kvpair));
                  if (!weak_values)
                    scm_gc_mark (SCM_GCCDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                scm_gc_mark (alist);
            }
        }
      break;
    case scm_tc7_msymbol:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
      ptr = SCM_SYMBOL_PROPS (ptr);
      goto gc_mark_loop;
    case scm_tc7_ssymbol:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
      if (!(i < scm_numptob))
        goto def;
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      if (SCM_PTAB_ENTRY (ptr))
        scm_gc_mark (SCM_PTAB_ENTRY (ptr)->file_name);
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      switch (SCM_GCTYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* printf("found free_cell %X ", ptr); fflush(stdout); */
        case scm_tc16_allocated:
        case scm_tc16_complex:
          break;
        default:
          i = SCM_SMOBNUM (ptr);
          if (!(i < scm_numsmob))
            goto def;
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
    def:
      scm_wta (ptr, "unknown type in ", "gc_mark");
    }
}


/* Mark a Region Conservatively
 */
void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  register long m = n;
  register int i, j;
  register SCM_CELLPTR ptr;

  while (m-- > 0)
    if (SCM_CELLP (* (SCM *) &x[m]))
      {
        ptr = SCM2PTR (* (SCM *) &x[m]);
        i = 0;
        j = scm_n_heap_segs - 1;
        if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
            && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
          {
            while (i <= j)
              {
                int seg_id;
                seg_id = -1;
                if ((i == j)
                    || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                  seg_id = i;
                else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                  seg_id = j;
                else
                  {
                    int k;
                    k = (i + j) / 2;
                    if (k == i)
                      break;
                    if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                      {
                        j = k;
                        ++i;
                        if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                          seg_id = i;
                        else
                          continue;
                      }
                    else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                      {
                        i = k;
                        --j;
                        if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                          seg_id = j;
                        else
                          continue;
                      }
                    else
                      continue;
                  }
                if (scm_heap_table[seg_id].span == 1
                    || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
                  scm_gc_mark (* (SCM *) &x[m]);
                break;
              }
          }
      }
}
/* The function scm_cellp determines whether an SCM value can be regarded as a
 * pointer to a cell on the heap.  Binary search is used in order to determine
 * the heap segment that contains the cell.
 */
int
scm_cellp (SCM value)
{
  if (SCM_CELLP (value)) {
    scm_cell * ptr = SCM2PTR (value);
    unsigned int i = 0;
    unsigned int j = scm_n_heap_segs - 1;

    while (i < j) {
      int k = (i + j) / 2;
      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
        j = k;
      } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
        i = k + 1;
      }
    }

    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
        && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) {
      return 1;
    } else {
      return 0;
    }
  } else {
    return 0;
  }
}
static void
gc_sweep_freelist_start (scm_freelist_t *freelist)
{
  freelist->cells = SCM_EOL;
  freelist->left_to_collect = freelist->cluster_size;
  freelist->clusters_allocated = 0;
  freelist->clusters = SCM_EOL;
  freelist->clustertail = &freelist->clusters;
  freelist->collected_1 = freelist->collected;
  freelist->collected = 0;
}

static void
gc_sweep_freelist_finish (scm_freelist_t *freelist)
{
  long collected;
  *freelist->clustertail = freelist->cells;
  if (SCM_NNULLP (freelist->cells))
    {
      SCM c = freelist->cells;
      SCM_SETCAR (c, SCM_CDR (c));
      SCM_SETCDR (c, SCM_EOL);
      freelist->collected +=
        freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}
void
scm_gc_sweep ()
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_freelist_t *freelist;
  register long m;
  register int span;
  long i;
  scm_sizet seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register unsigned int left_to_collect;
      register scm_sizet j;

      /* Unmarked cells go onto the front of the freelist this heap
         segment points to.  Rather than updating the real freelist
         pointer as we go along, we accumulate the new head in
         nfreelist.  Then, if it turns out that the entire segment is
         free, we free (i.e., malloc's free) the whole segment, and
         simply don't assign nfreelist back into the real freelist. */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
      for (j = seg_size + span; j -= span; ptr += span)
        {
          SCM scmptr = PTR2SCM (ptr);

          switch SCM_TYP7 (scmptr)
            {
            case scm_tcs_cons_gloc:
              {
                /* Dirk:FIXME:: Again, super ugly code:  scmptr may be a
                 * struct or a gloc.  See the corresponding comment in
                 * scm_gc_mark.
                 */
                scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
                scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
                if (SCM_GCMARKP (scmptr))
                  {
                    if (vtable_data[scm_vtable_index_vcell] == 1)
                      vtable_data[scm_vtable_index_vcell] = 0;
                    goto cmrkcontinue;
                  }
                else
                  {
                    if (vtable_data[scm_vtable_index_vcell] == 0
                        || vtable_data[scm_vtable_index_vcell] == 1)
                      {
                        scm_struct_free_t free
                          = (scm_struct_free_t) vtable_data[scm_struct_i_free];
                        m += free (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
                      }
                  }
              }
              break;
            case scm_tcs_cons_imcar:
            case scm_tcs_cons_nimcar:
            case scm_tcs_closures:
              if (SCM_GCMARKP (scmptr))
                goto cmrkcontinue;
              break;
            case scm_tc7_wvect:
              if (SCM_GC8MARKP (scmptr))
                goto c8mrkcontinue;
              m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
              scm_must_free ((char *) (SCM_VELTS (scmptr) - 2));
              break;
:
1408 case scm_tc7_lvector
:
1412 if (SCM_GC8MARKP (scmptr
))
1415 m
+= (SCM_LENGTH (scmptr
) * sizeof (SCM
));
1417 scm_must_free (SCM_CHARS (scmptr
));
1418 /* SCM_SETCHARS(scmptr, 0);*/
1422 if SCM_GC8MARKP (scmptr
)
1424 m
+= sizeof (long) * ((SCM_HUGE_LENGTH (scmptr
) + SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
1426 case scm_tc7_byvect
:
1427 if SCM_GC8MARKP (scmptr
)
1429 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (char);
1433 if SCM_GC8MARKP (scmptr
)
1435 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long);
1438 if SCM_GC8MARKP (scmptr
)
1440 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (short);
1442 #ifdef HAVE_LONG_LONGS
1443 case scm_tc7_llvect
:
1444 if SCM_GC8MARKP (scmptr
)
1446 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long_long
);
1450 if SCM_GC8MARKP (scmptr
)
1452 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (float);
1455 if SCM_GC8MARKP (scmptr
)
1457 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (double);
1460 if SCM_GC8MARKP (scmptr
)
1462 m
+= SCM_HUGE_LENGTH (scmptr
) * 2 * sizeof (double);
1465 case scm_tc7_substring
:
1466 if (SCM_GC8MARKP (scmptr
))
1469 case scm_tc7_string
:
1470 if (SCM_GC8MARKP (scmptr
))
1472 m
+= SCM_HUGE_LENGTH (scmptr
) + 1;
1474 case scm_tc7_msymbol
:
1475 if (SCM_GC8MARKP (scmptr
))
1477 m
+= (SCM_LENGTH (scmptr
) + 1
1478 + (SCM_CHARS (scmptr
) - (char *) SCM_SLOTS (scmptr
)));
1479 scm_must_free ((char *)SCM_SLOTS (scmptr
));
1481 case scm_tc7_contin
:
1482 if SCM_GC8MARKP (scmptr
)
1484 m
+= SCM_LENGTH (scmptr
) * sizeof (SCM_STACKITEM
) + sizeof (scm_contregs
);
1485 if (SCM_VELTS (scmptr
))
1487 case scm_tc7_ssymbol
:
1488 if SCM_GC8MARKP(scmptr
)
1494 if SCM_GC8MARKP (scmptr
)
1496 if SCM_OPENP (scmptr
)
1498 int k
= SCM_PTOBNUM (scmptr
);
1499 if (!(k
< scm_numptob
))
1501 /* Keep "revealed" ports alive. */
1502 if (scm_revealed_count (scmptr
) > 0)
1504 /* Yes, I really do mean scm_ptobs[k].free */
1505 /* rather than ftobs[k].close. .close */
1506 /* is for explicit CLOSE-PORT by user */
1507 m
+= (scm_ptobs
[k
].free
) (scmptr
);
1508 SCM_SETSTREAM (scmptr
, 0);
1509 scm_remove_from_port_table (scmptr
);
1510 scm_gc_ports_collected
++;
1511 SCM_SETAND_CAR (scmptr
, ~SCM_OPN
);
            case scm_tc7_smob:
              switch SCM_GCTYP16 (scmptr)
                {
                case scm_tc_free_cell:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;
                  break;
#ifdef SCM_BIGDIG
                case scm_tc16_big:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;
                  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
                  goto freechars;
#endif /* def SCM_BIGDIG */
                case scm_tc16_complex:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;
                  m += 2 * sizeof (double);
                  goto freechars;
                default:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;
                  {
                    int k;
                    k = SCM_SMOBNUM (scmptr);
                    if (!(k < scm_numsmob))
                      goto sweeperr;
                    m += (scm_smobs[k].free) (scmptr);
                    break;
                  }
                }
              break;
            default:
            sweeperr:
              scm_wta (scmptr, "unknown type in ", "gc_sweep");
            }
          if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
            abort ();

          if (!--left_to_collect)
            {
              SCM_SETCAR (scmptr, nfreelist);
              *freelist->clustertail = scmptr;
              freelist->clustertail = SCM_CDRLOC (scmptr);

              nfreelist = SCM_EOL;
              freelist->collected += span * freelist->cluster_size;
              left_to_collect = freelist->cluster_size;
            }
          else
            {
              /* Stick the new cell on the front of nfreelist.  It's
                 critical that we mark this cell as freed; otherwise, the
                 conservative collector might trace it as some other type
                 of object.  */
              SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
              SCM_SETCDR (scmptr, nfreelist);
              nfreelist = scmptr;
            }

          continue;
        c8mrkcontinue:
          SCM_CLRGC8MARK (scmptr);
          continue;
        cmrkcontinue:
          SCM_CLRGCMARK (scmptr);
        }
#ifdef GC_FREE_SEGMENTS
      if (n == seg_size)
        {
          register long j;

          freelist->heap_size -= seg_size;
          free ((char *) scm_heap_table[i].bounds[0]);
          scm_heap_table[i].bounds[0] = 0;
          for (j = i + 1; j < scm_n_heap_segs; j++)
            scm_heap_table[j - 1] = scm_heap_table[j];
          scm_n_heap_segs -= 1;
          i--;    /* We need to scan the segment just moved.  */
        }
#endif /* ifdef GC_FREE_SEGMENTS */
      /* Update the real freelist pointer to point to the head of
         the list of free cells we've built for this segment.  */
      freelist->cells = nfreelist;
      freelist->left_to_collect = left_to_collect;
    }

#ifdef GUILE_DEBUG_FREELIST
  scm_check_freelist (freelist == &scm_master_freelist
                      ? scm_freelist
                      : scm_freelist2);
  scm_map_free_list ();
#endif

  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
/* {Front end to malloc}
 *
 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
 *
 * These functions provide services comparable to malloc, realloc, and
 * free.  They are for allocating malloced parts of scheme objects.
 * The primary purpose of the front end is to impose calls to gc.  */

/* scm_must_malloc
 * Return newly malloced storage or throw an error.
 *
 * The parameter WHAT is a string for error reporting.
 * If the threshold scm_mtrigger will be passed by this
 * allocation, or if the first call to malloc fails,
 * garbage collect -- on the presumption that some objects
 * using malloced storage may be collected.
 *
 * The limit scm_mtrigger may be raised by this allocation.
 */
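
/* Illustrative usage sketch (added, not from the original source; the
 * struct and field names below are hypothetical):
 *
 *   struct image { int width, height; unsigned char *pixels; };
 *
 *   struct image *img = (struct image *)
 *     scm_must_malloc (sizeof (struct image), "make-image");
 *   img->width = 16;
 *   img->height = 16;
 *   img->pixels = (unsigned char *)
 *     scm_must_malloc ((scm_sizet) (img->width * img->height), "make-image");
 *
 * Each call either returns usable storage or signals an error after a
 * last-resort GC; the requested sizes are added to scm_mallocated so
 * that malloc-triggered collections account for them.  */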
void *
scm_must_malloc (scm_sizet size, const char *what)
{
  void *ptr;
  unsigned long nm = scm_mallocated + size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_register (ptr, what);
#endif
          return ptr;
        }
    }

  scm_igc (what);

  nm = scm_mallocated + size;
  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_register (ptr, what);
#endif
      return ptr;
    }

  scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
  return 0; /* never reached */
}
/* scm_must_realloc
 * is similar to scm_must_malloc.
 */
void *
scm_must_realloc (void *where,
                  scm_sizet old_size,
                  scm_sizet size,
                  const char *what)
{
  void *ptr;
  scm_sizet nm = scm_mallocated + size - old_size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = realloc (where, size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_reregister (where, ptr, what);
#endif
          return ptr;
        }
    }

  scm_igc (what);

  nm = scm_mallocated + size - old_size;
  SCM_SYSCALL (ptr = realloc (where, size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_reregister (where, ptr, what);
#endif
      return ptr;
    }

  scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
  return 0; /* never reached */
}
void
scm_must_free (void *obj)
{
#ifdef GUILE_DEBUG_MALLOC
  scm_malloc_unregister (obj);
#endif
  if (obj)
    free (obj);
  else
    scm_wta (SCM_INUM0, "already free", "");
}
/* Announce that there has been some malloc done that will be freed
 * during gc.  A typical use is for a smob that uses some malloced
 * memory but can not get it from scm_must_malloc (for whatever
 * reason).  When a new object of this smob is created you call
 * scm_done_malloc with the size of the object.  When your smob free
 * function is called, be sure to include this size in the return
 * value.  */

void
scm_done_malloc (long size)
{
  scm_mallocated += size;

  if (scm_mallocated > scm_mtrigger)
    {
      scm_igc ("foreign mallocs");
      if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
        {
          if (scm_mallocated > scm_mtrigger)
            scm_mtrigger = scm_mallocated + scm_mallocated / 2;
          else
            scm_mtrigger += scm_mtrigger / 2;
        }
    }
}
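
/* Illustrative usage sketch (added; the smob accessor below is
 * hypothetical):
 *
 *   void *buf = malloc (1024);     -- memory Guile did not allocate
 *   scm_done_malloc (1024);        -- tell the GC it is outstanding
 *
 *   static scm_sizet
 *   free_my_smob (SCM obj)
 *   {
 *     free (my_smob_buffer (obj)); -- hypothetical accessor
 *     return 1024;                 -- report the bytes released
 *   }
 *
 * Reporting the size both here and from the smob free function keeps
 * scm_mallocated balanced, which is what the underflow check in
 * scm_igc complains about when it is forgotten.  */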
/* Each heap segment is an array of objects of a particular size.
 * Every segment has an associated (possibly shared) freelist.
 * A table of segment records is kept that records the upper and
 * lower extents of the segment;  this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
 * into valid segments at reasonable offsets).  */

/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
scm_sizet scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

scm_heap_seg_data_t * scm_heap_table = 0;
int scm_n_heap_segs = 0;
/* init_heap_seg
 * initializes a new heap segment and returns the number of objects it contains.
 *
 * The segment origin, segment size in bytes, and the span of objects
 * in cells are input parameters.  The freelist is both input and output.
 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record.
 */

static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  ptr = CELL_UP (seg_org, span);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = CELL_DN ((char *) seg_org + size, span);

  /* Find the right place and insert the segment record.
   */
  for (new_seg_index = 0;
       (   (new_seg_index < scm_n_heap_segs)
        && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  /* Compute the least valid object pointer w/in this segment
   */
  ptr = CELL_UP (ptr, span);

  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;
    int n_cluster_cells = span * freelist->cluster_size;

    while (n_new_cells > span) /* at least one spine + one freecell */
      {
        /* Determine end of cluster
         */
        if (n_new_cells >= n_cluster_cells)
          {
            seg_end = ptr + n_cluster_cells;
            n_new_cells -= n_cluster_cells;
          }
        else
          /* [cmm] looks like the segment size doesn't divide cleanly by
             cluster size.  bad cmm! */
          abort ();

        /* Allocate cluster spine
         */
        *clusterp = PTR2SCM (ptr);
        SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
        clusterp = SCM_CDRLOC (*clusterp);
        ptr += span;

        while (ptr < seg_end)
          {
            SCM scmptr = PTR2SCM (ptr);

            SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
            SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
            ptr += span;
          }

        SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
      }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}
static scm_sizet
round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
{
  scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);

  return
    (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
    + ALIGNMENT_SLACK (freelist);
}
static void
alloc_some_heap (scm_freelist_t *freelist)
{
  scm_heap_seg_data_t * tmptable;
  SCM_CELLPTR ptr;
  long len;

  /* Critical code sections (such as the garbage collector)
   * aren't supposed to add heap segments.
   */
  if (scm_gc_heap_lock)
    scm_wta (SCM_UNDEFINED, "need larger initial", "heap");

  /* Expand the heap tables to have room for the new segment.
   * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
   * only if the allocation of the segment itself succeeds.
   */
  len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);

  SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
                           realloc ((char *) scm_heap_table, len)));
  if (!tmptable)
    scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
  scm_heap_table = tmptable;

  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained below.
   */
  {
    /* Assure that the new segment is predicted to be large enough.
     *
     * New yield should at least equal GC fraction of new heap size, i.e.
     *
     *   y + dh > f * (h + dh)
     *
     *    y : yield
     *    f : min yield fraction
     *    h : heap size
     *   dh : size of new heap segment
     *
     * This gives dh > (f * h - y) / (1 - f)
     */
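    /* Illustrative arithmetic (an added note, not from the original
     * source): with f = 40 (percent), h = 90000 cells and a yield
     * y = 30000 cells, min_cells below evaluates to
     * (40 * 90000 - 100 * 30000) / (99 - 40) = 600000 / 59, roughly
     * 10169 cells, i.e. the new segment must add about ten thousand
     * cells for the predicted yield to reach 40% of the enlarged heap.
     * (The factors of 100, and 99 instead of 100, keep the computation
     * in integer percent units while erring towards a slightly larger
     * segment.)
     */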
    int f = freelist->min_yield_fraction;
    long h = SCM_HEAP_SIZE;
    long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
    len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    fprintf (stderr, "(%d < %d)", len, min_cells);
#endif
    if (len < min_cells)
      len = min_cells + freelist->cluster_size;
    len *= sizeof (scm_cell);
    /* force new sampling */
    freelist->collected = LONG_MAX;
  }

  if (len > scm_max_segment_size)
    len = scm_max_segment_size;

  {
    scm_sizet smallest;

    smallest = CLUSTER_SIZE_IN_BYTES (freelist);

    /* Allocate with decaying ambition. */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
           && (len >= smallest))
      {
        scm_sizet rounded_len = round_to_cluster_size (freelist, len);
        SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
        if (ptr)
          {
            init_heap_seg (ptr, rounded_len, freelist);
            return;
          }
        len /= 2;
      }
  }

  scm_wta (SCM_UNDEFINED, "could not grow", "heap");
}
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
            "")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1, name);
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
        {
          SCM cell = PTR2SCM (p);
          if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
            {
              /* Dirk:FIXME:: Again, super ugly code:  cell may be a gloc or a
               * struct cell.  See the corresponding comment in scm_gc_mark.
               */
              scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
              SCM gloc_car = SCM_PACK (word0); /* access as gloc */
              SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
              if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
                  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
                {
                  SCM_SET_CELL_OBJECT_0 (cell, name);
                }
            }
          ++p;
        }
    }
  return name;
}
#undef FUNC_NAME
2071 scm_remember (SCM
*ptr
)
2076 These crazy functions prevent garbage collection
2077 of arguments after the first argument by
2078 ensuring they remain live throughout the
2079 function because they are used in the last
2080 line of the code block.
2081 It'd be better to have a nice compiler hint to
2082 aid the conservative stack-scanning GC. --03/09/00 gjb */
2084 scm_return_first (SCM elt
, ...)
2090 scm_return_first_int (int i
, ...)
2097 scm_permanent_object (SCM obj
)
2100 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
2106 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2107 other references are dropped, until the object is unprotected by calling
2108 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
2109 i. e. it is possible to protect the same object several times, but it is
2110 necessary to unprotect the object the same number of times to actually get
2111 the object unprotected. It is an error to unprotect an object more often
2112 than it has been protected before. The function scm_protect_object returns
2116 /* Implementation note: For every object X, there is a counter which
2117 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
SCM
scm_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));

  SCM_REALLOW_INTS;

  return obj;
}


/* Remove any protection for OBJ established by a prior call to
   scm_protect_object.  This function returns OBJ.

   See scm_protect_object for more information.  */
SCM
scm_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_IMP (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
      if (count == 0)
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, SCM_MAKINUM (count));
    }

  SCM_REALLOW_INTS;

  return obj;
}
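
/* Illustrative usage sketch (added, not from the original source;
 * user_supplied_procedure is a hypothetical value):
 *
 *   SCM callback = scm_protect_object (user_supplied_procedure);
 *   ...    callback stays alive across collections even with no
 *          other live reference to it
 *   scm_unprotect_object (callback);
 *
 * Because the protection is counted, protecting the same object twice
 * requires two calls to scm_unprotect_object before it becomes
 * collectable again.  */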
/* called on process termination.  */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);
static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  scm_flush_all_ports ();
}
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
                      rounded_size,
                      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
                          rounded_size,
                          freelist))
        return 1;
    }

  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
                           / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}
static void
init_freelist (scm_freelist_t *freelist,
               int span,
               int cluster_size,
               int min_yield)
{
  freelist->clusters = SCM_EOL;
  freelist->cluster_size = cluster_size + 1;
  freelist->left_to_collect = 0;
  freelist->clusters_allocated = 0;
  freelist->min_yield = 0;
  freelist->min_yield_fraction = min_yield;
  freelist->span = span;
  freelist->collected = 0;
  freelist->collected_1 = 0;
  freelist->heap_size = 0;
}
int
scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
                  scm_sizet init_heap_size_2, int gc_trigger_2,
                  scm_sizet max_segment_size)
{
  scm_sizet j;

  if (!init_heap_size_1)
    init_heap_size_1 = scm_default_init_heap_size_1;
  if (!init_heap_size_2)
    init_heap_size_2 = scm_default_init_heap_size_2;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  init_freelist (&scm_master_freelist,
                 1, SCM_CLUSTER_SIZE_1,
                 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
  init_freelist (&scm_master_freelist2,
                 2, SCM_CLUSTER_SIZE_2,
                 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
  scm_max_segment_size
    = max_segment_size ? max_segment_size : scm_default_max_segment_size;

  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  scm_heap_table = ((scm_heap_seg_data_t *)
                    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));

  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports.  */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

  on_exit (cleanup, 0);

  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
  scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
  scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
  scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
  scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
  scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));

  return 0;
}
SCM scm_after_gc_hook;

#if (SCM_DEBUG_DEPRECATED == 0)
static SCM scm_gc_vcell;  /* the vcell for gc-thunk.  */
#endif /* SCM_DEBUG_DEPRECATED == 0 */
static SCM gc_async;


/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);

#if (SCM_DEBUG_DEPRECATED == 0)

  /* The following code will be removed in Guile 1.5.  */
  if (SCM_NFALSEP (scm_gc_vcell))
    {
      SCM proc = SCM_CDR (scm_gc_vcell);

      if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
        scm_apply (proc, SCM_EOL, SCM_EOL);
    }

#endif /* SCM_DEBUG_DEPRECATED == 0 */

  return SCM_UNSPECIFIED;
}


/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void * hook_data, void *func_data, void *data)
{
  scm_system_async_mark (gc_async);
  return NULL;
}


void
scm_init_gc ()
{
  SCM after_gc_thunk;

  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);

#if (SCM_DEBUG_DEPRECATED == 0)
  scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
#endif /* SCM_DEBUG_DEPRECATED == 0 */
  /* Dirk:FIXME:: We don't really want a binding here. */
  after_gc_thunk = scm_make_gsubr ("%gc-thunk", 0, 0, 0, gc_async_thunk);
  gc_async = scm_system_async (after_gc_thunk);

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}