1 /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
43 /* #define DEBUGINFO */
45 /* SECTION: This code is compiled once.
48 #ifndef MARK_DEPENDENCIES
55 #include "libguile/_scm.h"
56 #include "libguile/eval.h"
57 #include "libguile/stime.h"
58 #include "libguile/stackchk.h"
59 #include "libguile/struct.h"
60 #include "libguile/smob.h"
61 #include "libguile/unif.h"
62 #include "libguile/async.h"
63 #include "libguile/ports.h"
64 #include "libguile/root.h"
65 #include "libguile/strings.h"
66 #include "libguile/vectors.h"
67 #include "libguile/weaks.h"
68 #include "libguile/hashtab.h"
69 #include "libguile/tags.h"
71 #include "libguile/validate.h"
72 #include "libguile/deprecation.h"
73 #include "libguile/gc.h"
75 #ifdef GUILE_DEBUG_MALLOC
76 #include "libguile/debug-malloc.h"
89 #define var_start(x, y) va_start(x, y)
92 #define var_start(x, y) va_start(x)
97 unsigned int scm_gc_running_p
= 0;
101 #if (SCM_DEBUG_CELL_ACCESSES == 1)
103 scm_t_bits scm_tc16_allocated
;
105 /* Set this to != 0 if every cell that is accessed shall be checked:
107 unsigned int scm_debug_cell_accesses_p
= 1;
109 /* Set this to 0 if no additional gc's shall be performed, otherwise set it to
110 * the number of cell accesses after which a gc shall be called.
112 static unsigned int debug_cells_gc_interval
= 0;
115 /* If an allocated cell is detected during garbage collection, this means that
116 * some code has just obtained the object but was preempted before the
117 * initialization of the object was completed. This meanst that some entries
118 * of the allocated cell may already contain SCM objects. Therefore,
119 * allocated cells are scanned conservatively. */
121 allocated_mark (SCM allocated
)
123 scm_gc_mark_cell_conservatively (allocated
);
128 /* Assert that the given object is a valid reference to a valid cell. This
129 * test involves to determine whether the object is a cell pointer, whether
130 * this pointer actually points into a heap segment and whether the cell
131 * pointed to is not a free cell. Further, additional garbage collections may
132 * get executed after a user defined number of cell accesses. This helps to
133 * find places in the C code where references are dropped for extremely short
137 scm_assert_cell_valid (SCM cell
)
139 static unsigned int already_running
= 0;
141 if (scm_debug_cell_accesses_p
&& !already_running
)
143 already_running
= 1; /* set to avoid recursion */
145 if (!scm_cellp (cell
))
147 fprintf (stderr
, "scm_assert_cell_valid: Not a cell object: %lux\n",
148 (unsigned long) SCM_UNPACK (cell
));
151 else if (!scm_gc_running_p
)
153 /* Dirk::FIXME:: During garbage collection there occur references to
154 free cells. This is allright during conservative marking, but
155 should not happen otherwise (I think). The case of free cells
156 accessed during conservative marking is handled in function
157 scm_mark_locations. However, there still occur accesses to free
158 cells during gc. I don't understand why this happens. If it is
159 a bug and gets fixed, the following test should also work while
162 if (SCM_FREE_CELL_P (cell
))
164 fprintf (stderr
, "scm_assert_cell_valid: Accessing free cell: %lux\n",
165 (unsigned long) SCM_UNPACK (cell
));
169 /* If desired, perform additional garbage collections after a user
170 * defined number of cell accesses.
172 if (debug_cells_gc_interval
)
174 static unsigned int counter
= 0;
182 counter
= debug_cells_gc_interval
;
183 scm_igc ("scm_assert_cell_valid");
187 already_running
= 0; /* re-enable */
192 SCM_DEFINE (scm_set_debug_cell_accesses_x
, "set-debug-cell-accesses!", 1, 0, 0,
194 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
195 "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
196 "but no additional calls to garbage collection are issued.\n"
197 "If @var{flag} is a number, cell access checking is enabled,\n"
198 "with an additional garbage collection after the given\n"
199 "number of cell accesses.\n"
200 "This procedure only exists when the compile-time flag\n"
201 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
202 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
204 if (SCM_FALSEP (flag
)) {
205 scm_debug_cell_accesses_p
= 0;
206 } else if (SCM_EQ_P (flag
, SCM_BOOL_T
)) {
207 debug_cells_gc_interval
= 0;
208 scm_debug_cell_accesses_p
= 1;
209 } else if (SCM_INUMP (flag
)) {
210 long int f
= SCM_INUM (flag
);
211 if (f
<= 0) SCM_OUT_OF_RANGE (1, flag
);
212 debug_cells_gc_interval
= f
;
213 scm_debug_cell_accesses_p
= 1;
215 SCM_WRONG_TYPE_ARG (1, flag
);
217 return SCM_UNSPECIFIED
;
221 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
225 /* {heap tuning parameters}
227 * These are parameters for controlling memory allocation. The heap
228 * is the area out of which scm_cons, and object headers are allocated.
230 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
231 * 64 bit machine. The units of the _SIZE parameters are bytes.
232 * Cons pairs and object headers occupy one heap cell.
234 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
235 * allocated initially the heap will grow by half its current size
236 * each subsequent time more heap is needed.
238 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
239 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
240 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type size_t. This code
241 * is in scm_init_storage() and alloc_some_heap() in sys.c
243 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
244 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
246 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
249 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
252 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
253 * reclaimed by a GC triggered by must_malloc. If less than this is
254 * reclaimed, the trigger threshold is raised. [I don't know what a
255 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
256 * work around a oscillation that caused almost constant GC.]
260 * Heap size 45000 and 40% min yield gives quick startup and no extra
261 * heap allocation. Having higher values on min yield may lead to
262 * large heaps, especially if code behaviour is varying its
263 * maximum consumption between different freelists.
266 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
267 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
268 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
269 size_t scm_default_init_heap_size_1
= (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER
- 1)
270 / SCM_CARDS_PER_CLUSTER
) * SCM_GC_CARD_SIZE
);
271 int scm_default_min_yield_1
= 40;
273 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
274 size_t scm_default_init_heap_size_2
= (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER
- 1)
275 / SCM_CARDS_PER_CLUSTER
) * SCM_GC_CARD_SIZE
);
276 /* The following value may seem large, but note that if we get to GC at
277 * all, this means that we have a numerically intensive application
279 int scm_default_min_yield_2
= 40;
281 size_t scm_default_max_segment_size
= 2097000L;/* a little less (adm) than 2 Mb */
283 #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
285 # define SCM_HEAP_SEG_SIZE 32768L
288 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
290 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
293 /* Make heap grow with factor 1.5 */
294 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
295 #define SCM_INIT_MALLOC_LIMIT 100000
296 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
298 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
299 aligned inner bounds for allocated storage */
302 /*in 386 protected mode we must only adjust the offset */
303 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
304 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
307 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
308 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
310 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
311 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
315 #define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
317 #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
318 #define CLUSTER_SIZE_IN_BYTES(freelist) \
319 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
325 typedef struct scm_t_freelist
{
326 /* collected cells */
328 /* number of cells left to collect before cluster is full */
329 unsigned int left_to_collect
;
330 /* number of clusters which have been allocated */
331 unsigned int clusters_allocated
;
332 /* a list of freelists, each of size cluster_size,
333 * except the last one which may be shorter
337 /* this is the number of objects in each cluster, including the spine cell */
338 unsigned int cluster_size
;
339 /* indicates that we should grow heap instead of GC:ing
342 /* minimum yield on this list in order not to grow the heap
345 /* defines min_yield as percent of total heap size
347 int min_yield_fraction
;
348 /* number of cells per object on this list */
350 /* number of collected cells during last GC */
351 unsigned long collected
;
352 /* number of collected cells during penultimate GC */
353 unsigned long collected_1
;
354 /* total number of cells in heap segments
355 * belonging to this list.
357 unsigned long heap_size
;
360 SCM scm_freelist
= SCM_EOL
;
361 scm_t_freelist scm_master_freelist
= {
362 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_1
, 0, 0, 0, 1, 0, 0, 0
364 SCM scm_freelist2
= SCM_EOL
;
365 scm_t_freelist scm_master_freelist2
= {
366 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_2
, 0, 0, 0, 2, 0, 0, 0
370 * is the number of bytes of must_malloc allocation needed to trigger gc.
372 unsigned long scm_mtrigger
;
375 * If set, don't expand the heap. Set only during gc, during which no allocation
376 * is supposed to take place anyway.
378 int scm_gc_heap_lock
= 0;
381 * Don't pause for collection if this is set -- just
384 int scm_block_gc
= 1;
386 /* During collection, this accumulates objects holding
389 SCM scm_weak_vectors
;
391 /* During collection, this accumulates structures which are to be freed.
393 SCM scm_structs_to_free
;
395 /* GC Statistics Keeping
397 unsigned long scm_cells_allocated
= 0;
398 unsigned long scm_mallocated
= 0;
399 unsigned long scm_gc_cells_collected
;
400 unsigned long scm_gc_yield
;
401 static unsigned long scm_gc_yield_1
= 0; /* previous GC yield */
402 unsigned long scm_gc_malloc_collected
;
403 unsigned long scm_gc_ports_collected
;
404 unsigned long scm_gc_time_taken
= 0;
405 static unsigned long t_before_gc
;
406 static unsigned long t_before_sweep
;
407 unsigned long scm_gc_mark_time_taken
= 0;
408 unsigned long scm_gc_sweep_time_taken
= 0;
409 unsigned long scm_gc_times
= 0;
410 unsigned long scm_gc_cells_swept
= 0;
411 double scm_gc_cells_marked_acc
= 0.;
412 double scm_gc_cells_swept_acc
= 0.;
414 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
415 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
416 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
417 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
418 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
419 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
420 SCM_SYMBOL (sym_gc_mark_time_taken
, "gc-mark-time-taken");
421 SCM_SYMBOL (sym_gc_sweep_time_taken
, "gc-sweep-time-taken");
422 SCM_SYMBOL (sym_times
, "gc-times");
423 SCM_SYMBOL (sym_cells_marked
, "cells-marked");
424 SCM_SYMBOL (sym_cells_swept
, "cells-swept");
426 typedef struct scm_t_heap_seg_data
428 /* lower and upper bounds of the segment */
429 SCM_CELLPTR bounds
[2];
431 /* address of the head-of-freelist pointer for this segment's cells.
432 All segments usually point to the same one, scm_freelist. */
433 scm_t_freelist
*freelist
;
435 /* number of cells per object in this segment */
437 } scm_t_heap_seg_data
;
441 static size_t init_heap_seg (SCM_CELLPTR
, size_t, scm_t_freelist
*);
443 typedef enum { return_on_error
, abort_on_error
} policy_on_error
;
444 static void alloc_some_heap (scm_t_freelist
*, policy_on_error
);
447 #define SCM_HEAP_SIZE \
448 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
449 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
451 #define BVEC_GROW_SIZE 256
452 #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
453 #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))
455 /* mark space allocation */
457 typedef struct scm_t_mark_space
459 scm_t_c_bvec_limb
*bvec_space
;
460 struct scm_t_mark_space
*next
;
463 static scm_t_mark_space
*current_mark_space
;
464 static scm_t_mark_space
**mark_space_ptr
;
465 static ptrdiff_t current_mark_space_offset
;
466 static scm_t_mark_space
*mark_space_head
;
468 static scm_t_c_bvec_limb
*
470 #define FUNC_NAME "get_bvec"
472 scm_t_c_bvec_limb
*res
;
474 if (!current_mark_space
)
476 SCM_SYSCALL (current_mark_space
= (scm_t_mark_space
*) malloc (sizeof (scm_t_mark_space
)));
477 if (!current_mark_space
)
478 SCM_MISC_ERROR ("could not grow heap", SCM_EOL
);
480 current_mark_space
->bvec_space
= NULL
;
481 current_mark_space
->next
= NULL
;
483 *mark_space_ptr
= current_mark_space
;
484 mark_space_ptr
= &(current_mark_space
->next
);
489 if (!(current_mark_space
->bvec_space
))
491 SCM_SYSCALL (current_mark_space
->bvec_space
=
492 (scm_t_c_bvec_limb
*) calloc (BVEC_GROW_SIZE_IN_BYTES
, 1));
493 if (!(current_mark_space
->bvec_space
))
494 SCM_MISC_ERROR ("could not grow heap", SCM_EOL
);
496 current_mark_space_offset
= 0;
501 if (current_mark_space_offset
== BVEC_GROW_SIZE_IN_LIMBS
)
503 current_mark_space
= NULL
;
508 res
= current_mark_space
->bvec_space
+ current_mark_space_offset
;
509 current_mark_space_offset
+= SCM_GC_CARD_BVEC_SIZE_IN_LIMBS
;
519 scm_t_mark_space
*ms
;
521 for (ms
= mark_space_head
; ms
; ms
= ms
->next
)
522 memset (ms
->bvec_space
, 0, BVEC_GROW_SIZE_IN_BYTES
);
527 /* Debugging functions. */
529 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
532 map_free_list (scm_t_freelist
*master
, SCM freelist
)
534 long last_seg
= -1, count
= 0;
537 for (f
= freelist
; !SCM_NULLP (f
); f
= SCM_FREE_CELL_CDR (f
))
539 long int this_seg
= heap_segment (f
);
544 "map_free_list: can't find segment containing cell %lux\n",
545 (unsigned long int) SCM_UNPACK (cell
));
548 else if (this_seg
!= last_seg
)
551 fprintf (stderr
, " %5ld %d-cells in segment %ld\n",
552 (long) count
, master
->span
, (long) last_seg
);
559 fprintf (stderr
, " %5ld %d-cells in segment %ld\n",
560 (long) count
, master
->span
, (long) last_seg
);
563 SCM_DEFINE (scm_map_free_list
, "map-free-list", 0, 0, 0,
565 "Print debugging information about the free-list.\n"
566 "@code{map-free-list} is only included in\n"
567 "@code{--enable-guile-debug} builds of Guile.")
568 #define FUNC_NAME s_scm_map_free_list
572 fprintf (stderr
, "%ld segments total (%d:%ld",
573 (long) scm_n_heap_segs
,
574 scm_heap_table
[0].span
,
575 (long) (scm_heap_table
[0].bounds
[1] - scm_heap_table
[0].bounds
[0]));
577 for (i
= 1; i
!= scm_n_heap_segs
; i
++)
578 fprintf (stderr
, ", %d:%ld",
579 scm_heap_table
[i
].span
,
580 (long) (scm_heap_table
[i
].bounds
[1] - scm_heap_table
[i
].bounds
[0]));
581 fprintf (stderr
, ")\n");
582 map_free_list (&scm_master_freelist
, scm_freelist
);
583 map_free_list (&scm_master_freelist2
, scm_freelist2
);
586 return SCM_UNSPECIFIED
;
590 static long last_cluster
;
591 static long last_size
;
594 free_list_length (char *title
, long i
, SCM freelist
)
598 for (ls
= freelist
; !SCM_NULLP (ls
); ls
= SCM_FREE_CELL_CDR (ls
))
599 if (SCM_FREE_CELL_P (ls
))
603 fprintf (stderr
, "bad cell in %s at position %ld\n", title
, (long) n
);
610 if (last_cluster
== i
- 1)
611 fprintf (stderr
, "\t%ld\n", (long) last_size
);
613 fprintf (stderr
, "-%ld\t%ld\n", (long) (i
- 1), (long) last_size
);
616 fprintf (stderr
, "%s %ld", title
, (long) i
);
618 fprintf (stderr
, "%s\t%ld\n", title
, (long) n
);
626 free_list_lengths (char *title
, scm_t_freelist
*master
, SCM freelist
)
629 long i
= 0, len
, n
= 0;
630 fprintf (stderr
, "%s\n\n", title
);
631 n
+= free_list_length ("free list", -1, freelist
);
632 for (clusters
= master
->clusters
;
633 SCM_NNULLP (clusters
);
634 clusters
= SCM_CDR (clusters
))
636 len
= free_list_length ("cluster", i
++, SCM_CAR (clusters
));
639 if (last_cluster
== i
- 1)
640 fprintf (stderr
, "\t%ld\n", (long) last_size
);
642 fprintf (stderr
, "-%ld\t%ld\n", (long) (i
- 1), (long) last_size
);
643 fprintf (stderr
, "\ntotal %ld objects\n\n", (long) n
);
646 SCM_DEFINE (scm_free_list_length
, "free-list-length", 0, 0, 0,
648 "Print debugging information about the free-list.\n"
649 "@code{free-list-length} is only included in\n"
650 "@code{--enable-guile-debug} builds of Guile.")
651 #define FUNC_NAME s_scm_free_list_length
653 free_list_lengths ("1-cells", &scm_master_freelist
, scm_freelist
);
654 free_list_lengths ("2-cells", &scm_master_freelist2
, scm_freelist2
);
655 return SCM_UNSPECIFIED
;
661 #ifdef GUILE_DEBUG_FREELIST
663 /* Non-zero if freelist debugging is in effect. Set this via
664 `gc-set-debug-check-freelist!'. */
665 static int scm_debug_check_freelist
= 0;
667 /* Number of calls to SCM_NEWCELL since startup. */
668 static unsigned long scm_newcell_count
;
669 static unsigned long scm_newcell2_count
;
671 /* Search freelist for anything that isn't marked as a free cell.
672 Abort if we find something. */
674 scm_check_freelist (SCM freelist
)
679 for (f
= freelist
; !SCM_NULLP (f
); f
= SCM_FREE_CELL_CDR (f
), i
++)
680 if (!SCM_FREE_CELL_P (f
))
682 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %lu'th elt\n",
683 (long) scm_newcell_count
, (long) i
);
688 SCM_DEFINE (scm_gc_set_debug_check_freelist_x
, "gc-set-debug-check-freelist!", 1, 0, 0,
690 "If @var{flag} is @code{#t}, check the freelist for consistency\n"
691 "on each cell allocation. This procedure only exists when the\n"
692 "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
693 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
695 /* [cmm] I did a double-take when I read this code the first time.
697 SCM_VALIDATE_BOOL_COPY (1, flag
, scm_debug_check_freelist
);
698 return SCM_UNSPECIFIED
;
704 scm_debug_newcell (void)
709 if (scm_debug_check_freelist
)
711 scm_check_freelist (scm_freelist
);
715 /* The rest of this is supposed to be identical to the SCM_NEWCELL
717 if (SCM_NULLP (scm_freelist
))
719 new = scm_gc_for_newcell (&scm_master_freelist
, &scm_freelist
);
720 SCM_GC_SET_ALLOCATED (new);
725 scm_freelist
= SCM_FREE_CELL_CDR (scm_freelist
);
726 SCM_GC_SET_ALLOCATED (new);
733 scm_debug_newcell2 (void)
737 scm_newcell2_count
++;
738 if (scm_debug_check_freelist
)
740 scm_check_freelist (scm_freelist2
);
744 /* The rest of this is supposed to be identical to the SCM_NEWCELL
746 if (SCM_NULLP (scm_freelist2
))
748 new = scm_gc_for_newcell (&scm_master_freelist2
, &scm_freelist2
);
749 SCM_GC_SET_ALLOCATED (new);
754 scm_freelist2
= SCM_FREE_CELL_CDR (scm_freelist2
);
755 SCM_GC_SET_ALLOCATED (new);
761 #endif /* GUILE_DEBUG_FREELIST */
766 master_cells_allocated (scm_t_freelist
*master
)
768 /* the '- 1' below is to ignore the cluster spine cells. */
769 long objects
= master
->clusters_allocated
* (master
->cluster_size
- 1);
770 if (SCM_NULLP (master
->clusters
))
771 objects
-= master
->left_to_collect
;
772 return master
->span
* objects
;
776 freelist_length (SCM freelist
)
779 for (n
= 0; !SCM_NULLP (freelist
); freelist
= SCM_FREE_CELL_CDR (freelist
))
785 compute_cells_allocated ()
787 return (scm_cells_allocated
788 + master_cells_allocated (&scm_master_freelist
)
789 + master_cells_allocated (&scm_master_freelist2
)
790 - scm_master_freelist
.span
* freelist_length (scm_freelist
)
791 - scm_master_freelist2
.span
* freelist_length (scm_freelist2
));
794 /* {Scheme Interface to GC}
797 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
799 "Return an association list of statistics about Guile's current\n"
801 #define FUNC_NAME s_scm_gc_stats
806 unsigned long int local_scm_mtrigger
;
807 unsigned long int local_scm_mallocated
;
808 unsigned long int local_scm_heap_size
;
809 unsigned long int local_scm_cells_allocated
;
810 unsigned long int local_scm_gc_time_taken
;
811 unsigned long int local_scm_gc_times
;
812 unsigned long int local_scm_gc_mark_time_taken
;
813 unsigned long int local_scm_gc_sweep_time_taken
;
814 double local_scm_gc_cells_swept
;
815 double local_scm_gc_cells_marked
;
825 for (i
= scm_n_heap_segs
; i
--; )
826 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
827 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
829 if (scm_n_heap_segs
!= n
)
834 /* Below, we cons to produce the resulting list. We want a snapshot of
835 * the heap situation before consing.
837 local_scm_mtrigger
= scm_mtrigger
;
838 local_scm_mallocated
= scm_mallocated
;
839 local_scm_heap_size
= SCM_HEAP_SIZE
;
840 local_scm_cells_allocated
= compute_cells_allocated ();
841 local_scm_gc_time_taken
= scm_gc_time_taken
;
842 local_scm_gc_mark_time_taken
= scm_gc_mark_time_taken
;
843 local_scm_gc_sweep_time_taken
= scm_gc_sweep_time_taken
;
844 local_scm_gc_times
= scm_gc_times
;
845 local_scm_gc_cells_swept
= scm_gc_cells_swept_acc
;
846 local_scm_gc_cells_marked
= scm_gc_cells_marked_acc
;
848 answer
= scm_list_n (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
849 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
850 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
851 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
852 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
853 scm_cons (sym_times
, scm_ulong2num (local_scm_gc_times
)),
854 scm_cons (sym_gc_mark_time_taken
, scm_ulong2num (local_scm_gc_mark_time_taken
)),
855 scm_cons (sym_gc_sweep_time_taken
, scm_ulong2num (local_scm_gc_sweep_time_taken
)),
856 scm_cons (sym_cells_marked
, scm_i_dbl2big (local_scm_gc_cells_marked
)),
857 scm_cons (sym_cells_swept
, scm_i_dbl2big (local_scm_gc_cells_swept
)),
858 scm_cons (sym_heap_segments
, heap_segs
),
867 gc_start_stats (const char *what SCM_UNUSED
)
869 t_before_gc
= scm_c_get_internal_run_time ();
870 scm_gc_cells_swept
= 0;
871 scm_gc_cells_collected
= 0;
872 scm_gc_yield_1
= scm_gc_yield
;
873 scm_gc_yield
= (scm_cells_allocated
874 + master_cells_allocated (&scm_master_freelist
)
875 + master_cells_allocated (&scm_master_freelist2
));
876 scm_gc_malloc_collected
= 0;
877 scm_gc_ports_collected
= 0;
884 unsigned long t
= scm_c_get_internal_run_time ();
885 scm_gc_time_taken
+= (t
- t_before_gc
);
886 scm_gc_sweep_time_taken
+= (t
- t_before_sweep
);
889 scm_gc_cells_marked_acc
+= scm_gc_cells_swept
- scm_gc_cells_collected
;
890 scm_gc_cells_swept_acc
+= scm_gc_cells_swept
;
894 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
896 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
897 "returned by this function for @var{obj}")
898 #define FUNC_NAME s_scm_object_address
900 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj
));
905 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
907 "Scans all of SCM objects and reclaims for further use those that are\n"
908 "no longer accessible.")
909 #define FUNC_NAME s_scm_gc
914 return SCM_UNSPECIFIED
;
920 /* {C Interface For When GC is Triggered}
924 adjust_min_yield (scm_t_freelist
*freelist
)
926 /* min yield is adjusted upwards so that next predicted total yield
927 * (allocated cells actually freed by GC) becomes
928 * `min_yield_fraction' of total heap size. Note, however, that
929 * the absolute value of min_yield will correspond to `collected'
930 * on one master (the one which currently is triggering GC).
932 * The reason why we look at total yield instead of cells collected
933 * on one list is that we want to take other freelists into account.
934 * On this freelist, we know that (local) yield = collected cells,
935 * but that's probably not the case on the other lists.
937 * (We might consider computing a better prediction, for example
938 * by computing an average over multiple GC:s.)
940 if (freelist
->min_yield_fraction
)
942 /* Pick largest of last two yields. */
943 long delta
= ((SCM_HEAP_SIZE
* freelist
->min_yield_fraction
/ 100)
944 - (long) SCM_MAX (scm_gc_yield_1
, scm_gc_yield
));
946 fprintf (stderr
, " after GC = %lu, delta = %ld\n",
947 (long) scm_cells_allocated
,
951 freelist
->min_yield
+= delta
;
956 /* When we get POSIX threads support, the master will be global and
957 * common while the freelist will be individual for each thread.
961 scm_gc_for_newcell (scm_t_freelist
*master
, SCM
*freelist
)
967 if (SCM_NULLP (master
->clusters
))
969 if (master
->grow_heap_p
|| scm_block_gc
)
971 /* In order to reduce gc frequency, try to allocate a new heap
972 * segment first, even if gc might find some free cells. If we
973 * can't obtain a new heap segment, we will try gc later.
975 master
->grow_heap_p
= 0;
976 alloc_some_heap (master
, return_on_error
);
978 if (SCM_NULLP (master
->clusters
))
980 /* The heap was not grown, either because it wasn't scheduled to
981 * grow, or because there was not enough memory available. In
982 * both cases we have to try gc to get some free cells.
985 fprintf (stderr
, "allocated = %lu, ",
986 (long) (scm_cells_allocated
987 + master_cells_allocated (&scm_master_freelist
)
988 + master_cells_allocated (&scm_master_freelist2
)));
991 adjust_min_yield (master
);
992 if (SCM_NULLP (master
->clusters
))
994 /* gc could not free any cells. Now, we _must_ allocate a
995 * new heap segment, because there is no other possibility
996 * to provide a new cell for the caller.
998 alloc_some_heap (master
, abort_on_error
);
1002 cell
= SCM_CAR (master
->clusters
);
1003 master
->clusters
= SCM_CDR (master
->clusters
);
1004 ++master
->clusters_allocated
;
1006 while (SCM_NULLP (cell
));
1008 #ifdef GUILE_DEBUG_FREELIST
1009 scm_check_freelist (cell
);
1012 --scm_ints_disabled
;
1013 *freelist
= SCM_FREE_CELL_CDR (cell
);
1019 /* This is a support routine which can be used to reserve a cluster
1020 * for some special use, such as debugging. It won't be useful until
1021 * free cells are preserved between garbage collections.
1025 scm_alloc_cluster (scm_t_freelist
*master
)
1028 cell
= scm_gc_for_newcell (master
, &freelist
);
1029 SCM_SETCDR (cell
, freelist
);
1035 scm_t_c_hook scm_before_gc_c_hook
;
1036 scm_t_c_hook scm_before_mark_c_hook
;
1037 scm_t_c_hook scm_before_sweep_c_hook
;
1038 scm_t_c_hook scm_after_sweep_c_hook
;
1039 scm_t_c_hook scm_after_gc_c_hook
;
1043 scm_igc (const char *what
)
1048 scm_c_hook_run (&scm_before_gc_c_hook
, 0);
1051 SCM_NULLP (scm_freelist
)
1053 : (SCM_NULLP (scm_freelist2
) ? "o" : "m"));
1055 /* During the critical section, only the current thread may run. */
1056 SCM_CRITICAL_SECTION_START
;
1058 /* fprintf (stderr, "gc: %s\n", what); */
1060 if (!scm_stack_base
|| scm_block_gc
)
1066 gc_start_stats (what
);
1068 if (scm_gc_heap_lock
)
1069 /* We've invoked the collector while a GC is already in progress.
1070 That should never happen. */
1075 /* flush dead entries from the continuation stack */
1080 elts
= SCM_VELTS (scm_continuation_stack
);
1081 bound
= SCM_VECTOR_LENGTH (scm_continuation_stack
);
1082 x
= SCM_INUM (scm_continuation_stack_ptr
);
1085 elts
[x
] = SCM_BOOL_F
;
1090 scm_c_hook_run (&scm_before_mark_c_hook
, 0);
1092 clear_mark_space ();
1096 /* Mark objects on the C stack. */
1097 SCM_FLUSH_REGISTER_WINDOWS
;
1098 /* This assumes that all registers are saved into the jmp_buf */
1099 setjmp (scm_save_regs_gc_mark
);
1100 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
1101 ( (size_t) (sizeof (SCM_STACKITEM
) - 1 +
1102 sizeof scm_save_regs_gc_mark
)
1103 / sizeof (SCM_STACKITEM
)));
1106 unsigned long stack_len
= scm_stack_size (scm_stack_base
);
1107 #ifdef SCM_STACK_GROWS_UP
1108 scm_mark_locations (scm_stack_base
, stack_len
);
1110 scm_mark_locations (scm_stack_base
- stack_len
, stack_len
);
1114 #else /* USE_THREADS */
1116 /* Mark every thread's stack and registers */
1117 scm_threads_mark_stacks ();
1119 #endif /* USE_THREADS */
1121 j
= SCM_NUM_PROTECTS
;
1123 scm_gc_mark (scm_sys_protects
[j
]);
1125 /* mark the registered roots */
1128 for (i
= 0; i
< SCM_VECTOR_LENGTH (scm_gc_registered_roots
); ++i
) {
1129 SCM l
= SCM_VELTS (scm_gc_registered_roots
)[i
];
1130 for (; !SCM_NULLP (l
); l
= SCM_CDR (l
)) {
1131 SCM
*p
= (SCM
*) (scm_num2long (SCM_CAAR (l
), 0, NULL
));
1137 /* FIXME: we should have a means to register C functions to be run
1138 * in different phases of GC
1140 scm_mark_subr_table ();
1143 scm_gc_mark (scm_root
->handle
);
1146 t_before_sweep
= scm_c_get_internal_run_time ();
1147 scm_gc_mark_time_taken
+= (t_before_sweep
- t_before_gc
);
1149 scm_c_hook_run (&scm_before_sweep_c_hook
, 0);
1153 scm_c_hook_run (&scm_after_sweep_c_hook
, 0);
1158 SCM_CRITICAL_SECTION_END
;
1159 scm_c_hook_run (&scm_after_gc_c_hook
, 0);
1168 #define MARK scm_gc_mark
1169 #define FNAME "scm_gc_mark"
1171 #endif /*!MARK_DEPENDENCIES*/
1173 /* Mark an object precisely.
1177 #define FUNC_NAME FNAME
1181 scm_t_bits cell_type
;
1183 #ifndef MARK_DEPENDENCIES
1184 # define RECURSE scm_gc_mark
1186 /* go through the usual marking, but not for self-cycles. */
1187 # define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1191 #ifdef MARK_DEPENDENCIES
1192 goto gc_mark_loop_first_time
;
1195 /* A simple hack for debugging. Chose the second branch to get a
1196 meaningful backtrace for crashes inside the GC.
1199 #define goto_gc_mark_loop goto gc_mark_loop
1200 #define goto_gc_mark_nimp goto gc_mark_nimp
1202 #define goto_gc_mark_loop RECURSE(ptr); return
1203 #define goto_gc_mark_nimp RECURSE(ptr); return
1212 #ifdef MARK_DEPENDENCIES
1213 if (SCM_EQ_P (ptr
, p
))
1219 gc_mark_loop_first_time
:
1222 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1223 /* We are in debug mode. Check the ptr exhaustively. */
1224 if (!scm_cellp (ptr
))
1225 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL
);
1227 /* In non-debug mode, do at least some cheap testing. */
1228 if (!SCM_CELLP (ptr
))
1229 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL
);
1232 #ifndef MARK_DEPENDENCIES
1234 if (SCM_GCMARKP (ptr
))
1237 SCM_SETGCMARK (ptr
);
1241 cell_type
= SCM_GC_CELL_TYPE (ptr
);
1242 switch (SCM_ITAG7 (cell_type
))
1244 case scm_tcs_cons_nimcar
:
1245 if (SCM_IMP (SCM_CDR (ptr
)))
1247 ptr
= SCM_CAR (ptr
);
1250 RECURSE (SCM_CAR (ptr
));
1251 ptr
= SCM_CDR (ptr
);
1253 case scm_tcs_cons_imcar
:
1254 ptr
= SCM_CDR (ptr
);
1257 RECURSE (SCM_SETTER (ptr
));
1258 ptr
= SCM_PROCEDURE (ptr
);
1260 case scm_tcs_cons_gloc
:
1262 /* Dirk:FIXME:: The following code is super ugly: ptr may be a
1263 * struct or a gloc. If it is a gloc, the cell word #0 of ptr
1264 * is the address of a scm_tc16_variable smob. If it is a
1265 * struct, the cell word #0 of ptr is a pointer to a struct
1266 * vtable data region. (The fact that these are accessed in
1267 * the same way restricts the possibilites to change the data
1268 * layout of structs or heap cells.) To discriminate between
1269 * the two, it is guaranteed that the scm_vtable_index_vcell
1270 * element of the prospective vtable is always zero. For a
1271 * gloc, this location has the CDR of the variable smob, which
1272 * is guaranteed to be non-zero.
1274 scm_t_bits word0
= SCM_CELL_WORD_0 (ptr
) - scm_tc3_cons_gloc
;
1275 scm_t_bits
* vtable_data
= (scm_t_bits
*) word0
; /* access as struct */
1276 if (vtable_data
[scm_vtable_index_vcell
] != 0)
1279 SCM gloc_car
= SCM_PACK (word0
);
1281 ptr
= SCM_CDR (ptr
);
1286 /* ptr is a struct */
1287 SCM layout
= SCM_PACK (vtable_data
[scm_vtable_index_layout
]);
1288 long len
= SCM_SYMBOL_LENGTH (layout
);
1289 char * fields_desc
= SCM_SYMBOL_CHARS (layout
);
1290 scm_t_bits
* struct_data
= (scm_t_bits
*) SCM_STRUCT_DATA (ptr
);
1292 if (vtable_data
[scm_struct_i_flags
] & SCM_STRUCTF_ENTITY
)
1294 RECURSE (SCM_PACK (struct_data
[scm_struct_i_procedure
]));
1295 RECURSE (SCM_PACK (struct_data
[scm_struct_i_setter
]));
1301 for (x
= 0; x
< len
- 2; x
+= 2, ++struct_data
)
1302 if (fields_desc
[x
] == 'p')
1303 RECURSE (SCM_PACK (*struct_data
));
1304 if (fields_desc
[x
] == 'p')
1306 if (SCM_LAYOUT_TAILP (fields_desc
[x
+ 1]))
1307 for (x
= *struct_data
++; x
; --x
, ++struct_data
)
1308 RECURSE (SCM_PACK (*struct_data
));
1310 RECURSE (SCM_PACK (*struct_data
));
1314 ptr
= SCM_PACK (vtable_data
[scm_vtable_index_vtable
]);
1319 case scm_tcs_closures
:
1320 if (SCM_IMP (SCM_ENV (ptr
)))
1322 ptr
= SCM_CLOSCAR (ptr
);
1325 RECURSE (SCM_CLOSCAR (ptr
));
1326 ptr
= SCM_ENV (ptr
);
1328 case scm_tc7_vector
:
1329 i
= SCM_VECTOR_LENGTH (ptr
);
1333 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
1334 RECURSE (SCM_VELTS (ptr
)[i
]);
1335 ptr
= SCM_VELTS (ptr
)[0];
1340 size_t i
= SCM_CCLO_LENGTH (ptr
);
1342 for (j
= 1; j
!= i
; ++j
)
1344 SCM obj
= SCM_CCLO_REF (ptr
, j
);
1348 ptr
= SCM_CCLO_REF (ptr
, 0);
1354 case scm_tc7_byvect
:
1361 #ifdef HAVE_LONG_LONGS
1362 case scm_tc7_llvect
:
1365 case scm_tc7_string
:
1368 case scm_tc7_substring
:
1369 ptr
= SCM_CDR (ptr
);
1373 SCM_SET_WVECT_GC_CHAIN (ptr
, scm_weak_vectors
);
1374 scm_weak_vectors
= ptr
;
1375 if (SCM_IS_WHVEC_ANY (ptr
))
1382 len
= SCM_VECTOR_LENGTH (ptr
);
1383 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
1384 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
1386 for (x
= 0; x
< len
; ++x
)
1389 alist
= SCM_VELTS (ptr
)[x
];
1391 /* mark everything on the alist except the keys or
1392 * values, according to weak_values and weak_keys. */
1393 while ( SCM_CONSP (alist
)
1394 && !SCM_GCMARKP (alist
)
1395 && SCM_CONSP (SCM_CAR (alist
)))
1400 kvpair
= SCM_CAR (alist
);
1401 next_alist
= SCM_CDR (alist
);
1404 * SCM_SETGCMARK (alist);
1405 * SCM_SETGCMARK (kvpair);
1407 * It may be that either the key or value is protected by
1408 * an escaped reference to part of the spine of this alist.
1409 * If we mark the spine here, and only mark one or neither of the
1410 * key and value, they may never be properly marked.
1411 * This leads to a horrible situation in which an alist containing
1412 * freelist cells is exported.
1414 * So only mark the spines of these arrays last of all marking.
1415 * If somebody confuses us by constructing a weak vector
1416 * with a circular alist then we are hosed, but at least we
1417 * won't prematurely drop table entries.
1420 RECURSE (SCM_CAR (kvpair
));
1422 RECURSE (SCM_CDR (kvpair
));
1425 if (SCM_NIMP (alist
))
1431 case scm_tc7_symbol
:
1432 ptr
= SCM_PROP_SLOTS (ptr
);
1434 case scm_tc7_variable
:
1435 ptr
= SCM_CELL_OBJECT_1 (ptr
);
1440 i
= SCM_PTOBNUM (ptr
);
1441 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1442 if (!(i
< scm_numptob
))
1443 SCM_MISC_ERROR ("undefined port type", SCM_EOL
);
1445 if (SCM_PTAB_ENTRY(ptr
))
1446 RECURSE (SCM_FILENAME (ptr
));
1447 if (scm_ptobs
[i
].mark
)
1449 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
1456 switch (SCM_TYP16 (ptr
))
1457 { /* should be faster than going through scm_smobs */
1458 case scm_tc_free_cell
:
1459 /* We have detected a free cell. This can happen if non-object data
1460 * on the C stack points into guile's heap and is scanned during
1461 * conservative marking. */
1462 #if (SCM_DEBUG_CELL_ACCESSES == 0)
1463 /* If cell debugging is disabled, there is a second situation in
1464 * which a free cell can be encountered, namely if with preemptive
1465 * threading one thread has just obtained a fresh cell and was
1466 * preempted before the cell initialization was completed. In this
1467 * case, some entries of the cell may already contain objects.
1468 * Thus, if cell debugging is disabled, free cells are scanned
1469 * conservatively. */
1470 scm_gc_mark_cell_conservatively (ptr
);
1471 #else /* SCM_DEBUG_CELL_ACCESSES == 1 */
1472 /* With cell debugging enabled, a freshly obtained but not fully
1473 * initialized cell is guaranteed to be of type scm_tc16_allocated.
1474 * Thus, no conservative scanning for free cells is necessary, but
1475 * instead cells of type scm_tc16_allocated have to be scanned
1476 * conservatively. This is done in the mark function of the
1477 * scm_tc16_allocated smob type. */
1482 case scm_tc16_complex
:
1485 i
= SCM_SMOBNUM (ptr
);
1486 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1487 if (!(i
< scm_numsmob
))
1488 SCM_MISC_ERROR ("undefined smob type", SCM_EOL
);
1490 if (scm_smobs
[i
].mark
)
1492 ptr
= (scm_smobs
[i
].mark
) (ptr
);
1500 SCM_MISC_ERROR ("unknown type", SCM_EOL
);
1506 #ifndef MARK_DEPENDENCIES
1511 /* And here we define `scm_gc_mark_dependencies', by including this
1512 * same file in itself.
1514 #define MARK scm_gc_mark_dependencies
1515 #define FNAME "scm_gc_mark_dependencies"
1516 #define MARK_DEPENDENCIES
1518 #undef MARK_DEPENDENCIES
1523 /* Determine whether the given value does actually represent a cell in some
1524 * heap segment. If this is the case, the number of the heap segment is
1525 * returned. Otherwise, -1 is returned. Binary search is used in order to
1526 * determine the heap segment that contains the cell.*/
1527 /* FIXME: To be used within scm_gc_mark_cell_conservatively,
1528 * scm_mark_locations and scm_cellp this function should be an inline
1531 heap_segment (SCM obj
)
1533 if (!SCM_CELLP (obj
))
1537 SCM_CELLPTR ptr
= SCM2PTR (obj
);
1538 unsigned long int i
= 0;
1539 unsigned long int j
= scm_n_heap_segs
- 1;
1541 if (SCM_PTR_LT (ptr
, scm_heap_table
[i
].bounds
[0]))
1543 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[1], ptr
))
1549 if (SCM_PTR_LT (ptr
, scm_heap_table
[i
].bounds
[1]))
1553 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1560 unsigned long int k
= (i
+ j
) / 2;
1564 else if (SCM_PTR_LT (ptr
, scm_heap_table
[k
].bounds
[1]))
1568 if (SCM_PTR_LT (ptr
, scm_heap_table
[i
].bounds
[0]))
1571 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1575 if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[1], ptr
))
1581 if (!DOUBLECELL_ALIGNED_P (obj
) && scm_heap_table
[i
].span
== 2)
1583 else if (SCM_GC_IN_CARD_HEADERP (ptr
))
1592 /* Mark the entries of a cell conservatively. The given cell is known to be
1593 * on the heap. Still we have to determine its heap segment in order to
1594 * figure out whether it is a single or a double cell. Then, each of the cell
1595 * elements itself is checked and potentially marked. */
1597 scm_gc_mark_cell_conservatively (SCM cell
)
1599 unsigned long int cell_segment
= heap_segment (cell
);
1600 unsigned int span
= scm_heap_table
[cell_segment
].span
;
1603 for (i
= 1; i
!= span
* 2; ++i
)
1605 SCM obj
= SCM_CELL_OBJECT (cell
, i
);
1606 long int obj_segment
= heap_segment (obj
);
1607 if (obj_segment
>= 0)
1613 /* Mark a region conservatively */
1615 scm_mark_locations (SCM_STACKITEM x
[], unsigned long n
)
1619 for (m
= 0; m
< n
; ++m
)
1621 SCM obj
= * (SCM
*) &x
[m
];
1622 long int segment
= heap_segment (obj
);
1629 /* The function scm_cellp determines whether an SCM value can be regarded as a
1630 * pointer to a cell on the heap.
1633 scm_cellp (SCM value
)
1635 long int segment
= heap_segment (value
);
1636 return (segment
>= 0);
1641 gc_sweep_freelist_start (scm_t_freelist
*freelist
)
1643 freelist
->cells
= SCM_EOL
;
1644 freelist
->left_to_collect
= freelist
->cluster_size
;
1645 freelist
->clusters_allocated
= 0;
1646 freelist
->clusters
= SCM_EOL
;
1647 freelist
->clustertail
= &freelist
->clusters
;
1648 freelist
->collected_1
= freelist
->collected
;
1649 freelist
->collected
= 0;
1653 gc_sweep_freelist_finish (scm_t_freelist
*freelist
)
1656 *freelist
->clustertail
= freelist
->cells
;
1657 if (!SCM_NULLP (freelist
->cells
))
1659 SCM c
= freelist
->cells
;
1660 SCM_SET_CELL_WORD_0 (c
, SCM_FREE_CELL_CDR (c
));
1661 SCM_SET_CELL_WORD_1 (c
, SCM_EOL
);
1662 freelist
->collected
+=
1663 freelist
->span
* (freelist
->cluster_size
- freelist
->left_to_collect
);
1665 scm_gc_cells_collected
+= freelist
->collected
;
1667 /* Although freelist->min_yield is used to test freelist->collected
1668 * (which is the local GC yield for freelist), it is adjusted so
1669 * that *total* yield is freelist->min_yield_fraction of total heap
1670 * size. This means that a too low yield is compensated by more
1671 * heap on the list which is currently doing most work, which is
1672 * just what we want.
1674 collected
= SCM_MAX (freelist
->collected_1
, freelist
->collected
);
1675 freelist
->grow_heap_p
= (collected
< freelist
->min_yield
);
1678 #define NEXT_DATA_CELL(ptr, span) \
1680 scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1681 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1682 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1688 #define FUNC_NAME "scm_gc_sweep"
1690 register SCM_CELLPTR ptr
;
1691 register SCM nfreelist
;
1692 register scm_t_freelist
*freelist
;
1693 register unsigned long m
;
1700 gc_sweep_freelist_start (&scm_master_freelist
);
1701 gc_sweep_freelist_start (&scm_master_freelist2
);
1703 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1705 register long left_to_collect
;
1708 /* Unmarked cells go onto the front of the freelist this heap
1709 segment points to. Rather than updating the real freelist
1710 pointer as we go along, we accumulate the new head in
1711 nfreelist. Then, if it turns out that the entire segment is
1712 free, we free (i.e., malloc's free) the whole segment, and
1713 simply don't assign nfreelist back into the real freelist. */
1714 freelist
= scm_heap_table
[i
].freelist
;
1715 nfreelist
= freelist
->cells
;
1716 left_to_collect
= freelist
->left_to_collect
;
1717 span
= scm_heap_table
[i
].span
;
1719 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0], span
);
1720 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1], span
) - ptr
;
1722 /* use only data cells in seg_size */
1723 seg_size
= (seg_size
/ SCM_GC_CARD_N_CELLS
) * (SCM_GC_CARD_N_DATA_CELLS
/ span
) * span
;
1725 scm_gc_cells_swept
+= seg_size
;
1727 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
1731 if (SCM_GC_IN_CARD_HEADERP (ptr
))
1737 NEXT_DATA_CELL (nxt
, span
);
1744 scmptr
= PTR2SCM (ptr
);
1746 if (SCM_GCMARKP (scmptr
))
1749 switch SCM_TYP7 (scmptr
)
1751 case scm_tcs_cons_gloc
:
1753 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1754 * struct or a gloc. See the corresponding comment in
1757 scm_t_bits word0
= (SCM_CELL_WORD_0 (scmptr
)
1758 - scm_tc3_cons_gloc
);
1759 /* access as struct */
1760 scm_t_bits
* vtable_data
= (scm_t_bits
*) word0
;
1761 if (vtable_data
[scm_vtable_index_vcell
] == 0)
1763 /* Structs need to be freed in a special order.
1764 * This is handled by GC C hooks in struct.c.
1766 SCM_SET_STRUCT_GC_CHAIN (scmptr
, scm_structs_to_free
);
1767 scm_structs_to_free
= scmptr
;
1770 /* fall through so that scmptr gets collected */
1773 case scm_tcs_cons_imcar
:
1774 case scm_tcs_cons_nimcar
:
1775 case scm_tcs_closures
:
1779 case scm_tc7_vector
:
1781 unsigned long int length
= SCM_VECTOR_LENGTH (scmptr
);
1784 m
+= length
* sizeof (scm_t_bits
);
1785 scm_must_free (SCM_VECTOR_BASE (scmptr
));
1791 m
+= (SCM_CCLO_LENGTH (scmptr
) * sizeof (SCM
));
1792 scm_must_free (SCM_CCLO_BASE (scmptr
));
1798 unsigned long int length
= SCM_BITVECTOR_LENGTH (scmptr
);
1801 m
+= sizeof (long) * ((length
+ SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
1802 scm_must_free (SCM_BITVECTOR_BASE (scmptr
));
1806 case scm_tc7_byvect
:
1810 #ifdef HAVE_LONG_LONGS
1811 case scm_tc7_llvect
:
1816 m
+= SCM_UVECTOR_LENGTH (scmptr
) * scm_uniform_element_size (scmptr
);
1817 scm_must_free (SCM_UVECTOR_BASE (scmptr
));
1820 case scm_tc7_substring
:
1822 case scm_tc7_string
:
1823 m
+= SCM_STRING_LENGTH (scmptr
) + 1;
1824 scm_must_free (SCM_STRING_CHARS (scmptr
));
1826 case scm_tc7_symbol
:
1827 m
+= SCM_SYMBOL_LENGTH (scmptr
) + 1;
1828 scm_must_free (SCM_SYMBOL_CHARS (scmptr
));
1831 /* the various "subrs" (primitives) are never freed */
1834 if SCM_OPENP (scmptr
)
1836 int k
= SCM_PTOBNUM (scmptr
);
1837 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1838 if (!(k
< scm_numptob
))
1839 SCM_MISC_ERROR ("undefined port type", SCM_EOL
);
1841 /* Keep "revealed" ports alive. */
1842 if (scm_revealed_count (scmptr
) > 0)
1844 /* Yes, I really do mean scm_ptobs[k].free */
1845 /* rather than ftobs[k].close. .close */
1846 /* is for explicit CLOSE-PORT by user */
1847 m
+= (scm_ptobs
[k
].free
) (scmptr
);
1848 SCM_SETSTREAM (scmptr
, 0);
1849 scm_remove_from_port_table (scmptr
);
1850 scm_gc_ports_collected
++;
1851 SCM_CLR_PORT_OPEN_FLAG (scmptr
);
1855 switch SCM_TYP16 (scmptr
)
1857 case scm_tc_free_cell
:
1862 m
+= (SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
/ SCM_CHAR_BIT
);
1863 scm_must_free (SCM_BDIGITS (scmptr
));
1865 #endif /* def SCM_BIGDIG */
1866 case scm_tc16_complex
:
1867 m
+= sizeof (scm_t_complex
);
1868 scm_must_free (SCM_COMPLEX_MEM (scmptr
));
1873 k
= SCM_SMOBNUM (scmptr
);
1874 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1875 if (!(k
< scm_numsmob
))
1876 SCM_MISC_ERROR ("undefined smob type", SCM_EOL
);
1878 if (scm_smobs
[k
].free
)
1879 m
+= (scm_smobs
[k
].free
) (scmptr
);
1885 SCM_MISC_ERROR ("unknown type", SCM_EOL
);
1888 if (!--left_to_collect
)
1890 SCM_SET_CELL_WORD_0 (scmptr
, nfreelist
);
1891 *freelist
->clustertail
= scmptr
;
1892 freelist
->clustertail
= SCM_CDRLOC (scmptr
);
1894 nfreelist
= SCM_EOL
;
1895 freelist
->collected
+= span
* freelist
->cluster_size
;
1896 left_to_collect
= freelist
->cluster_size
;
1900 /* Stick the new cell on the front of nfreelist. It's
1901 critical that we mark this cell as freed; otherwise, the
1902 conservative collector might trace it as some other type
1904 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
1905 SCM_SET_FREE_CELL_CDR (scmptr
, nfreelist
);
1910 #ifdef GC_FREE_SEGMENTS
1915 freelist
->heap_size
-= seg_size
;
1916 free ((char *) scm_heap_table
[i
].bounds
[0]);
1917 scm_heap_table
[i
].bounds
[0] = 0;
1918 for (j
= i
+ 1; j
< scm_n_heap_segs
; j
++)
1919 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1920 scm_n_heap_segs
-= 1;
1921 i
--; /* We need to scan the segment just moved. */
1924 #endif /* ifdef GC_FREE_SEGMENTS */
1926 /* Update the real freelist pointer to point to the head of
1927 the list of free cells we've built for this segment. */
1928 freelist
->cells
= nfreelist
;
1929 freelist
->left_to_collect
= left_to_collect
;
1932 #ifdef GUILE_DEBUG_FREELIST
1933 scm_map_free_list ();
1937 gc_sweep_freelist_finish (&scm_master_freelist
);
1938 gc_sweep_freelist_finish (&scm_master_freelist2
);
1940 /* When we move to POSIX threads private freelists should probably
1941 be GC-protected instead. */
1942 scm_freelist
= SCM_EOL
;
1943 scm_freelist2
= SCM_EOL
;
1945 scm_cells_allocated
= (SCM_HEAP_SIZE
- scm_gc_cells_collected
);
1946 scm_gc_yield
-= scm_cells_allocated
;
1948 if (scm_mallocated
< m
)
1949 /* The byte count of allocated objects has underflowed. This is
1950 probably because you forgot to report the sizes of objects you
1951 have allocated, by calling scm_done_malloc or some such. When
1952 the GC freed them, it subtracted their size from
1953 scm_mallocated, which underflowed. */
1956 scm_mallocated
-= m
;
1957 scm_gc_malloc_collected
= m
;
1963 /* {Front end to malloc}
1965 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1968 * These functions provide services comparable to malloc, realloc, and
1969 * free. They should be used when allocating memory that will be under
1970 * control of the garbage collector, i.e., if the memory may be freed
1971 * during garbage collection.
1975 * Return newly malloced storage or throw an error.
1977 * The parameter WHAT is a string for error reporting.
1978 * If the threshold scm_mtrigger will be passed by this
1979 * allocation, or if the first call to malloc fails,
1980 * garbage collect -- on the presumption that some objects
1981 * using malloced storage may be collected.
1983 * The limit scm_mtrigger may be raised by this allocation.
1986 scm_must_malloc (size_t size
, const char *what
)
1989 unsigned long nm
= scm_mallocated
+ size
;
1992 /* The byte count of allocated objects has overflowed. This is
1993 probably because you forgot to report the correct size of freed
1994 memory in some of your smob free methods. */
1997 if (nm
<= scm_mtrigger
)
1999 SCM_SYSCALL (ptr
= malloc (size
));
2002 scm_mallocated
= nm
;
2003 #ifdef GUILE_DEBUG_MALLOC
2004 scm_malloc_register (ptr
, what
);
2012 nm
= scm_mallocated
+ size
;
2015 /* The byte count of allocated objects has overflowed. This is
2016 probably because you forgot to report the correct size of freed
2017 memory in some of your smob free methods. */
2020 SCM_SYSCALL (ptr
= malloc (size
));
2023 scm_mallocated
= nm
;
2024 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
2025 if (nm
> scm_mtrigger
)
2026 scm_mtrigger
= nm
+ nm
/ 2;
2028 scm_mtrigger
+= scm_mtrigger
/ 2;
2030 #ifdef GUILE_DEBUG_MALLOC
2031 scm_malloc_register (ptr
, what
);
2037 scm_memory_error (what
);
2042 * is similar to scm_must_malloc.
2045 scm_must_realloc (void *where
,
2053 if (size
<= old_size
)
2056 nm
= scm_mallocated
+ size
- old_size
;
2058 if (nm
< (size
- old_size
))
2059 /* The byte count of allocated objects has overflowed. This is
2060 probably because you forgot to report the correct size of freed
2061 memory in some of your smob free methods. */
2064 if (nm
<= scm_mtrigger
)
2066 SCM_SYSCALL (ptr
= realloc (where
, size
));
2069 scm_mallocated
= nm
;
2070 #ifdef GUILE_DEBUG_MALLOC
2071 scm_malloc_reregister (where
, ptr
, what
);
2079 nm
= scm_mallocated
+ size
- old_size
;
2081 if (nm
< (size
- old_size
))
2082 /* The byte count of allocated objects has overflowed. This is
2083 probably because you forgot to report the correct size of freed
2084 memory in some of your smob free methods. */
2087 SCM_SYSCALL (ptr
= realloc (where
, size
));
2090 scm_mallocated
= nm
;
2091 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
2092 if (nm
> scm_mtrigger
)
2093 scm_mtrigger
= nm
+ nm
/ 2;
2095 scm_mtrigger
+= scm_mtrigger
/ 2;
2097 #ifdef GUILE_DEBUG_MALLOC
2098 scm_malloc_reregister (where
, ptr
, what
);
2103 scm_memory_error (what
);
/* Return a freshly allocated, NUL-terminated copy of the first LENGTH bytes
 * of STR.  The copy is allocated with scm_must_malloc, so allocation failure
 * is handled by the GC / error machinery; the caller owns the result and
 * must release it with scm_must_free. */
char *
scm_must_strndup (const char *str, size_t length)
{
  char * dst = scm_must_malloc (length + 1, "scm_must_strndup");
  memcpy (dst, str, length);
  dst[length] = 0;  /* always NUL-terminate, even if STR is longer */
  return dst;
}
/* Return a freshly allocated copy of the NUL-terminated string STR.
 * Caller owns the result; release with scm_must_free. */
char *
scm_must_strdup (const char *str)
{
  return scm_must_strndup (str, strlen (str));
}
2122 scm_must_free (void *obj
)
2123 #define FUNC_NAME "scm_must_free"
2125 #ifdef GUILE_DEBUG_MALLOC
2126 scm_malloc_unregister (obj
);
2131 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL
);
2136 /* Announce that there has been some malloc done that will be freed
2137 * during gc. A typical use is for a smob that uses some malloced
2138 * memory but can not get it from scm_must_malloc (for whatever
2139 * reason). When a new object of this smob is created you call
2140 * scm_done_malloc with the size of the object. When your smob free
2141 * function is called, be sure to include this size in the return
2144 * If you can't actually free the memory in the smob free function,
2145 * for whatever reason (like reference counting), you still can (and
2146 * should) report the amount of memory freed when you actually free it.
2147 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
2148 * eh? Or even better, call scm_done_free. */
2151 scm_done_malloc (long size
)
2154 if (scm_mallocated
< size
)
2155 /* The byte count of allocated objects has underflowed. This is
2156 probably because you forgot to report the sizes of objects you
2157 have allocated, by calling scm_done_malloc or some such. When
2158 the GC freed them, it subtracted their size from
2159 scm_mallocated, which underflowed. */
2162 unsigned long nm
= scm_mallocated
+ size
;
2164 /* The byte count of allocated objects has overflowed. This is
2165 probably because you forgot to report the correct size of freed
2166 memory in some of your smob free methods. */
2170 scm_mallocated
+= size
;
2172 if (scm_mallocated
> scm_mtrigger
)
2174 scm_igc ("foreign mallocs");
2175 if (scm_mallocated
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
)
2177 if (scm_mallocated
> scm_mtrigger
)
2178 scm_mtrigger
= scm_mallocated
+ scm_mallocated
/ 2;
2180 scm_mtrigger
+= scm_mtrigger
/ 2;
2186 scm_done_free (long size
)
2189 if (scm_mallocated
< size
)
2190 /* The byte count of allocated objects has underflowed. This is
2191 probably because you forgot to report the sizes of objects you
2192 have allocated, by calling scm_done_malloc or some such. When
2193 the GC freed them, it subtracted their size from
2194 scm_mallocated, which underflowed. */
2197 unsigned long nm
= scm_mallocated
+ size
;
2199 /* The byte count of allocated objects has overflowed. This is
2200 probably because you forgot to report the correct size of freed
2201 memory in some of your smob free methods. */
2205 scm_mallocated
-= size
;
2212 * Each heap segment is an array of objects of a particular size.
2213 * Every segment has an associated (possibly shared) freelist.
2214 * A table of segment records is kept that records the upper and
2215 * lower extents of the segment; this is used during the conservative
2216 * phase of gc to identify probably gc roots (because they point
2217 * into valid segments at reasonable offsets). */
2220 * is true if the first segment was smaller than INIT_HEAP_SEG.
2221 * If scm_expmem is set to one, subsequent segment allocations will
2222 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2226 size_t scm_max_segment_size
;
2229 * is the lowest base address of any heap segment.
2231 SCM_CELLPTR scm_heap_org
;
2233 scm_t_heap_seg_data
* scm_heap_table
= 0;
2234 static size_t heap_segment_table_size
= 0;
2235 size_t scm_n_heap_segs
= 0;
2238 * initializes a new heap segment and returns the number of objects it contains.
2240 * The segment origin and segment size in bytes are input parameters.
2241 * The freelist is both input and output.
2243 * This function presumes that the scm_heap_table has already been expanded
2244 * to accomodate a new segment record and that the markbit space was reserved
2245 * for all the cards in this segment.
2248 #define INIT_CARD(card, span) \
2250 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2252 SCM_GC_SET_CARD_DOUBLECELL (card); \
2256 init_heap_seg (SCM_CELLPTR seg_org
, size_t size
, scm_t_freelist
*freelist
)
2258 register SCM_CELLPTR ptr
;
2259 SCM_CELLPTR seg_end
;
2260 size_t new_seg_index
;
2261 ptrdiff_t n_new_cells
;
2262 int span
= freelist
->span
;
2264 if (seg_org
== NULL
)
2267 /* Align the begin ptr up.
2269 ptr
= SCM_GC_CARD_UP (seg_org
);
2271 /* Compute the ceiling on valid object pointers w/in this segment.
2273 seg_end
= SCM_GC_CARD_DOWN ((char *)seg_org
+ size
);
2275 /* Find the right place and insert the segment record.
2278 while (new_seg_index
< scm_n_heap_segs
2279 && SCM_PTR_LE (scm_heap_table
[new_seg_index
].bounds
[0], seg_org
))
2284 for (i
= scm_n_heap_segs
; i
> new_seg_index
; --i
)
2285 scm_heap_table
[i
] = scm_heap_table
[i
- 1];
2290 scm_heap_table
[new_seg_index
].span
= span
;
2291 scm_heap_table
[new_seg_index
].freelist
= freelist
;
2292 scm_heap_table
[new_seg_index
].bounds
[0] = ptr
;
2293 scm_heap_table
[new_seg_index
].bounds
[1] = seg_end
;
2296 n_new_cells
= seg_end
- ptr
;
2298 freelist
->heap_size
+= n_new_cells
;
2300 /* Partition objects in this segment into clusters */
2303 SCM
*clusterp
= &clusters
;
2305 NEXT_DATA_CELL (ptr
, span
);
2306 while (ptr
< seg_end
)
2308 scm_cell
*nxt
= ptr
;
2309 scm_cell
*prv
= NULL
;
2310 scm_cell
*last_card
= NULL
;
2311 int n_data_cells
= (SCM_GC_CARD_N_DATA_CELLS
/ span
) * SCM_CARDS_PER_CLUSTER
- 1;
2312 NEXT_DATA_CELL(nxt
, span
);
2314 /* Allocate cluster spine
2316 *clusterp
= PTR2SCM (ptr
);
2317 SCM_SETCAR (*clusterp
, PTR2SCM (nxt
));
2318 clusterp
= SCM_CDRLOC (*clusterp
);
2321 while (n_data_cells
--)
2323 scm_cell
*card
= SCM_GC_CELL_CARD (ptr
);
2324 SCM scmptr
= PTR2SCM (ptr
);
2326 NEXT_DATA_CELL (nxt
, span
);
2329 if (card
!= last_card
)
2331 INIT_CARD (card
, span
);
2335 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
2336 SCM_SET_FREE_CELL_CDR (scmptr
, PTR2SCM (nxt
));
2341 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv
), SCM_EOL
);
2346 scm_cell
*ref
= seg_end
;
2347 NEXT_DATA_CELL (ref
, span
);
2349 /* [cmm] looks like the segment size doesn't divide cleanly by
2350 cluster size. bad cmm! */
2354 /* Patch up the last cluster pointer in the segment
2355 * to join it to the input freelist.
2357 *clusterp
= freelist
->clusters
;
2358 freelist
->clusters
= clusters
;
2362 fprintf (stderr
, "H");
2368 round_to_cluster_size (scm_t_freelist
*freelist
, size_t len
)
2370 size_t cluster_size_in_bytes
= CLUSTER_SIZE_IN_BYTES (freelist
);
2373 (len
+ cluster_size_in_bytes
- 1) / cluster_size_in_bytes
* cluster_size_in_bytes
2374 + ALIGNMENT_SLACK (freelist
);
2378 alloc_some_heap (scm_t_freelist
*freelist
, policy_on_error error_policy
)
2379 #define FUNC_NAME "alloc_some_heap"
2384 if (scm_gc_heap_lock
)
2386 /* Critical code sections (such as the garbage collector) aren't
2387 * supposed to add heap segments.
2389 fprintf (stderr
, "alloc_some_heap: Can not extend locked heap.\n");
2393 if (scm_n_heap_segs
== heap_segment_table_size
)
2395 /* We have to expand the heap segment table to have room for the new
2396 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2397 * init_heap_seg only if the allocation of the segment itself succeeds.
2399 size_t new_table_size
= scm_n_heap_segs
+ 1;
2400 size_t size
= new_table_size
* sizeof (scm_t_heap_seg_data
);
2401 scm_t_heap_seg_data
*new_heap_table
;
2403 SCM_SYSCALL (new_heap_table
= ((scm_t_heap_seg_data
*)
2404 realloc ((char *)scm_heap_table
, size
)));
2405 if (!new_heap_table
)
2407 if (error_policy
== abort_on_error
)
2409 fprintf (stderr
, "alloc_some_heap: Could not grow heap segment table.\n");
2419 scm_heap_table
= new_heap_table
;
2420 heap_segment_table_size
= new_table_size
;
2424 /* Pick a size for the new heap segment.
2425 * The rule for picking the size of a segment is explained in
2429 /* Assure that the new segment is predicted to be large enough.
2431 * New yield should at least equal GC fraction of new heap size, i.e.
2433 * y + dh > f * (h + dh)
2436 * f : min yield fraction
2438 * dh : size of new heap segment
2440 * This gives dh > (f * h - y) / (1 - f)
2442 int f
= freelist
->min_yield_fraction
;
2443 unsigned long h
= SCM_HEAP_SIZE
;
2444 size_t min_cells
= (f
* h
- 100 * (long) scm_gc_yield
) / (99 - f
);
2445 len
= SCM_EXPHEAP (freelist
->heap_size
);
2447 fprintf (stderr
, "(%ld < %ld)", (long) len
, (long) min_cells
);
2449 if (len
< min_cells
)
2450 len
= min_cells
+ freelist
->cluster_size
;
2451 len
*= sizeof (scm_cell
);
2452 /* force new sampling */
2453 freelist
->collected
= LONG_MAX
;
2456 if (len
> scm_max_segment_size
)
2457 len
= scm_max_segment_size
;
2462 smallest
= CLUSTER_SIZE_IN_BYTES (freelist
);
2467 /* Allocate with decaying ambition. */
2468 while ((len
>= SCM_MIN_HEAP_SEG_SIZE
)
2469 && (len
>= smallest
))
2471 size_t rounded_len
= round_to_cluster_size (freelist
, len
);
2472 SCM_SYSCALL (ptr
= (SCM_CELLPTR
) malloc (rounded_len
));
2475 init_heap_seg (ptr
, rounded_len
, freelist
);
2482 if (error_policy
== abort_on_error
)
2484 fprintf (stderr
, "alloc_some_heap: Could not grow heap.\n");
2491 /* {GC Protection Helper Functions}
2496 * If within a function you need to protect one or more scheme objects from
2497 * garbage collection, pass them as parameters to one of the
2498 * scm_remember_upto_here* functions below. These functions don't do
2499 * anything, but since the compiler does not know that they are actually
2500 * no-ops, it will generate code that calls these functions with the given
2501 * parameters. Therefore, you can be sure that the compiler will keep those
2502 * scheme values alive (on the stack or in a register) up to the point where
2503 * scm_remember_upto_here* is called. In other words, place the call to
2504 * scm_remember_upto_here* _behind_ the last code in your function, that
2505 * depends on the scheme object to exist.
2507 * Example: We want to make sure, that the string object str does not get
2508 * garbage collected during the execution of 'some_function', because
2509 * otherwise the characters belonging to str would be freed and
2510 * 'some_function' might access freed memory. To make sure that the compiler
2511 * keeps str alive on the stack or in a register such that it is visible to
2512 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2513 * call to 'some_function'. Note that this would not be necessary if str was
2514 * used anyway after the call to 'some_function'.
2515 * char *chars = SCM_STRING_CHARS (str);
2516 * some_function (chars);
2517 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2521 scm_remember_upto_here_1 (SCM obj SCM_UNUSED
)
2523 /* Empty. Protects a single object from garbage collection. */
2527 scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED
, SCM obj2 SCM_UNUSED
)
2529 /* Empty. Protects two objects from garbage collection. */
2533 scm_remember_upto_here (SCM obj SCM_UNUSED
, ...)
2535 /* Empty. Protects any number of objects from garbage collection. */
2539 #if (SCM_DEBUG_DEPRECATED == 0)
2542 scm_remember (SCM
*ptr
)
2544 scm_c_issue_deprecation_warning ("`scm_remember' is deprecated. "
2545 "Use the `scm_remember_upto_here*' family of functions instead.");
2549 scm_protect_object (SCM obj
)
2551 scm_c_issue_deprecation_warning ("`scm_protect_object' is deprecated. "
2552 "Use `scm_gc_protect_object' instead.");
2553 return scm_gc_protect_object (obj
);
2557 scm_unprotect_object (SCM obj
)
2559 scm_c_issue_deprecation_warning ("`scm_unprotect_object' is deprecated. "
2560 "Use `scm_gc_unprotect_object' instead.");
2561 return scm_gc_unprotect_object (obj
);
2564 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2567 These crazy functions prevent garbage collection
2568 of arguments after the first argument by
2569 ensuring they remain live throughout the
2570 function because they are used in the last
2571 line of the code block.
2572 It'd be better to have a nice compiler hint to
2573 aid the conservative stack-scanning GC. --03/09/00 gjb */
2575 scm_return_first (SCM elt
, ...)
/* Like scm_return_first, but the first argument is an int.  The trailing
   arguments merely keep SCM values alive for the conservative GC.  */
int
scm_return_first_int (int i, ...)
{
  return i;
}
2588 scm_permanent_object (SCM obj
)
2591 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
2597 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2598 other references are dropped, until the object is unprotected by calling
2599 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
   i.e., it is possible to protect the same object several times, but it is
2601 necessary to unprotect the object the same number of times to actually get
2602 the object unprotected. It is an error to unprotect an object more often
2603 than it has been protected before. The function scm_protect_object returns
2607 /* Implementation note: For every object X, there is a counter which
2608 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
2612 scm_gc_protect_object (SCM obj
)
2616 /* This critical section barrier will be replaced by a mutex. */
2619 handle
= scm_hashq_create_handle_x (scm_protects
, obj
, SCM_MAKINUM (0));
2620 SCM_SETCDR (handle
, scm_sum (SCM_CDR (handle
), SCM_MAKINUM (1)));
2628 /* Remove any protection for OBJ established by a prior call to
2629 scm_protect_object. This function returns OBJ.
2631 See scm_protect_object for more information. */
2633 scm_gc_unprotect_object (SCM obj
)
2637 /* This critical section barrier will be replaced by a mutex. */
2640 handle
= scm_hashq_get_handle (scm_protects
, obj
);
2642 if (SCM_FALSEP (handle
))
2644 fprintf (stderr
, "scm_unprotect_object called on unprotected object\n");
2649 SCM count
= scm_difference (SCM_CDR (handle
), SCM_MAKINUM (1));
2650 if (SCM_EQ_P (count
, SCM_MAKINUM (0)))
2651 scm_hashq_remove_x (scm_protects
, obj
);
2653 SCM_SETCDR (handle
, count
);
2662 scm_gc_register_root (SCM
*p
)
2665 SCM key
= scm_long2num ((long) p
);
2667 /* This critical section barrier will be replaced by a mutex. */
2670 handle
= scm_hashv_create_handle_x (scm_gc_registered_roots
, key
, SCM_MAKINUM (0));
2671 SCM_SETCDR (handle
, scm_sum (SCM_CDR (handle
), SCM_MAKINUM (1)));
2677 scm_gc_unregister_root (SCM
*p
)
2680 SCM key
= scm_long2num ((long) p
);
2682 /* This critical section barrier will be replaced by a mutex. */
2685 handle
= scm_hashv_get_handle (scm_gc_registered_roots
, key
);
2687 if (SCM_FALSEP (handle
))
2689 fprintf (stderr
, "scm_gc_unregister_root called on unregistered root\n");
2694 SCM count
= scm_difference (SCM_CDR (handle
), SCM_MAKINUM (1));
2695 if (SCM_EQ_P (count
, SCM_MAKINUM (0)))
2696 scm_hashv_remove_x (scm_gc_registered_roots
, key
);
2698 SCM_SETCDR (handle
, count
);
2705 scm_gc_register_roots (SCM
*b
, unsigned long n
)
2708 for (; p
< b
+ n
; ++p
)
2709 scm_gc_register_root (p
);
2713 scm_gc_unregister_roots (SCM
*b
, unsigned long n
)
2716 for (; p
< b
+ n
; ++p
)
2717 scm_gc_unregister_root (p
);
2722 /* called on process termination. */
2728 extern int on_exit (void (*procp
) (), int arg
);
2731 cleanup (int status
, void *arg
)
2733 #error Dont know how to setup a cleanup handler on your system.
2738 scm_flush_all_ports ();
2743 make_initial_segment (size_t init_heap_size
, scm_t_freelist
*freelist
)
2745 size_t rounded_size
= round_to_cluster_size (freelist
, init_heap_size
);
2747 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2751 rounded_size
= round_to_cluster_size (freelist
, SCM_HEAP_SEG_SIZE
);
2752 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2760 if (freelist
->min_yield_fraction
)
2761 freelist
->min_yield
= (freelist
->heap_size
* freelist
->min_yield_fraction
2763 freelist
->grow_heap_p
= (freelist
->heap_size
< freelist
->min_yield
);
2770 init_freelist (scm_t_freelist
*freelist
,
2775 freelist
->clusters
= SCM_EOL
;
2776 freelist
->cluster_size
= cluster_size
+ 1;
2777 freelist
->left_to_collect
= 0;
2778 freelist
->clusters_allocated
= 0;
2779 freelist
->min_yield
= 0;
2780 freelist
->min_yield_fraction
= min_yield
;
2781 freelist
->span
= span
;
2782 freelist
->collected
= 0;
2783 freelist
->collected_1
= 0;
2784 freelist
->heap_size
= 0;
/* Get an integer from an environment variable.  Returns the decimal value
   of VAR, or DEF if VAR is unset or does not begin with a parsable
   decimal integer.  */
static int
scm_i_getenv_int (const char *var, int def)
{
  char *end, *val = getenv (var);
  long res;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;  /* no digits consumed: not a number */
  return res;
}
2806 unsigned long gc_trigger_1
;
2807 unsigned long gc_trigger_2
;
2808 size_t init_heap_size_1
;
2809 size_t init_heap_size_2
;
2812 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2813 scm_tc16_allocated
= scm_make_smob_type ("allocated cell", 0);
2814 scm_set_smob_mark (scm_tc16_allocated
, allocated_mark
);
2815 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
2817 j
= SCM_NUM_PROTECTS
;
2819 scm_sys_protects
[--j
] = SCM_BOOL_F
;
2822 scm_freelist
= SCM_EOL
;
2823 scm_freelist2
= SCM_EOL
;
2824 gc_trigger_1
= scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1
);
2825 init_freelist (&scm_master_freelist
, 1, SCM_CLUSTER_SIZE_1
, gc_trigger_1
);
2826 gc_trigger_2
= scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2
);
2827 init_freelist (&scm_master_freelist2
, 2, SCM_CLUSTER_SIZE_2
, gc_trigger_2
);
2828 scm_max_segment_size
= scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size
);
2832 j
= SCM_HEAP_SEG_SIZE
;
2833 scm_mtrigger
= SCM_INIT_MALLOC_LIMIT
;
2834 scm_heap_table
= ((scm_t_heap_seg_data
*)
2835 scm_must_malloc (sizeof (scm_t_heap_seg_data
) * 2, "hplims"));
2836 heap_segment_table_size
= 2;
2838 mark_space_ptr
= &mark_space_head
;
2840 init_heap_size_1
= scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1
);
2841 init_heap_size_2
= scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2
);
2842 if (make_initial_segment (init_heap_size_1
, &scm_master_freelist
) ||
2843 make_initial_segment (init_heap_size_2
, &scm_master_freelist2
))
2846 /* scm_hplims[0] can change. do not remove scm_heap_org */
2847 scm_heap_org
= CELL_UP (scm_heap_table
[0].bounds
[0], 1);
2849 scm_c_hook_init (&scm_before_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2850 scm_c_hook_init (&scm_before_mark_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2851 scm_c_hook_init (&scm_before_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2852 scm_c_hook_init (&scm_after_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2853 scm_c_hook_init (&scm_after_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2855 /* Initialise the list of ports. */
2856 scm_t_portable
= (scm_t_port
**)
2857 malloc (sizeof (scm_t_port
*) * scm_t_portable_room
);
2858 if (!scm_t_portable
)
2865 on_exit (cleanup
, 0);
2869 scm_stand_in_procs
= SCM_EOL
;
2870 scm_permobjs
= SCM_EOL
;
2871 scm_protects
= scm_c_make_hash_table (31);
2872 scm_gc_registered_roots
= scm_c_make_hash_table (31);
2879 SCM scm_after_gc_hook
;
2881 static SCM gc_async
;
2883 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2884 * is run after the gc, as soon as the asynchronous events are handled by the
2888 gc_async_thunk (void)
2890 scm_c_run_hook (scm_after_gc_hook
, SCM_EOL
);
2891 return SCM_UNSPECIFIED
;
2895 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2896 * the garbage collection. The only purpose of this function is to mark the
2897 * gc_async (which will eventually lead to the execution of the
2901 mark_gc_async (void * hook_data SCM_UNUSED
,
2902 void *func_data SCM_UNUSED
,
2903 void *data SCM_UNUSED
)
2905 /* If cell access debugging is enabled, the user may choose to perform
2906 * additional garbage collections after an arbitrary number of cell
2907 * accesses. We don't want the scheme level after-gc-hook to be performed
2908 * for each of these garbage collections for the following reason: The
2909 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
2910 * after-gc-hook was performed with every gc, and if the gc was performed
2911 * after a very small number of cell accesses, then the number of cell
2912 * accesses during the execution of the after-gc-hook will suffice to cause
2913 * the execution of the next gc. Then, guile would keep executing the
2914 * after-gc-hook over and over again, and would never come to do other
2917 * To overcome this problem, if cell access debugging with additional
2918 * garbage collections is enabled, the after-gc-hook is never run by the
2919 * garbage collecter. When running guile with cell access debugging and the
2920 * execution of the after-gc-hook is desired, then it is necessary to run
2921 * the hook explicitly from the user code. This has the effect, that from
2922 * the scheme level point of view it seems that garbage collection is
2923 * performed with a much lower frequency than it actually is. Obviously,
2924 * this will not work for code that depends on a fixed one to one
2925 * relationship between the execution counts of the C level garbage
2926 * collection hooks and the execution count of the scheme level
2929 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2930 if (debug_cells_gc_interval
== 0)
2931 scm_system_async_mark (gc_async
);
2933 scm_system_async_mark (gc_async
);
2945 scm_after_gc_hook
= scm_permanent_object (scm_make_hook (SCM_INUM0
));
2946 scm_c_define ("after-gc-hook", scm_after_gc_hook
);
2948 after_gc_thunk
= scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0
,
2950 gc_async
= scm_system_async (after_gc_thunk
); /* protected via scm_asyncs */
2952 scm_c_hook_add (&scm_after_gc_c_hook
, mark_gc_async
, NULL
, 0);
2954 #ifndef SCM_MAGIC_SNARFER
2955 #include "libguile/gc.x"
2959 #endif /*MARK_DEPENDENCIES*/