1 /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
43 /* #define DEBUGINFO */
45 /* SECTION: This code is compiled once.
48 #ifndef MARK_DEPENDENCIES
57 extern unsigned long * __libc_ia64_register_backing_store_base
;
60 #include "libguile/_scm.h"
61 #include "libguile/eval.h"
62 #include "libguile/stime.h"
63 #include "libguile/stackchk.h"
64 #include "libguile/struct.h"
65 #include "libguile/smob.h"
66 #include "libguile/unif.h"
67 #include "libguile/async.h"
68 #include "libguile/ports.h"
69 #include "libguile/root.h"
70 #include "libguile/strings.h"
71 #include "libguile/vectors.h"
72 #include "libguile/weaks.h"
73 #include "libguile/hashtab.h"
74 #include "libguile/tags.h"
76 #include "libguile/validate.h"
77 #include "libguile/deprecation.h"
78 #include "libguile/gc.h"
80 #ifdef GUILE_DEBUG_MALLOC
81 #include "libguile/debug-malloc.h"
94 #define var_start(x, y) va_start(x, y)
97 #define var_start(x, y) va_start(x)
102 #define CELL_P(x) (SCM_ITAG3 (x) == scm_tc3_cons)
/* NOTE(review): this file appears extraction-damaged -- the embedded original
 * line numbers skip values and statements are split mid-token.  Text below is
 * preserved verbatim; verify against upstream libguile/gc.c. */
/* Nonzero while the garbage collector is running (tested e.g. in
 * scm_assert_cell_valid below). */
104 unsigned int scm_gc_running_p
= 0;
108 #if (SCM_DEBUG_CELL_ACCESSES == 1)
/* Tag for allocated cells in debug builds; usage not visible in this chunk. */
110 scm_t_bits scm_tc16_allocated
;
112 /* Set this to != 0 if every cell that is accessed shall be checked:
114 unsigned int scm_debug_cell_accesses_p
= 1;
116 /* Set this to 0 if no additional gc's shall be performed, otherwise set it to
117 * the number of cell accesses after which a gc shall be called.
119 static unsigned int debug_cells_gc_interval
= 0;
/* scm_assert_cell_valid: debug-build check that CELL is a live cell pointer.
 * Reports non-cell objects and accesses to free cells on stderr, and can
 * trigger an extra GC every `debug_cells_gc_interval' accesses.  Uses the
 * static `already_running' flag to avoid recursing through scm_igc.
 * NOTE(review): interior lines are missing in this extraction (original
 * numbering skips); do not edit without the upstream source. */
122 /* Assert that the given object is a valid reference to a valid cell. This
123 * test involves to determine whether the object is a cell pointer, whether
124 * this pointer actually points into a heap segment and whether the cell
125 * pointed to is not a free cell. Further, additional garbage collections may
126 * get executed after a user defined number of cell accesses. This helps to
127 * find places in the C code where references are dropped for extremely short
131 scm_assert_cell_valid (SCM cell
)
133 static unsigned int already_running
= 0;
135 if (scm_debug_cell_accesses_p
&& !already_running
)
137 already_running
= 1; /* set to avoid recursion */
139 if (!scm_cellp (cell
))
141 fprintf (stderr
, "scm_assert_cell_valid: Not a cell object: %lux\n",
142 (unsigned long) SCM_UNPACK (cell
));
145 else if (!scm_gc_running_p
)
147 /* Dirk::FIXME:: During garbage collection there occur references to
148 free cells. This is allright during conservative marking, but
149 should not happen otherwise (I think). The case of free cells
150 accessed during conservative marking is handled in function
151 scm_mark_locations. However, there still occur accesses to free
152 cells during gc. I don't understand why this happens. If it is
153 a bug and gets fixed, the following test should also work while
156 if (SCM_FREE_CELL_P (cell
))
158 fprintf (stderr
, "scm_assert_cell_valid: Accessing free cell: %lux\n",
159 (unsigned long) SCM_UNPACK (cell
));
163 /* If desired, perform additional garbage collections after a user
164 * defined number of cell accesses.
166 if (debug_cells_gc_interval
)
168 static unsigned int counter
= 0;
176 counter
= debug_cells_gc_interval
;
177 scm_igc ("scm_assert_cell_valid");
181 already_running
= 0; /* re-enable */
/* Scheme primitive `set-debug-cell-accesses!': configures the cell-access
 * checking above.  #f disables checking; #t enables it without extra GCs;
 * a positive fixnum enables it with a GC every N accesses (non-positive
 * values raise an out-of-range error; other types a wrong-type error). */
186 SCM_DEFINE (scm_set_debug_cell_accesses_x
, "set-debug-cell-accesses!", 1, 0, 0,
188 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
189 "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
190 "but no additional calls to garbage collection are issued.\n"
191 "If @var{flag} is a number, cell access checking is enabled,\n"
192 "with an additional garbage collection after the given\n"
193 "number of cell accesses.\n"
194 "This procedure only exists when the compile-time flag\n"
195 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
196 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
198 if (SCM_FALSEP (flag
)) {
199 scm_debug_cell_accesses_p
= 0;
200 } else if (SCM_EQ_P (flag
, SCM_BOOL_T
)) {
201 debug_cells_gc_interval
= 0;
202 scm_debug_cell_accesses_p
= 1;
203 } else if (SCM_INUMP (flag
)) {
204 long int f
= SCM_INUM (flag
);
205 if (f
<= 0) SCM_OUT_OF_RANGE (1, flag
);
206 debug_cells_gc_interval
= f
;
207 scm_debug_cell_accesses_p
= 1;
209 SCM_WRONG_TYPE_ARG (1, flag
);
211 return SCM_UNSPECIFIED
;
215 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
/* Heap tuning parameters: card/cluster size arithmetic, default initial heap
 * sizes and min-yield percentages for the 1-cell and 2-cell freelists, the
 * malloc GC trigger, and the CELL_UP/CELL_DN alignment macros (with 386
 * protected-mode and generic variants, selected by #ifdefs whose conditions
 * were lost in extraction). */
219 /* {heap tuning parameters}
221 * These are parameters for controlling memory allocation. The heap
222 * is the area out of which scm_cons, and object headers are allocated.
224 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
225 * 64 bit machine. The units of the _SIZE parameters are bytes.
226 * Cons pairs and object headers occupy one heap cell.
228 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
229 * allocated initially the heap will grow by half its current size
230 * each subsequent time more heap is needed.
232 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
233 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
234 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type size_t. This code
235 * is in scm_init_storage() and alloc_some_heap() in sys.c
237 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
238 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
240 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
243 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
246 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
247 * reclaimed by a GC triggered by must_malloc. If less than this is
248 * reclaimed, the trigger threshold is raised. [I don't know what a
249 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
250 * work around a oscillation that caused almost constant GC.]
254 * Heap size 45000 and 40% min yield gives quick startup and no extra
255 * heap allocation. Having higher values on min yield may lead to
256 * large heaps, especially if code behaviour is varying its
257 * maximum consumption between different freelists.
260 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
261 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
262 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
263 size_t scm_default_init_heap_size_1
= (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER
- 1)
264 / SCM_CARDS_PER_CLUSTER
) * SCM_GC_CARD_SIZE
);
265 int scm_default_min_yield_1
= 40;
267 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
268 size_t scm_default_init_heap_size_2
= (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER
- 1)
269 / SCM_CARDS_PER_CLUSTER
) * SCM_GC_CARD_SIZE
);
270 /* The following value may seem large, but note that if we get to GC at
271 * all, this means that we have a numerically intensive application
273 int scm_default_min_yield_2
= 40;
275 size_t scm_default_max_segment_size
= 2097000L;/* a little less (adm) than 2 Mb */
277 #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
279 # define SCM_HEAP_SEG_SIZE 32768L
282 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
284 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
287 /* Make heap grow with factor 1.5 */
288 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
289 #define SCM_INIT_MALLOC_LIMIT 100000
290 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
292 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
293 aligned inner bounds for allocated storage */
296 /*in 386 protected mode we must only adjust the offset */
297 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
298 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
301 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
302 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
304 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
305 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
309 #define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
311 #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
312 #define CLUSTER_SIZE_IN_BYTES(freelist) \
313 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
/* scm_t_freelist: per-span master freelist bookkeeping (cluster list,
 * collection counters, min-yield heuristics, heap size).  Below it, the
 * thread-visible freelists (scm_freelist / scm_freelist2) and their master
 * structures for 1-cell and 2-cell spans.  The initializer lists end abruptly
 * because closing braces were lost in extraction. */
319 typedef struct scm_t_freelist
{
320 /* collected cells */
322 /* number of cells left to collect before cluster is full */
323 unsigned int left_to_collect
;
324 /* number of clusters which have been allocated */
325 unsigned int clusters_allocated
;
326 /* a list of freelists, each of size cluster_size,
327 * except the last one which may be shorter
331 /* this is the number of objects in each cluster, including the spine cell */
332 unsigned int cluster_size
;
333 /* indicates that we should grow heap instead of GC:ing
336 /* minimum yield on this list in order not to grow the heap
339 /* defines min_yield as percent of total heap size
341 int min_yield_fraction
;
342 /* number of cells per object on this list */
344 /* number of collected cells during last GC */
345 unsigned long collected
;
346 /* number of collected cells during penultimate GC */
347 unsigned long collected_1
;
348 /* total number of cells in heap segments
349 * belonging to this list.
351 unsigned long heap_size
;
354 SCM scm_freelist
= SCM_EOL
;
355 scm_t_freelist scm_master_freelist
= {
356 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_1
, 0, 0, 0, 1, 0, 0, 0
358 SCM scm_freelist2
= SCM_EOL
;
359 scm_t_freelist scm_master_freelist2
= {
360 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_2
, 0, 0, 0, 2, 0, 0, 0
/* Global GC state: malloc trigger, heap lock, block flag, weak-vector and
 * struct-free accumulators, statistics counters, and the SCM_SYMBOL keys
 * used to build the gc-stats association list. */
364 * is the number of bytes of must_malloc allocation needed to trigger gc.
366 unsigned long scm_mtrigger
;
369 * If set, don't expand the heap. Set only during gc, during which no allocation
370 * is supposed to take place anyway.
372 int scm_gc_heap_lock
= 0;
375 * Don't pause for collection if this is set -- just
378 int scm_block_gc
= 1;
380 /* During collection, this accumulates objects holding
383 SCM scm_weak_vectors
;
385 /* During collection, this accumulates structures which are to be freed.
387 SCM scm_structs_to_free
;
389 /* GC Statistics Keeping
391 unsigned long scm_cells_allocated
= 0;
392 unsigned long scm_mallocated
= 0;
393 unsigned long scm_gc_cells_collected
;
394 unsigned long scm_gc_yield
;
395 static unsigned long scm_gc_yield_1
= 0; /* previous GC yield */
396 unsigned long scm_gc_malloc_collected
;
397 unsigned long scm_gc_ports_collected
;
398 unsigned long scm_gc_time_taken
= 0;
399 static unsigned long t_before_gc
;
400 static unsigned long t_before_sweep
;
401 unsigned long scm_gc_mark_time_taken
= 0;
402 unsigned long scm_gc_sweep_time_taken
= 0;
403 unsigned long scm_gc_times
= 0;
404 unsigned long scm_gc_cells_swept
= 0;
405 double scm_gc_cells_marked_acc
= 0.;
406 double scm_gc_cells_swept_acc
= 0.;
408 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
409 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
410 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
411 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
412 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
413 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
414 SCM_SYMBOL (sym_gc_mark_time_taken
, "gc-mark-time-taken");
415 SCM_SYMBOL (sym_gc_sweep_time_taken
, "gc-sweep-time-taken");
416 SCM_SYMBOL (sym_times
, "gc-times");
417 SCM_SYMBOL (sym_cells_marked
, "cells-marked");
418 SCM_SYMBOL (sym_cells_swept
, "cells-swept");
/* Heap segment descriptor (bounds, owning freelist, span), forward
 * declarations for segment allocation, the combined SCM_HEAP_SIZE macro,
 * and the mark-space bit-vector chunk type used by get_bvec below. */
420 typedef struct scm_t_heap_seg_data
422 /* lower and upper bounds of the segment */
423 SCM_CELLPTR bounds
[2];
425 /* address of the head-of-freelist pointer for this segment's cells.
426 All segments usually point to the same one, scm_freelist. */
427 scm_t_freelist
*freelist
;
429 /* number of cells per object in this segment */
431 } scm_t_heap_seg_data
;
435 static size_t init_heap_seg (SCM_CELLPTR
, size_t, scm_t_freelist
*);
437 typedef enum { return_on_error
, abort_on_error
} policy_on_error
;
438 static void alloc_some_heap (scm_t_freelist
*, policy_on_error
);
441 #define SCM_HEAP_SIZE \
442 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
443 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
445 #define BVEC_GROW_SIZE 256
446 #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
447 #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))
449 /* mark space allocation */
451 typedef struct scm_t_mark_space
453 scm_t_c_bvec_limb
*bvec_space
;
454 struct scm_t_mark_space
*next
;
457 static scm_t_mark_space
*current_mark_space
;
458 static scm_t_mark_space
**mark_space_ptr
;
459 static ptrdiff_t current_mark_space_offset
;
460 static scm_t_mark_space
*mark_space_head
;
/* get_bvec: hand out the next card-sized slice of mark-bit space, growing
 * the linked list of mark-space chunks with malloc/calloc on demand and
 * raising a misc error ("could not grow heap") when allocation fails.
 * Allocates a fresh chunk when the current one is exhausted
 * (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS). */
462 static scm_t_c_bvec_limb
*
464 #define FUNC_NAME "get_bvec"
466 scm_t_c_bvec_limb
*res
;
468 if (!current_mark_space
)
470 SCM_SYSCALL (current_mark_space
= (scm_t_mark_space
*) malloc (sizeof (scm_t_mark_space
)));
471 if (!current_mark_space
)
472 SCM_MISC_ERROR ("could not grow heap", SCM_EOL
);
474 current_mark_space
->bvec_space
= NULL
;
475 current_mark_space
->next
= NULL
;
477 *mark_space_ptr
= current_mark_space
;
478 mark_space_ptr
= &(current_mark_space
->next
);
483 if (!(current_mark_space
->bvec_space
))
485 SCM_SYSCALL (current_mark_space
->bvec_space
=
486 (scm_t_c_bvec_limb
*) calloc (BVEC_GROW_SIZE_IN_BYTES
, 1));
487 if (!(current_mark_space
->bvec_space
))
488 SCM_MISC_ERROR ("could not grow heap", SCM_EOL
);
490 current_mark_space_offset
= 0;
495 if (current_mark_space_offset
== BVEC_GROW_SIZE_IN_LIMBS
)
497 current_mark_space
= NULL
;
502 res
= current_mark_space
->bvec_space
+ current_mark_space_offset
;
503 current_mark_space_offset
+= SCM_GC_CARD_BVEC_SIZE_IN_LIMBS
;
/* clear_mark_space: zero every allocated mark-bit chunk before a new mark
 * phase, by walking the mark_space_head list and memset-ing each bvec. */
513 scm_t_mark_space
*ms
;
515 for (ms
= mark_space_head
; ms
; ms
= ms
->next
)
516 memset (ms
->bvec_space
, 0, BVEC_GROW_SIZE_IN_BYTES
);
/* map_free_list (debug builds only): walk FREELIST and print to stderr, for
 * each heap segment encountered, how many MASTER->span-cells of the list lie
 * in it; complains about cells whose segment cannot be found. */
521 /* Debugging functions. */
523 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
525 static long int heap_segment (SCM obj
); /* forw decl: non-debugging func */
528 map_free_list (scm_t_freelist
*master
, SCM freelist
)
530 long last_seg
= -1, count
= 0;
533 for (f
= freelist
; !SCM_NULLP (f
); f
= SCM_FREE_CELL_CDR (f
))
535 long int this_seg
= heap_segment (f
);
540 "map_free_list: can't find segment containing cell %lux\n",
541 (unsigned long int) SCM_UNPACK (f
));
544 else if (this_seg
!= last_seg
)
547 fprintf (stderr
, " %5ld %d-cells in segment %ld\n",
548 (long) count
, master
->span
, (long) last_seg
);
555 fprintf (stderr
, " %5ld %d-cells in segment %ld\n",
556 (long) count
, master
->span
, (long) last_seg
);
/* Scheme primitive `map-free-list': print per-segment span/size info for all
 * heap segments, then map both master freelists.  Debug builds only. */
559 SCM_DEFINE (scm_map_free_list
, "map-free-list", 0, 0, 0,
561 "Print debugging information about the free-list.\n"
562 "@code{map-free-list} is only included in\n"
563 "@code{--enable-guile-debug} builds of Guile.")
564 #define FUNC_NAME s_scm_map_free_list
568 fprintf (stderr
, "%ld segments total (%d:%ld",
569 (long) scm_n_heap_segs
,
570 scm_heap_table
[0].span
,
571 (long) (scm_heap_table
[0].bounds
[1] - scm_heap_table
[0].bounds
[0]));
573 for (i
= 1; i
!= scm_n_heap_segs
; i
++)
574 fprintf (stderr
, ", %d:%ld",
575 scm_heap_table
[i
].span
,
576 (long) (scm_heap_table
[i
].bounds
[1] - scm_heap_table
[i
].bounds
[0]));
577 fprintf (stderr
, ")\n");
578 map_free_list (&scm_master_freelist
, scm_freelist
);
579 map_free_list (&scm_master_freelist2
, scm_freelist2
);
582 return SCM_UNSPECIFIED
;
/* free_list_length: count the cells on FREELIST, reporting non-free cells as
 * "bad cell" to stderr; `last_cluster'/`last_size' let consecutive clusters
 * of equal length be printed as a compact range.  Debug builds only. */
586 static long last_cluster
;
587 static long last_size
;
590 free_list_length (char *title
, long i
, SCM freelist
)
594 for (ls
= freelist
; !SCM_NULLP (ls
); ls
= SCM_FREE_CELL_CDR (ls
))
595 if (SCM_FREE_CELL_P (ls
))
599 fprintf (stderr
, "bad cell in %s at position %ld\n", title
, (long) n
);
606 if (last_cluster
== i
- 1)
607 fprintf (stderr
, "\t%ld\n", (long) last_size
);
609 fprintf (stderr
, "-%ld\t%ld\n", (long) (i
- 1), (long) last_size
);
612 fprintf (stderr
, "%s %ld", title
, (long) i
);
614 fprintf (stderr
, "%s\t%ld\n", title
, (long) n
);
/* free_list_lengths: print the length of FREELIST and of every cluster on
 * MASTER->clusters, plus a final object total.  Debug builds only. */
622 free_list_lengths (char *title
, scm_t_freelist
*master
, SCM freelist
)
625 long i
= 0, len
, n
= 0;
626 fprintf (stderr
, "%s\n\n", title
);
627 n
+= free_list_length ("free list", -1, freelist
);
628 for (clusters
= master
->clusters
;
629 SCM_NNULLP (clusters
);
630 clusters
= SCM_CDR (clusters
))
632 len
= free_list_length ("cluster", i
++, SCM_CAR (clusters
));
635 if (last_cluster
== i
- 1)
636 fprintf (stderr
, "\t%ld\n", (long) last_size
);
638 fprintf (stderr
, "-%ld\t%ld\n", (long) (i
- 1), (long) last_size
);
639 fprintf (stderr
, "\ntotal %ld objects\n\n", (long) n
);
/* Scheme primitive `free-list-length': dump lengths for both the 1-cell and
 * 2-cell freelists via free_list_lengths.  Debug builds only. */
642 SCM_DEFINE (scm_free_list_length
, "free-list-length", 0, 0, 0,
644 "Print debugging information about the free-list.\n"
645 "@code{free-list-length} is only included in\n"
646 "@code{--enable-guile-debug} builds of Guile.")
647 #define FUNC_NAME s_scm_free_list_length
649 free_list_lengths ("1-cells", &scm_master_freelist
, scm_freelist
);
650 free_list_lengths ("2-cells", &scm_master_freelist2
, scm_freelist2
);
651 return SCM_UNSPECIFIED
;
655 #endif /* defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST) */
/* GUILE_DEBUG_FREELIST support: the check-freelist flag, SCM_NEWCELL call
 * counters, and scm_check_freelist, which scans FREELIST for any cell not
 * marked free and reports its position on stderr. */
657 #ifdef GUILE_DEBUG_FREELIST
659 /* Non-zero if freelist debugging is in effect. Set this via
660 `gc-set-debug-check-freelist!'. */
661 static int scm_debug_check_freelist
= 0;
663 /* Number of calls to SCM_NEWCELL since startup. */
664 static unsigned long scm_newcell_count
;
665 static unsigned long scm_newcell2_count
;
667 /* Search freelist for anything that isn't marked as a free cell.
668 Abort if we find something. */
670 scm_check_freelist (SCM freelist
)
675 for (f
= freelist
; !SCM_NULLP (f
); f
= SCM_FREE_CELL_CDR (f
), i
++)
676 if (!SCM_FREE_CELL_P (f
))
678 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %lu'th elt\n",
679 (long) scm_newcell_count
, (long) i
);
/* Scheme primitive `gc-set-debug-check-freelist!': copy the boolean FLAG
 * into scm_debug_check_freelist (validated via SCM_VALIDATE_BOOL_COPY). */
684 SCM_DEFINE (scm_gc_set_debug_check_freelist_x
, "gc-set-debug-check-freelist!", 1, 0, 0,
686 "If @var{flag} is @code{#t}, check the freelist for consistency\n"
687 "on each cell allocation. This procedure only exists when the\n"
688 "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
689 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
691 /* [cmm] I did a double-take when I read this code the first time.
693 SCM_VALIDATE_BOOL_COPY (1, flag
, scm_debug_check_freelist
);
694 return SCM_UNSPECIFIED
;
/* scm_debug_newcell: debug replacement for SCM_NEWCELL -- optionally checks
 * the freelist, refills it via scm_gc_for_newcell when empty, pops a cell
 * and marks it allocated. */
700 scm_debug_newcell (void)
705 if (scm_debug_check_freelist
)
707 scm_check_freelist (scm_freelist
);
711 /* The rest of this is supposed to be identical to the SCM_NEWCELL
713 if (SCM_NULLP (scm_freelist
))
715 new = scm_gc_for_newcell (&scm_master_freelist
, &scm_freelist
);
716 SCM_GC_SET_ALLOCATED (new);
721 scm_freelist
= SCM_FREE_CELL_CDR (scm_freelist
);
722 SCM_GC_SET_ALLOCATED (new);
/* scm_debug_newcell2: same as scm_debug_newcell but for the 2-cell (double
 * cell) freelist, bumping scm_newcell2_count. */
729 scm_debug_newcell2 (void)
733 scm_newcell2_count
++;
734 if (scm_debug_check_freelist
)
736 scm_check_freelist (scm_freelist2
);
740 /* The rest of this is supposed to be identical to the SCM_NEWCELL
742 if (SCM_NULLP (scm_freelist2
))
744 new = scm_gc_for_newcell (&scm_master_freelist2
, &scm_freelist2
);
745 SCM_GC_SET_ALLOCATED (new);
750 scm_freelist2
= SCM_FREE_CELL_CDR (scm_freelist2
);
751 SCM_GC_SET_ALLOCATED (new);
757 #endif /* GUILE_DEBUG_FREELIST */
/* master_cells_allocated: estimate live cells handed out from MASTER --
 * clusters_allocated * (cluster_size - 1) spine-less objects, minus the
 * still-uncollected remainder when no clusters are pending, scaled by span. */
762 master_cells_allocated (scm_t_freelist
*master
)
764 /* the '- 1' below is to ignore the cluster spine cells. */
765 long objects
= master
->clusters_allocated
* (master
->cluster_size
- 1);
766 if (SCM_NULLP (master
->clusters
))
767 objects
-= master
->left_to_collect
;
768 return master
->span
* objects
;
/* freelist_length: count the cells on FREELIST by walking the free-cell cdr
 * chain.  (Loop body/return lost in extraction -- presumably `n' is the
 * count returned; TODO confirm against upstream.) */
772 freelist_length (SCM freelist
)
775 for (n
= 0; !SCM_NULLP (freelist
); freelist
= SCM_FREE_CELL_CDR (freelist
))
/* compute_cells_allocated: global allocation snapshot -- counted cells plus
 * both masters' handed-out cells, minus cells currently sitting on the two
 * thread freelists (span-weighted). */
781 compute_cells_allocated ()
783 return (scm_cells_allocated
784 + master_cells_allocated (&scm_master_freelist
)
785 + master_cells_allocated (&scm_master_freelist2
)
786 - scm_master_freelist
.span
* freelist_length (scm_freelist
)
787 - scm_master_freelist2
.span
* freelist_length (scm_freelist2
));
/* Scheme primitive `gc-stats': snapshot the GC counters into locals before
 * consing (so the consing itself does not perturb the numbers), build the
 * list of heap-segment bounds pairs, and return the association list keyed
 * by the sym_* symbols defined above. */
790 /* {Scheme Interface to GC}
793 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
795 "Return an association list of statistics about Guile's current\n"
797 #define FUNC_NAME s_scm_gc_stats
802 unsigned long int local_scm_mtrigger
;
803 unsigned long int local_scm_mallocated
;
804 unsigned long int local_scm_heap_size
;
805 unsigned long int local_scm_cells_allocated
;
806 unsigned long int local_scm_gc_time_taken
;
807 unsigned long int local_scm_gc_times
;
808 unsigned long int local_scm_gc_mark_time_taken
;
809 unsigned long int local_scm_gc_sweep_time_taken
;
810 double local_scm_gc_cells_swept
;
811 double local_scm_gc_cells_marked
;
821 for (i
= scm_n_heap_segs
; i
--; )
822 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
823 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
825 if (scm_n_heap_segs
!= n
)
830 /* Below, we cons to produce the resulting list. We want a snapshot of
831 * the heap situation before consing.
833 local_scm_mtrigger
= scm_mtrigger
;
834 local_scm_mallocated
= scm_mallocated
;
835 local_scm_heap_size
= SCM_HEAP_SIZE
;
836 local_scm_cells_allocated
= compute_cells_allocated ();
837 local_scm_gc_time_taken
= scm_gc_time_taken
;
838 local_scm_gc_mark_time_taken
= scm_gc_mark_time_taken
;
839 local_scm_gc_sweep_time_taken
= scm_gc_sweep_time_taken
;
840 local_scm_gc_times
= scm_gc_times
;
841 local_scm_gc_cells_swept
= scm_gc_cells_swept_acc
;
842 local_scm_gc_cells_marked
= scm_gc_cells_marked_acc
;
844 answer
= scm_list_n (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
845 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
846 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
847 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
848 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
849 scm_cons (sym_times
, scm_ulong2num (local_scm_gc_times
)),
850 scm_cons (sym_gc_mark_time_taken
, scm_ulong2num (local_scm_gc_mark_time_taken
)),
851 scm_cons (sym_gc_sweep_time_taken
, scm_ulong2num (local_scm_gc_sweep_time_taken
)),
852 scm_cons (sym_cells_marked
, scm_i_dbl2big (local_scm_gc_cells_marked
)),
853 scm_cons (sym_cells_swept
, scm_i_dbl2big (local_scm_gc_cells_swept
)),
854 scm_cons (sym_heap_segments
, heap_segs
),
/* gc_start_stats: reset per-collection counters at GC start -- records the
 * start time, zeroes swept/collected counts, rotates scm_gc_yield into
 * scm_gc_yield_1, and recomputes the current yield baseline. */
863 gc_start_stats (const char *what SCM_UNUSED
)
865 t_before_gc
= scm_c_get_internal_run_time ();
866 scm_gc_cells_swept
= 0;
867 scm_gc_cells_collected
= 0;
868 scm_gc_yield_1
= scm_gc_yield
;
869 scm_gc_yield
= (scm_cells_allocated
870 + master_cells_allocated (&scm_master_freelist
)
871 + master_cells_allocated (&scm_master_freelist2
));
872 scm_gc_malloc_collected
= 0;
873 scm_gc_ports_collected
= 0;
/* End-of-GC statistics update (function header lost in extraction --
 * presumably gc_end_stats; TODO confirm): accumulates total and sweep time
 * from the t_before_* timestamps, and folds swept/collected counts into the
 * marked/swept accumulators. */
880 unsigned long t
= scm_c_get_internal_run_time ();
881 scm_gc_time_taken
+= (t
- t_before_gc
);
882 scm_gc_sweep_time_taken
+= (t
- t_before_sweep
);
885 scm_gc_cells_marked_acc
+= scm_gc_cells_swept
- scm_gc_cells_collected
;
886 scm_gc_cells_swept_acc
+= scm_gc_cells_swept
;
/* Scheme primitive `object-address': return OBJ's raw SCM bits as an
 * unsigned long number (stable for the object's lifetime). */
890 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
892 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
893 "returned by this function for @var{obj}")
894 #define FUNC_NAME s_scm_object_address
896 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj
));
/* Scheme primitive `gc': run a full collection and return unspecified.
 * (The body lines invoking the collector were lost in extraction.) */
901 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
903 "Scans all of SCM objects and reclaims for further use those that are\n"
904 "no longer accessible.")
905 #define FUNC_NAME s_scm_gc
910 return SCM_UNSPECIFIED
;
/* adjust_min_yield: raise FREELIST->min_yield when the larger of the last
 * two total GC yields fell short of min_yield_fraction percent of the total
 * heap size, so the next shortfall grows the heap instead of thrashing GC. */
916 /* {C Interface For When GC is Triggered}
920 adjust_min_yield (scm_t_freelist
*freelist
)
922 /* min yield is adjusted upwards so that next predicted total yield
923 * (allocated cells actually freed by GC) becomes
924 * `min_yield_fraction' of total heap size. Note, however, that
925 * the absolute value of min_yield will correspond to `collected'
926 * on one master (the one which currently is triggering GC).
928 * The reason why we look at total yield instead of cells collected
929 * on one list is that we want to take other freelists into account.
930 * On this freelist, we know that (local) yield = collected cells,
931 * but that's probably not the case on the other lists.
933 * (We might consider computing a better prediction, for example
934 * by computing an average over multiple GC:s.)
936 if (freelist
->min_yield_fraction
)
938 /* Pick largest of last two yields. */
939 long delta
= ((SCM_HEAP_SIZE
* freelist
->min_yield_fraction
/ 100)
940 - (long) SCM_MAX (scm_gc_yield_1
, scm_gc_yield
));
942 fprintf (stderr
, " after GC = %lu, delta = %ld\n",
943 (long) scm_cells_allocated
,
947 freelist
->min_yield
+= delta
;
/* scm_gc_for_newcell: refill *FREELIST from MASTER when it is empty.
 * Strategy visible below: if allowed, try growing the heap first
 * (alloc_some_heap, return_on_error); otherwise/next trigger GC and adjust
 * min_yield; as a last resort grow with abort_on_error.  Then pop the next
 * cluster off MASTER->clusters, count it, and hand back its cdr as the new
 * freelist.  Loops (do/while) until a non-empty cluster is found. */
952 /* When we get POSIX threads support, the master will be global and
953 * common while the freelist will be individual for each thread.
957 scm_gc_for_newcell (scm_t_freelist
*master
, SCM
*freelist
)
963 if (SCM_NULLP (master
->clusters
))
965 if (master
->grow_heap_p
|| scm_block_gc
)
967 /* In order to reduce gc frequency, try to allocate a new heap
968 * segment first, even if gc might find some free cells. If we
969 * can't obtain a new heap segment, we will try gc later.
971 master
->grow_heap_p
= 0;
972 alloc_some_heap (master
, return_on_error
);
974 if (SCM_NULLP (master
->clusters
))
976 /* The heap was not grown, either because it wasn't scheduled to
977 * grow, or because there was not enough memory available. In
978 * both cases we have to try gc to get some free cells.
981 fprintf (stderr
, "allocated = %lu, ",
982 (long) (scm_cells_allocated
983 + master_cells_allocated (&scm_master_freelist
)
984 + master_cells_allocated (&scm_master_freelist2
)));
987 adjust_min_yield (master
);
988 if (SCM_NULLP (master
->clusters
))
990 /* gc could not free any cells. Now, we _must_ allocate a
991 * new heap segment, because there is no other possibility
992 * to provide a new cell for the caller.
994 alloc_some_heap (master
, abort_on_error
);
998 cell
= SCM_CAR (master
->clusters
);
999 master
->clusters
= SCM_CDR (master
->clusters
);
1000 ++master
->clusters_allocated
;
1002 while (SCM_NULLP (cell
));
1004 #ifdef GUILE_DEBUG_FREELIST
1005 scm_check_freelist (cell
);
1008 --scm_ints_disabled
;
1009 *freelist
= SCM_FREE_CELL_CDR (cell
);
/* scm_alloc_cluster: reserve a whole cluster from MASTER via
 * scm_gc_for_newcell and re-attach the remaining freelist as the cdr of the
 * returned cell.  Marked by its own comment as not yet generally useful. */
1015 /* This is a support routine which can be used to reserve a cluster
1016 * for some special use, such as debugging. It won't be useful until
1017 * free cells are preserved between garbage collections.
1021 scm_alloc_cluster (scm_t_freelist
*master
)
1024 cell
= scm_gc_for_newcell (master
, &freelist
);
1025 SCM_SETCDR (cell
, freelist
);
/* C-level GC hooks (run at the phases their names indicate -- see the
 * scm_c_hook_run calls in scm_igc below), followed by the ia64-only
 * SCM_MARK_BACKING_STORE macro: capture the register context with
 * getcontext, conservatively mark it, then mark the register backing store
 * between __libc_ia64_register_backing_store_base and ar.bsp.  The non-ia64
 * variant is empty. */
1031 scm_t_c_hook scm_before_gc_c_hook
;
1032 scm_t_c_hook scm_before_mark_c_hook
;
1033 scm_t_c_hook scm_before_sweep_c_hook
;
1034 scm_t_c_hook scm_after_sweep_c_hook
;
1035 scm_t_c_hook scm_after_gc_c_hook
;
1038 # define SCM_MARK_BACKING_STORE() do { \
1040 SCM_STACKITEM * top, * bot; \
1041 getcontext (&ctx); \
1042 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
1043 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
1044 / sizeof (SCM_STACKITEM))); \
1045 bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base; \
1046 top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp; \
1047 scm_mark_locations (bot, top - bot); } while (0)
1049 # define SCM_MARK_BACKING_STORE()
/* scm_igc: the internal collector.  Visible phases, in order: run the
 * before-gc hook; enter the critical section; start stats; run the
 * before-mark hook and clear mark space; conservatively mark registers
 * (setjmp buffer) and the C stack (direction per SCM_STACK_GROWS_UP, plus
 * ia64 backing store) or, with USE_THREADS, every thread's stack; mark the
 * scm_sys_protects table, registered roots, the subr table and root handle;
 * record mark time; run before-sweep and after-sweep hooks; leave the
 * critical section and run the after-gc hook.
 * NOTE(review): many interior lines are missing in this extraction. */
1053 scm_igc (const char *what
)
1058 scm_c_hook_run (&scm_before_gc_c_hook
, 0);
1061 SCM_NULLP (scm_freelist
)
1063 : (SCM_NULLP (scm_freelist2
) ? "o" : "m"));
1065 /* During the critical section, only the current thread may run. */
1066 SCM_CRITICAL_SECTION_START
;
1068 if (!scm_stack_base
|| scm_block_gc
)
1074 gc_start_stats (what
);
1076 if (scm_gc_heap_lock
)
1077 /* We've invoked the collector while a GC is already in progress.
1078 That should never happen. */
1083 scm_c_hook_run (&scm_before_mark_c_hook
, 0);
1085 clear_mark_space ();
1089 /* Mark objects on the C stack. */
1090 SCM_FLUSH_REGISTER_WINDOWS
;
1091 /* This assumes that all registers are saved into the jmp_buf */
1092 setjmp (scm_save_regs_gc_mark
);
1093 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
1094 ( (size_t) (sizeof (SCM_STACKITEM
) - 1 +
1095 sizeof scm_save_regs_gc_mark
)
1096 / sizeof (SCM_STACKITEM
)));
1099 unsigned long stack_len
= scm_stack_size (scm_stack_base
);
1100 #ifdef SCM_STACK_GROWS_UP
1101 scm_mark_locations (scm_stack_base
, stack_len
);
1103 scm_mark_locations (scm_stack_base
- stack_len
, stack_len
);
1106 SCM_MARK_BACKING_STORE();
1108 #else /* USE_THREADS */
1110 /* Mark every thread's stack and registers */
1111 scm_threads_mark_stacks ();
1113 #endif /* USE_THREADS */
1115 j
= SCM_NUM_PROTECTS
;
1117 scm_gc_mark (scm_sys_protects
[j
]);
1119 /* mark the registered roots */
1122 for (i
= 0; i
< SCM_VECTOR_LENGTH (scm_gc_registered_roots
); ++i
) {
1123 SCM l
= SCM_VELTS (scm_gc_registered_roots
)[i
];
1124 for (; !SCM_NULLP (l
); l
= SCM_CDR (l
)) {
1125 SCM
*p
= (SCM
*) (scm_num2long (SCM_CAAR (l
), 0, NULL
));
1131 /* FIXME: we should have a means to register C functions to be run
1132 * in different phases of GC
1134 scm_mark_subr_table ();
1137 scm_gc_mark (scm_root
->handle
);
1140 t_before_sweep
= scm_c_get_internal_run_time ();
1141 scm_gc_mark_time_taken
+= (t_before_sweep
- t_before_gc
);
1143 scm_c_hook_run (&scm_before_sweep_c_hook
, 0);
1147 scm_c_hook_run (&scm_after_sweep_c_hook
, 0);
1152 SCM_CRITICAL_SECTION_END
;
1153 scm_c_hook_run (&scm_after_gc_c_hook
, 0);
1162 #define MARK scm_gc_mark
1163 #define FNAME "scm_gc_mark"
1165 #endif /*!MARK_DEPENDENCIES*/
1167 /* Mark an object precisely.
1171 #define FUNC_NAME FNAME
1175 scm_t_bits cell_type
;
1177 #ifndef MARK_DEPENDENCIES
1178 # define RECURSE scm_gc_mark
1180 /* go through the usual marking, but not for self-cycles. */
1181 # define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1185 #ifdef MARK_DEPENDENCIES
1186 goto gc_mark_loop_first_time
;
1189 /* A simple hack for debugging. Chose the second branch to get a
1190 meaningful backtrace for crashes inside the GC.
1193 #define goto_gc_mark_loop goto gc_mark_loop
1194 #define goto_gc_mark_nimp goto gc_mark_nimp
1196 #define goto_gc_mark_loop RECURSE(ptr); return
1197 #define goto_gc_mark_nimp RECURSE(ptr); return
1206 #ifdef MARK_DEPENDENCIES
1207 if (SCM_EQ_P (ptr
, p
))
1213 gc_mark_loop_first_time
:
1216 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1217 /* We are in debug mode. Check the ptr exhaustively. */
1218 if (!scm_cellp (ptr
))
1219 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL
);
1221 /* In non-debug mode, do at least some cheap testing. */
1223 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL
);
1226 #ifndef MARK_DEPENDENCIES
1228 if (SCM_GCMARKP (ptr
))
1231 SCM_SETGCMARK (ptr
);
1235 cell_type
= SCM_GC_CELL_TYPE (ptr
);
1236 switch (SCM_ITAG7 (cell_type
))
1238 case scm_tcs_cons_nimcar
:
1239 if (SCM_IMP (SCM_CDR (ptr
)))
1241 ptr
= SCM_CAR (ptr
);
1244 RECURSE (SCM_CAR (ptr
));
1245 ptr
= SCM_CDR (ptr
);
1247 case scm_tcs_cons_imcar
:
1248 ptr
= SCM_CDR (ptr
);
1251 RECURSE (SCM_SETTER (ptr
));
1252 ptr
= SCM_PROCEDURE (ptr
);
1254 case scm_tcs_struct
:
1256 /* XXX - use less explicit code. */
1257 scm_t_bits word0
= SCM_CELL_WORD_0 (ptr
) - scm_tc3_struct
;
1258 scm_t_bits
* vtable_data
= (scm_t_bits
*) word0
;
1259 SCM layout
= SCM_PACK (vtable_data
[scm_vtable_index_layout
]);
1260 long len
= SCM_SYMBOL_LENGTH (layout
);
1261 char * fields_desc
= SCM_SYMBOL_CHARS (layout
);
1262 scm_t_bits
* struct_data
= (scm_t_bits
*) SCM_STRUCT_DATA (ptr
);
1264 if (vtable_data
[scm_struct_i_flags
] & SCM_STRUCTF_ENTITY
)
1266 RECURSE (SCM_PACK (struct_data
[scm_struct_i_procedure
]));
1267 RECURSE (SCM_PACK (struct_data
[scm_struct_i_setter
]));
1273 for (x
= 0; x
< len
- 2; x
+= 2, ++struct_data
)
1274 if (fields_desc
[x
] == 'p')
1275 RECURSE (SCM_PACK (*struct_data
));
1276 if (fields_desc
[x
] == 'p')
1278 if (SCM_LAYOUT_TAILP (fields_desc
[x
+ 1]))
1279 for (x
= *struct_data
++; x
; --x
, ++struct_data
)
1280 RECURSE (SCM_PACK (*struct_data
));
1282 RECURSE (SCM_PACK (*struct_data
));
1286 ptr
= SCM_PACK (vtable_data
[scm_vtable_index_vtable
]);
1290 case scm_tcs_closures
:
1291 if (SCM_IMP (SCM_ENV (ptr
)))
1293 ptr
= SCM_CLOSCAR (ptr
);
1296 RECURSE (SCM_CLOSCAR (ptr
));
1297 ptr
= SCM_ENV (ptr
);
1299 case scm_tc7_vector
:
1300 i
= SCM_VECTOR_LENGTH (ptr
);
1304 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
1305 RECURSE (SCM_VELTS (ptr
)[i
]);
1306 ptr
= SCM_VELTS (ptr
)[0];
1311 size_t i
= SCM_CCLO_LENGTH (ptr
);
1313 for (j
= 1; j
!= i
; ++j
)
1315 SCM obj
= SCM_CCLO_REF (ptr
, j
);
1319 ptr
= SCM_CCLO_REF (ptr
, 0);
1325 case scm_tc7_byvect
:
1332 #ifdef HAVE_LONG_LONGS
1333 case scm_tc7_llvect
:
1336 case scm_tc7_string
:
1340 SCM_SET_WVECT_GC_CHAIN (ptr
, scm_weak_vectors
);
1341 scm_weak_vectors
= ptr
;
1342 if (SCM_IS_WHVEC_ANY (ptr
))
1349 len
= SCM_VECTOR_LENGTH (ptr
);
1350 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
1351 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
1353 for (x
= 0; x
< len
; ++x
)
1356 alist
= SCM_VELTS (ptr
)[x
];
1358 /* mark everything on the alist except the keys or
1359 * values, according to weak_values and weak_keys. */
1360 while ( SCM_CONSP (alist
)
1361 && !SCM_GCMARKP (alist
)
1362 && SCM_CONSP (SCM_CAR (alist
)))
1367 kvpair
= SCM_CAR (alist
);
1368 next_alist
= SCM_CDR (alist
);
1371 * SCM_SETGCMARK (alist);
1372 * SCM_SETGCMARK (kvpair);
1374 * It may be that either the key or value is protected by
1375 * an escaped reference to part of the spine of this alist.
1376 * If we mark the spine here, and only mark one or neither of the
1377 * key and value, they may never be properly marked.
1378 * This leads to a horrible situation in which an alist containing
1379 * freelist cells is exported.
1381 * So only mark the spines of these arrays last of all marking.
1382 * If somebody confuses us by constructing a weak vector
1383 * with a circular alist then we are hosed, but at least we
1384 * won't prematurely drop table entries.
1387 RECURSE (SCM_CAR (kvpair
));
1389 RECURSE (SCM_CDR (kvpair
));
1392 if (SCM_NIMP (alist
))
1398 case scm_tc7_symbol
:
1399 ptr
= SCM_PROP_SLOTS (ptr
);
1401 case scm_tc7_variable
:
1402 ptr
= SCM_CELL_OBJECT_1 (ptr
);
1407 i
= SCM_PTOBNUM (ptr
);
1408 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1409 if (!(i
< scm_numptob
))
1410 SCM_MISC_ERROR ("undefined port type", SCM_EOL
);
1412 if (SCM_PTAB_ENTRY(ptr
))
1413 RECURSE (SCM_FILENAME (ptr
));
1414 if (scm_ptobs
[i
].mark
)
1416 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
1423 switch (SCM_TYP16 (ptr
))
1424 { /* should be faster than going through scm_smobs */
1425 case scm_tc_free_cell
:
1426 /* We have detected a free cell. This can happen if non-object data
1427 * on the C stack points into guile's heap and is scanned during
1428 * conservative marking. */
1432 case scm_tc16_complex
:
1435 i
= SCM_SMOBNUM (ptr
);
1436 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1437 if (!(i
< scm_numsmob
))
1438 SCM_MISC_ERROR ("undefined smob type", SCM_EOL
);
1440 if (scm_smobs
[i
].mark
)
1442 ptr
= (scm_smobs
[i
].mark
) (ptr
);
1450 SCM_MISC_ERROR ("unknown type", SCM_EOL
);
1456 #ifndef MARK_DEPENDENCIES
1461 /* And here we define `scm_gc_mark_dependencies', by including this
1462 * same file in itself.
1464 #define MARK scm_gc_mark_dependencies
1465 #define FNAME "scm_gc_mark_dependencies"
1466 #define MARK_DEPENDENCIES
1468 #undef MARK_DEPENDENCIES
1473 /* Determine whether the given value does actually represent a cell in some
1474 * heap segment. If this is the case, the number of the heap segment is
1475 * returned. Otherwise, -1 is returned. Binary search is used in order to
1476 * determine the heap segment that contains the cell.*/
1477 /* FIXME: To be used within scm_mark_locations and scm_cellp this function
1478 * should be an inline function. */
1480 heap_segment (SCM obj
)
1486 SCM_CELLPTR ptr
= SCM2PTR (obj
);
1487 unsigned long int i
= 0;
1488 unsigned long int j
= scm_n_heap_segs
- 1;
1490 if (SCM_PTR_LT (ptr
, scm_heap_table
[i
].bounds
[0]))
1492 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[1], ptr
))
1498 if (SCM_PTR_LT (ptr
, scm_heap_table
[i
].bounds
[1]))
1502 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1509 unsigned long int k
= (i
+ j
) / 2;
1513 else if (SCM_PTR_LT (ptr
, scm_heap_table
[k
].bounds
[1]))
1517 if (SCM_PTR_LT (ptr
, scm_heap_table
[i
].bounds
[0]))
1520 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1524 if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[1], ptr
))
1530 if (!DOUBLECELL_ALIGNED_P (obj
) && scm_heap_table
[i
].span
== 2)
1532 else if (SCM_GC_IN_CARD_HEADERP (ptr
))
1541 /* Mark a region conservatively */
1543 scm_mark_locations (SCM_STACKITEM x
[], unsigned long n
)
1547 for (m
= 0; m
< n
; ++m
)
1549 SCM obj
= * (SCM
*) &x
[m
];
1550 long int segment
= heap_segment (obj
);
1557 /* The function scm_cellp determines whether an SCM value can be regarded as a
1558 * pointer to a cell on the heap.
1561 scm_cellp (SCM value
)
1563 long int segment
= heap_segment (value
);
1564 return (segment
>= 0);
1569 gc_sweep_freelist_start (scm_t_freelist
*freelist
)
1571 freelist
->cells
= SCM_EOL
;
1572 freelist
->left_to_collect
= freelist
->cluster_size
;
1573 freelist
->clusters_allocated
= 0;
1574 freelist
->clusters
= SCM_EOL
;
1575 freelist
->clustertail
= &freelist
->clusters
;
1576 freelist
->collected_1
= freelist
->collected
;
1577 freelist
->collected
= 0;
1581 gc_sweep_freelist_finish (scm_t_freelist
*freelist
)
1584 *freelist
->clustertail
= freelist
->cells
;
1585 if (!SCM_NULLP (freelist
->cells
))
1587 SCM c
= freelist
->cells
;
1588 SCM_SET_CELL_WORD_0 (c
, SCM_FREE_CELL_CDR (c
));
1589 SCM_SET_CELL_WORD_1 (c
, SCM_EOL
);
1590 freelist
->collected
+=
1591 freelist
->span
* (freelist
->cluster_size
- freelist
->left_to_collect
);
1593 scm_gc_cells_collected
+= freelist
->collected
;
1595 /* Although freelist->min_yield is used to test freelist->collected
1596 * (which is the local GC yield for freelist), it is adjusted so
1597 * that *total* yield is freelist->min_yield_fraction of total heap
1598 * size. This means that a too low yield is compensated by more
1599 * heap on the list which is currently doing most work, which is
1600 * just what we want.
1602 collected
= SCM_MAX (freelist
->collected_1
, freelist
->collected
);
1603 freelist
->grow_heap_p
= (collected
< freelist
->min_yield
);
1606 #define NEXT_DATA_CELL(ptr, span) \
1608 scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1609 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1610 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1616 #define FUNC_NAME "scm_gc_sweep"
1618 register SCM_CELLPTR ptr
;
1619 register SCM nfreelist
;
1620 register scm_t_freelist
*freelist
;
1621 register unsigned long m
;
1628 gc_sweep_freelist_start (&scm_master_freelist
);
1629 gc_sweep_freelist_start (&scm_master_freelist2
);
1631 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1633 register long left_to_collect
;
1636 /* Unmarked cells go onto the front of the freelist this heap
1637 segment points to. Rather than updating the real freelist
1638 pointer as we go along, we accumulate the new head in
1639 nfreelist. Then, if it turns out that the entire segment is
1640 free, we free (i.e., malloc's free) the whole segment, and
1641 simply don't assign nfreelist back into the real freelist. */
1642 freelist
= scm_heap_table
[i
].freelist
;
1643 nfreelist
= freelist
->cells
;
1644 left_to_collect
= freelist
->left_to_collect
;
1645 span
= scm_heap_table
[i
].span
;
1647 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0], span
);
1648 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1], span
) - ptr
;
1650 /* use only data cells in seg_size */
1651 seg_size
= (seg_size
/ SCM_GC_CARD_N_CELLS
) * (SCM_GC_CARD_N_DATA_CELLS
/ span
) * span
;
1653 scm_gc_cells_swept
+= seg_size
;
1655 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
1659 if (SCM_GC_IN_CARD_HEADERP (ptr
))
1665 NEXT_DATA_CELL (nxt
, span
);
1672 scmptr
= PTR2SCM (ptr
);
1674 if (SCM_GCMARKP (scmptr
))
1677 switch SCM_TYP7 (scmptr
)
1679 case scm_tcs_struct
:
1681 /* Structs need to be freed in a special order.
1682 * This is handled by GC C hooks in struct.c.
1684 SCM_SET_STRUCT_GC_CHAIN (scmptr
, scm_structs_to_free
);
1685 scm_structs_to_free
= scmptr
;
1688 case scm_tcs_cons_imcar
:
1689 case scm_tcs_cons_nimcar
:
1690 case scm_tcs_closures
:
1694 case scm_tc7_vector
:
1696 unsigned long int length
= SCM_VECTOR_LENGTH (scmptr
);
1699 m
+= length
* sizeof (scm_t_bits
);
1700 scm_must_free (SCM_VECTOR_BASE (scmptr
));
1706 m
+= (SCM_CCLO_LENGTH (scmptr
) * sizeof (SCM
));
1707 scm_must_free (SCM_CCLO_BASE (scmptr
));
1713 unsigned long int length
= SCM_BITVECTOR_LENGTH (scmptr
);
1716 m
+= sizeof (long) * ((length
+ SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
1717 scm_must_free (SCM_BITVECTOR_BASE (scmptr
));
1721 case scm_tc7_byvect
:
1725 #ifdef HAVE_LONG_LONGS
1726 case scm_tc7_llvect
:
1731 m
+= SCM_UVECTOR_LENGTH (scmptr
) * scm_uniform_element_size (scmptr
);
1732 scm_must_free (SCM_UVECTOR_BASE (scmptr
));
1735 case scm_tc7_string
:
1736 m
+= SCM_STRING_LENGTH (scmptr
) + 1;
1737 scm_must_free (SCM_STRING_CHARS (scmptr
));
1739 case scm_tc7_symbol
:
1740 m
+= SCM_SYMBOL_LENGTH (scmptr
) + 1;
1741 scm_must_free (SCM_SYMBOL_CHARS (scmptr
));
1743 case scm_tc7_variable
:
1746 /* the various "subrs" (primitives) are never freed */
1749 if SCM_OPENP (scmptr
)
1751 int k
= SCM_PTOBNUM (scmptr
);
1752 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1753 if (!(k
< scm_numptob
))
1754 SCM_MISC_ERROR ("undefined port type", SCM_EOL
);
1756 /* Keep "revealed" ports alive. */
1757 if (scm_revealed_count (scmptr
) > 0)
1759 /* Yes, I really do mean scm_ptobs[k].free */
1760 /* rather than ftobs[k].close. .close */
1761 /* is for explicit CLOSE-PORT by user */
1762 m
+= (scm_ptobs
[k
].free
) (scmptr
);
1763 SCM_SETSTREAM (scmptr
, 0);
1764 scm_remove_from_port_table (scmptr
);
1765 scm_gc_ports_collected
++;
1766 SCM_CLR_PORT_OPEN_FLAG (scmptr
);
1770 switch SCM_TYP16 (scmptr
)
1772 case scm_tc_free_cell
:
1777 m
+= (SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
/ SCM_CHAR_BIT
);
1778 scm_must_free (SCM_BDIGITS (scmptr
));
1780 #endif /* def SCM_BIGDIG */
1781 case scm_tc16_complex
:
1782 m
+= sizeof (scm_t_complex
);
1783 scm_must_free (SCM_COMPLEX_MEM (scmptr
));
1788 k
= SCM_SMOBNUM (scmptr
);
1789 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1790 if (!(k
< scm_numsmob
))
1791 SCM_MISC_ERROR ("undefined smob type", SCM_EOL
);
1793 if (scm_smobs
[k
].free
)
1794 m
+= (scm_smobs
[k
].free
) (scmptr
);
1800 SCM_MISC_ERROR ("unknown type", SCM_EOL
);
1803 if (!--left_to_collect
)
1805 SCM_SET_CELL_WORD_0 (scmptr
, nfreelist
);
1806 *freelist
->clustertail
= scmptr
;
1807 freelist
->clustertail
= SCM_CDRLOC (scmptr
);
1809 nfreelist
= SCM_EOL
;
1810 freelist
->collected
+= span
* freelist
->cluster_size
;
1811 left_to_collect
= freelist
->cluster_size
;
1815 /* Stick the new cell on the front of nfreelist. It's
1816 critical that we mark this cell as freed; otherwise, the
1817 conservative collector might trace it as some other type
1819 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
1820 SCM_SET_FREE_CELL_CDR (scmptr
, nfreelist
);
1825 #ifdef GC_FREE_SEGMENTS
1830 freelist
->heap_size
-= seg_size
;
1831 free ((char *) scm_heap_table
[i
].bounds
[0]);
1832 scm_heap_table
[i
].bounds
[0] = 0;
1833 for (j
= i
+ 1; j
< scm_n_heap_segs
; j
++)
1834 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1835 scm_n_heap_segs
-= 1;
1836 i
--; /* We need to scan the segment just moved. */
1839 #endif /* ifdef GC_FREE_SEGMENTS */
1841 /* Update the real freelist pointer to point to the head of
1842 the list of free cells we've built for this segment. */
1843 freelist
->cells
= nfreelist
;
1844 freelist
->left_to_collect
= left_to_collect
;
1847 #ifdef GUILE_DEBUG_FREELIST
1848 scm_map_free_list ();
1852 gc_sweep_freelist_finish (&scm_master_freelist
);
1853 gc_sweep_freelist_finish (&scm_master_freelist2
);
1855 /* When we move to POSIX threads private freelists should probably
1856 be GC-protected instead. */
1857 scm_freelist
= SCM_EOL
;
1858 scm_freelist2
= SCM_EOL
;
1860 scm_cells_allocated
= (SCM_HEAP_SIZE
- scm_gc_cells_collected
);
1861 scm_gc_yield
-= scm_cells_allocated
;
1863 if (scm_mallocated
< m
)
1864 /* The byte count of allocated objects has underflowed. This is
1865 probably because you forgot to report the sizes of objects you
1866 have allocated, by calling scm_done_malloc or some such. When
1867 the GC freed them, it subtracted their size from
1868 scm_mallocated, which underflowed. */
1871 scm_mallocated
-= m
;
1872 scm_gc_malloc_collected
= m
;
1878 /* {Front end to malloc}
1880 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1883 * These functions provide services comparable to malloc, realloc, and
1884 * free. They should be used when allocating memory that will be under
1885 * control of the garbage collector, i.e., if the memory may be freed
1886 * during garbage collection.
1890 * Return newly malloced storage or throw an error.
1892 * The parameter WHAT is a string for error reporting.
1893 * If the threshold scm_mtrigger will be passed by this
1894 * allocation, or if the first call to malloc fails,
1895 * garbage collect -- on the presumption that some objects
1896 * using malloced storage may be collected.
1898 * The limit scm_mtrigger may be raised by this allocation.
1901 scm_must_malloc (size_t size
, const char *what
)
1904 unsigned long nm
= scm_mallocated
+ size
;
1907 /* The byte count of allocated objects has overflowed. This is
1908 probably because you forgot to report the correct size of freed
1909 memory in some of your smob free methods. */
1912 if (nm
<= scm_mtrigger
)
1914 SCM_SYSCALL (ptr
= malloc (size
));
1917 scm_mallocated
= nm
;
1918 #ifdef GUILE_DEBUG_MALLOC
1919 scm_malloc_register (ptr
, what
);
1927 nm
= scm_mallocated
+ size
;
1930 /* The byte count of allocated objects has overflowed. This is
1931 probably because you forgot to report the correct size of freed
1932 memory in some of your smob free methods. */
1935 SCM_SYSCALL (ptr
= malloc (size
));
1938 scm_mallocated
= nm
;
1940 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1941 unsigned long old_trigger
= scm_mtrigger
;
1942 if (nm
> scm_mtrigger
)
1943 scm_mtrigger
= nm
+ nm
/ 2;
1945 scm_mtrigger
+= scm_mtrigger
/ 2;
1946 if (scm_mtrigger
< old_trigger
)
1949 #ifdef GUILE_DEBUG_MALLOC
1950 scm_malloc_register (ptr
, what
);
1956 scm_memory_error (what
);
1961 * is similar to scm_must_malloc.
1964 scm_must_realloc (void *where
,
1972 if (size
<= old_size
)
1975 nm
= scm_mallocated
+ size
- old_size
;
1977 if (nm
< (size
- old_size
))
1978 /* The byte count of allocated objects has overflowed. This is
1979 probably because you forgot to report the correct size of freed
1980 memory in some of your smob free methods. */
1983 if (nm
<= scm_mtrigger
)
1985 SCM_SYSCALL (ptr
= realloc (where
, size
));
1988 scm_mallocated
= nm
;
1989 #ifdef GUILE_DEBUG_MALLOC
1990 scm_malloc_reregister (where
, ptr
, what
);
1998 nm
= scm_mallocated
+ size
- old_size
;
2000 if (nm
< (size
- old_size
))
2001 /* The byte count of allocated objects has overflowed. This is
2002 probably because you forgot to report the correct size of freed
2003 memory in some of your smob free methods. */
2006 SCM_SYSCALL (ptr
= realloc (where
, size
));
2009 scm_mallocated
= nm
;
2010 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
2011 unsigned long old_trigger
= scm_mtrigger
;
2012 if (nm
> scm_mtrigger
)
2013 scm_mtrigger
= nm
+ nm
/ 2;
2015 scm_mtrigger
+= scm_mtrigger
/ 2;
2016 if (scm_mtrigger
< old_trigger
)
2019 #ifdef GUILE_DEBUG_MALLOC
2020 scm_malloc_reregister (where
, ptr
, what
);
2025 scm_memory_error (what
);
2029 scm_must_strndup (const char *str
, size_t length
)
2031 char * dst
= scm_must_malloc (length
+ 1, "scm_must_strndup");
2032 memcpy (dst
, str
, length
);
2038 scm_must_strdup (const char *str
)
2040 return scm_must_strndup (str
, strlen (str
));
2044 scm_must_free (void *obj
)
2045 #define FUNC_NAME "scm_must_free"
2047 #ifdef GUILE_DEBUG_MALLOC
2048 scm_malloc_unregister (obj
);
2053 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL
);
2058 /* Announce that there has been some malloc done that will be freed
2059 * during gc. A typical use is for a smob that uses some malloced
2060 * memory but can not get it from scm_must_malloc (for whatever
2061 * reason). When a new object of this smob is created you call
2062 * scm_done_malloc with the size of the object. When your smob free
2063 * function is called, be sure to include this size in the return
2066 * If you can't actually free the memory in the smob free function,
2067 * for whatever reason (like reference counting), you still can (and
2068 * should) report the amount of memory freed when you actually free it.
2069 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
2070 * eh? Or even better, call scm_done_free. */
2073 scm_done_malloc (long size
)
2076 if (scm_mallocated
< size
)
2077 /* The byte count of allocated objects has underflowed. This is
2078 probably because you forgot to report the sizes of objects you
2079 have allocated, by calling scm_done_malloc or some such. When
2080 the GC freed them, it subtracted their size from
2081 scm_mallocated, which underflowed. */
2084 unsigned long nm
= scm_mallocated
+ size
;
2086 /* The byte count of allocated objects has overflowed. This is
2087 probably because you forgot to report the correct size of freed
2088 memory in some of your smob free methods. */
2092 scm_mallocated
+= size
;
2094 if (scm_mallocated
> scm_mtrigger
)
2096 scm_igc ("foreign mallocs");
2097 if (scm_mallocated
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
)
2099 if (scm_mallocated
> scm_mtrigger
)
2100 scm_mtrigger
= scm_mallocated
+ scm_mallocated
/ 2;
2102 scm_mtrigger
+= scm_mtrigger
/ 2;
2108 scm_done_free (long size
)
2111 if (scm_mallocated
< size
)
2112 /* The byte count of allocated objects has underflowed. This is
2113 probably because you forgot to report the sizes of objects you
2114 have allocated, by calling scm_done_malloc or some such. When
2115 the GC freed them, it subtracted their size from
2116 scm_mallocated, which underflowed. */
2119 unsigned long nm
= scm_mallocated
- size
;
2121 /* The byte count of allocated objects has overflowed. This is
2122 probably because you forgot to report the correct size of freed
2123 memory in some of your smob free methods. */
2127 scm_mallocated
-= size
;
2134 * Each heap segment is an array of objects of a particular size.
2135 * Every segment has an associated (possibly shared) freelist.
2136 * A table of segment records is kept that records the upper and
2137 * lower extents of the segment; this is used during the conservative
2138 * phase of gc to identify probably gc roots (because they point
2139 * into valid segments at reasonable offsets). */
2142 * is true if the first segment was smaller than INIT_HEAP_SEG.
2143 * If scm_expmem is set to one, subsequent segment allocations will
2144 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2148 size_t scm_max_segment_size
;
2151 * is the lowest base address of any heap segment.
2153 SCM_CELLPTR scm_heap_org
;
2155 scm_t_heap_seg_data
* scm_heap_table
= 0;
2156 static size_t heap_segment_table_size
= 0;
2157 size_t scm_n_heap_segs
= 0;
2160 * initializes a new heap segment and returns the number of objects it contains.
2162 * The segment origin and segment size in bytes are input parameters.
2163 * The freelist is both input and output.
2165 * This function presumes that the scm_heap_table has already been expanded
2166 * to accomodate a new segment record and that the markbit space was reserved
2167 * for all the cards in this segment.
2170 #define INIT_CARD(card, span) \
2172 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2174 SCM_GC_SET_CARD_DOUBLECELL (card); \
2178 init_heap_seg (SCM_CELLPTR seg_org
, size_t size
, scm_t_freelist
*freelist
)
2180 register SCM_CELLPTR ptr
;
2181 SCM_CELLPTR seg_end
;
2182 size_t new_seg_index
;
2183 ptrdiff_t n_new_cells
;
2184 int span
= freelist
->span
;
2186 if (seg_org
== NULL
)
2189 /* Align the begin ptr up.
2191 ptr
= SCM_GC_CARD_UP (seg_org
);
2193 /* Compute the ceiling on valid object pointers w/in this segment.
2195 seg_end
= SCM_GC_CARD_DOWN ((char *)seg_org
+ size
);
2197 /* Find the right place and insert the segment record.
2200 while (new_seg_index
< scm_n_heap_segs
2201 && SCM_PTR_LE (scm_heap_table
[new_seg_index
].bounds
[0], seg_org
))
2206 for (i
= scm_n_heap_segs
; i
> new_seg_index
; --i
)
2207 scm_heap_table
[i
] = scm_heap_table
[i
- 1];
2212 scm_heap_table
[new_seg_index
].span
= span
;
2213 scm_heap_table
[new_seg_index
].freelist
= freelist
;
2214 scm_heap_table
[new_seg_index
].bounds
[0] = ptr
;
2215 scm_heap_table
[new_seg_index
].bounds
[1] = seg_end
;
2218 n_new_cells
= seg_end
- ptr
;
2220 freelist
->heap_size
+= n_new_cells
;
2222 /* Partition objects in this segment into clusters */
2225 SCM
*clusterp
= &clusters
;
2227 NEXT_DATA_CELL (ptr
, span
);
2228 while (ptr
< seg_end
)
2230 scm_cell
*nxt
= ptr
;
2231 scm_cell
*prv
= NULL
;
2232 scm_cell
*last_card
= NULL
;
2233 int n_data_cells
= (SCM_GC_CARD_N_DATA_CELLS
/ span
) * SCM_CARDS_PER_CLUSTER
- 1;
2234 NEXT_DATA_CELL(nxt
, span
);
2236 /* Allocate cluster spine
2238 *clusterp
= PTR2SCM (ptr
);
2239 SCM_SETCAR (*clusterp
, PTR2SCM (nxt
));
2240 clusterp
= SCM_CDRLOC (*clusterp
);
2243 while (n_data_cells
--)
2245 scm_cell
*card
= SCM_GC_CELL_CARD (ptr
);
2246 SCM scmptr
= PTR2SCM (ptr
);
2248 NEXT_DATA_CELL (nxt
, span
);
2251 if (card
!= last_card
)
2253 INIT_CARD (card
, span
);
2257 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
2258 SCM_SET_FREE_CELL_CDR (scmptr
, PTR2SCM (nxt
));
2263 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv
), SCM_EOL
);
2268 scm_cell
*ref
= seg_end
;
2269 NEXT_DATA_CELL (ref
, span
);
2271 /* [cmm] looks like the segment size doesn't divide cleanly by
2272 cluster size. bad cmm! */
2276 /* Patch up the last cluster pointer in the segment
2277 * to join it to the input freelist.
2279 *clusterp
= freelist
->clusters
;
2280 freelist
->clusters
= clusters
;
2284 fprintf (stderr
, "H");
2290 round_to_cluster_size (scm_t_freelist
*freelist
, size_t len
)
2292 size_t cluster_size_in_bytes
= CLUSTER_SIZE_IN_BYTES (freelist
);
2295 (len
+ cluster_size_in_bytes
- 1) / cluster_size_in_bytes
* cluster_size_in_bytes
2296 + ALIGNMENT_SLACK (freelist
);
2300 alloc_some_heap (scm_t_freelist
*freelist
, policy_on_error error_policy
)
2301 #define FUNC_NAME "alloc_some_heap"
2306 if (scm_gc_heap_lock
)
2308 /* Critical code sections (such as the garbage collector) aren't
2309 * supposed to add heap segments.
2311 fprintf (stderr
, "alloc_some_heap: Can not extend locked heap.\n");
2315 if (scm_n_heap_segs
== heap_segment_table_size
)
2317 /* We have to expand the heap segment table to have room for the new
2318 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2319 * init_heap_seg only if the allocation of the segment itself succeeds.
2321 size_t new_table_size
= scm_n_heap_segs
+ 1;
2322 size_t size
= new_table_size
* sizeof (scm_t_heap_seg_data
);
2323 scm_t_heap_seg_data
*new_heap_table
;
2325 SCM_SYSCALL (new_heap_table
= ((scm_t_heap_seg_data
*)
2326 realloc ((char *)scm_heap_table
, size
)));
2327 if (!new_heap_table
)
2329 if (error_policy
== abort_on_error
)
2331 fprintf (stderr
, "alloc_some_heap: Could not grow heap segment table.\n");
2341 scm_heap_table
= new_heap_table
;
2342 heap_segment_table_size
= new_table_size
;
2346 /* Pick a size for the new heap segment.
2347 * The rule for picking the size of a segment is explained in
2351 /* Assure that the new segment is predicted to be large enough.
2353 * New yield should at least equal GC fraction of new heap size, i.e.
2355 * y + dh > f * (h + dh)
2358 * f : min yield fraction
2360 * dh : size of new heap segment
2362 * This gives dh > (f * h - y) / (1 - f)
2364 int f
= freelist
->min_yield_fraction
;
2365 unsigned long h
= SCM_HEAP_SIZE
;
2366 size_t min_cells
= (f
* h
- 100 * (long) scm_gc_yield
) / (99 - f
);
2367 len
= SCM_EXPHEAP (freelist
->heap_size
);
2369 fprintf (stderr
, "(%ld < %ld)", (long) len
, (long) min_cells
);
2371 if (len
< min_cells
)
2372 len
= min_cells
+ freelist
->cluster_size
;
2373 len
*= sizeof (scm_cell
);
2374 /* force new sampling */
2375 freelist
->collected
= LONG_MAX
;
2378 if (len
> scm_max_segment_size
)
2379 len
= scm_max_segment_size
;
2384 smallest
= CLUSTER_SIZE_IN_BYTES (freelist
);
2389 /* Allocate with decaying ambition. */
2390 while ((len
>= SCM_MIN_HEAP_SEG_SIZE
)
2391 && (len
>= smallest
))
2393 size_t rounded_len
= round_to_cluster_size (freelist
, len
);
2394 SCM_SYSCALL (ptr
= (SCM_CELLPTR
) malloc (rounded_len
));
2397 init_heap_seg (ptr
, rounded_len
, freelist
);
2404 if (error_policy
== abort_on_error
)
2406 fprintf (stderr
, "alloc_some_heap: Could not grow heap.\n");
2413 /* {GC Protection Helper Functions}
2418 * If within a function you need to protect one or more scheme objects from
2419 * garbage collection, pass them as parameters to one of the
2420 * scm_remember_upto_here* functions below. These functions don't do
2421 * anything, but since the compiler does not know that they are actually
2422 * no-ops, it will generate code that calls these functions with the given
2423 * parameters. Therefore, you can be sure that the compiler will keep those
2424 * scheme values alive (on the stack or in a register) up to the point where
2425 * scm_remember_upto_here* is called. In other words, place the call to
2426 * scm_remember_upto_here* _behind_ the last code in your function, that
2427 * depends on the scheme object to exist.
2429 * Example: We want to make sure that the string object str does not get
2430 * garbage collected during the execution of 'some_function' in the code
2431 * below, because otherwise the characters belonging to str would be freed and
2432 * 'some_function' might access freed memory. To make sure that the compiler
2433 * keeps str alive on the stack or in a register such that it is visible to
2434 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2435 * call to 'some_function'. Note that this would not be necessary if str was
2436 * used anyway after the call to 'some_function'.
2437 * char *chars = SCM_STRING_CHARS (str);
2438 * some_function (chars);
2439 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2443 scm_remember_upto_here_1 (SCM obj SCM_UNUSED
)
2445 /* Empty. Protects a single object from garbage collection. */
2449 scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED
, SCM obj2 SCM_UNUSED
)
2451 /* Empty. Protects two objects from garbage collection. */
2455 scm_remember_upto_here (SCM obj SCM_UNUSED
, ...)
2457 /* Empty. Protects any number of objects from garbage collection. */
2461 These crazy functions prevent garbage collection
2462 of arguments after the first argument by
2463 ensuring they remain live throughout the
2464 function because they are used in the last
2465 line of the code block.
2466 It'd be better to have a nice compiler hint to
2467 aid the conservative stack-scanning GC. --03/09/00 gjb */
2469 scm_return_first (SCM elt
, ...)
2475 scm_return_first_int (int i
, ...)
2482 scm_permanent_object (SCM obj
)
2485 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
2491 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2492 other references are dropped, until the object is unprotected by calling
2493 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
2494 i. e. it is possible to protect the same object several times, but it is
2495 necessary to unprotect the object the same number of times to actually get
2496 the object unprotected. It is an error to unprotect an object more often
2497 than it has been protected before. The function scm_protect_object returns
2501 /* Implementation note: For every object X, there is a counter which
2502 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
2506 scm_gc_protect_object (SCM obj
)
2510 /* This critical section barrier will be replaced by a mutex. */
2513 handle
= scm_hashq_create_handle_x (scm_protects
, obj
, SCM_MAKINUM (0));
2514 SCM_SETCDR (handle
, scm_sum (SCM_CDR (handle
), SCM_MAKINUM (1)));
2522 /* Remove any protection for OBJ established by a prior call to
2523 scm_protect_object. This function returns OBJ.
2525 See scm_protect_object for more information. */
2527 scm_gc_unprotect_object (SCM obj
)
2531 /* This critical section barrier will be replaced by a mutex. */
2534 handle
= scm_hashq_get_handle (scm_protects
, obj
);
2536 if (SCM_FALSEP (handle
))
2538 fprintf (stderr
, "scm_unprotect_object called on unprotected object\n");
2543 SCM count
= scm_difference (SCM_CDR (handle
), SCM_MAKINUM (1));
2544 if (SCM_EQ_P (count
, SCM_MAKINUM (0)))
2545 scm_hashq_remove_x (scm_protects
, obj
);
2547 SCM_SETCDR (handle
, count
);
2556 scm_gc_register_root (SCM
*p
)
2559 SCM key
= scm_long2num ((long) p
);
2561 /* This critical section barrier will be replaced by a mutex. */
2564 handle
= scm_hashv_create_handle_x (scm_gc_registered_roots
, key
, SCM_MAKINUM (0));
2565 SCM_SETCDR (handle
, scm_sum (SCM_CDR (handle
), SCM_MAKINUM (1)));
2571 scm_gc_unregister_root (SCM
*p
)
2574 SCM key
= scm_long2num ((long) p
);
2576 /* This critical section barrier will be replaced by a mutex. */
2579 handle
= scm_hashv_get_handle (scm_gc_registered_roots
, key
);
2581 if (SCM_FALSEP (handle
))
2583 fprintf (stderr
, "scm_gc_unregister_root called on unregistered root\n");
2588 SCM count
= scm_difference (SCM_CDR (handle
), SCM_MAKINUM (1));
2589 if (SCM_EQ_P (count
, SCM_MAKINUM (0)))
2590 scm_hashv_remove_x (scm_gc_registered_roots
, key
);
2592 SCM_SETCDR (handle
, count
);
2599 scm_gc_register_roots (SCM
*b
, unsigned long n
)
2602 for (; p
< b
+ n
; ++p
)
2603 scm_gc_register_root (p
);
2607 scm_gc_unregister_roots (SCM
*b
, unsigned long n
)
2610 for (; p
< b
+ n
; ++p
)
2611 scm_gc_unregister_root (p
);
2616 /* called on process termination. */
2622 extern int on_exit (void (*procp
) (), int arg
);
2625 cleanup (int status
, void *arg
)
2627 #error Dont know how to setup a cleanup handler on your system.
2632 scm_flush_all_ports ();
2637 make_initial_segment (size_t init_heap_size
, scm_t_freelist
*freelist
)
2639 size_t rounded_size
= round_to_cluster_size (freelist
, init_heap_size
);
2641 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2645 rounded_size
= round_to_cluster_size (freelist
, SCM_HEAP_SEG_SIZE
);
2646 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2654 if (freelist
->min_yield_fraction
)
2655 freelist
->min_yield
= (freelist
->heap_size
* freelist
->min_yield_fraction
2657 freelist
->grow_heap_p
= (freelist
->heap_size
< freelist
->min_yield
);
2664 init_freelist (scm_t_freelist
*freelist
,
2669 freelist
->clusters
= SCM_EOL
;
2670 freelist
->cluster_size
= cluster_size
+ 1;
2671 freelist
->left_to_collect
= 0;
2672 freelist
->clusters_allocated
= 0;
2673 freelist
->min_yield
= 0;
2674 freelist
->min_yield_fraction
= min_yield
;
2675 freelist
->span
= span
;
2676 freelist
->collected
= 0;
2677 freelist
->collected_1
= 0;
2678 freelist
->heap_size
= 0;
/* Get an integer from an environment variable.

   Returns the value of VAR parsed as a base-10 integer, or DEF when
   VAR is unset, empty/non-numeric, or out of int range.  Previously an
   overflowing value was truncated silently (strtol's ERANGE result cast
   to int); now such values fall back to DEF as well.  */
static int
scm_i_getenv_int (const char *var, int def)
{
  char *end, *val = getenv (var);
  long res;

  if (!val)
    return def;
  errno = 0;
  res = strtol (val, &end, 10);
  /* Reject empty parses and values that don't fit in an int.  */
  if (end == val || errno == ERANGE || res > INT_MAX || res < INT_MIN)
    return def;
  return (int) res;
}
2700 unsigned long gc_trigger_1
;
2701 unsigned long gc_trigger_2
;
2702 size_t init_heap_size_1
;
2703 size_t init_heap_size_2
;
2706 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2707 scm_tc16_allocated
= scm_make_smob_type ("allocated cell", 0);
2708 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
2710 j
= SCM_NUM_PROTECTS
;
2712 scm_sys_protects
[--j
] = SCM_BOOL_F
;
2715 scm_freelist
= SCM_EOL
;
2716 scm_freelist2
= SCM_EOL
;
2717 gc_trigger_1
= scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1
);
2718 init_freelist (&scm_master_freelist
, 1, SCM_CLUSTER_SIZE_1
, gc_trigger_1
);
2719 gc_trigger_2
= scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2
);
2720 init_freelist (&scm_master_freelist2
, 2, SCM_CLUSTER_SIZE_2
, gc_trigger_2
);
2721 scm_max_segment_size
= scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size
);
2725 j
= SCM_HEAP_SEG_SIZE
;
2726 scm_mtrigger
= SCM_INIT_MALLOC_LIMIT
;
2727 scm_heap_table
= ((scm_t_heap_seg_data
*)
2728 scm_must_malloc (sizeof (scm_t_heap_seg_data
) * 2, "hplims"));
2729 heap_segment_table_size
= 2;
2731 mark_space_ptr
= &mark_space_head
;
2733 init_heap_size_1
= scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1
);
2734 init_heap_size_2
= scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2
);
2735 if (make_initial_segment (init_heap_size_1
, &scm_master_freelist
) ||
2736 make_initial_segment (init_heap_size_2
, &scm_master_freelist2
))
2739 /* scm_hplims[0] can change. do not remove scm_heap_org */
2740 scm_heap_org
= CELL_UP (scm_heap_table
[0].bounds
[0], 1);
2742 scm_c_hook_init (&scm_before_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2743 scm_c_hook_init (&scm_before_mark_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2744 scm_c_hook_init (&scm_before_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2745 scm_c_hook_init (&scm_after_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2746 scm_c_hook_init (&scm_after_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2748 /* Initialise the list of ports. */
2749 scm_port_table
= (scm_t_port
**)
2750 malloc (sizeof (scm_t_port
*) * scm_port_table_room
);
2751 if (!scm_port_table
)
2758 on_exit (cleanup
, 0);
2762 scm_stand_in_procs
= SCM_EOL
;
2763 scm_permobjs
= SCM_EOL
;
2764 scm_protects
= scm_c_make_hash_table (31);
2765 scm_gc_registered_roots
= scm_c_make_hash_table (31);
2772 SCM scm_after_gc_hook
;
2774 static SCM gc_async
;
2776 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2777 * is run after the gc, as soon as the asynchronous events are handled by the
2781 gc_async_thunk (void)
2783 scm_c_run_hook (scm_after_gc_hook
, SCM_EOL
);
2784 return SCM_UNSPECIFIED
;
2788 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2789 * the garbage collection. The only purpose of this function is to mark the
2790 * gc_async (which will eventually lead to the execution of the
2794 mark_gc_async (void * hook_data SCM_UNUSED
,
2795 void *func_data SCM_UNUSED
,
2796 void *data SCM_UNUSED
)
2798 /* If cell access debugging is enabled, the user may choose to perform
2799 * additional garbage collections after an arbitrary number of cell
2800 * accesses. We don't want the scheme level after-gc-hook to be performed
2801 * for each of these garbage collections for the following reason: The
2802 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
2803 * after-gc-hook was performed with every gc, and if the gc was performed
2804 * after a very small number of cell accesses, then the number of cell
2805 * accesses during the execution of the after-gc-hook will suffice to cause
2806 * the execution of the next gc. Then, guile would keep executing the
2807 * after-gc-hook over and over again, and would never come to do other
2810 * To overcome this problem, if cell access debugging with additional
2811 * garbage collections is enabled, the after-gc-hook is never run by the
2812 * garbage collecter. When running guile with cell access debugging and the
2813 * execution of the after-gc-hook is desired, then it is necessary to run
2814 * the hook explicitly from the user code. This has the effect, that from
2815 * the scheme level point of view it seems that garbage collection is
2816 * performed with a much lower frequency than it actually is. Obviously,
2817 * this will not work for code that depends on a fixed one to one
2818 * relationship between the execution counts of the C level garbage
2819 * collection hooks and the execution count of the scheme level
2822 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2823 if (debug_cells_gc_interval
== 0)
2824 scm_system_async_mark (gc_async
);
2826 scm_system_async_mark (gc_async
);
2838 scm_after_gc_hook
= scm_permanent_object (scm_make_hook (SCM_INUM0
));
2839 scm_c_define ("after-gc-hook", scm_after_gc_hook
);
2841 after_gc_thunk
= scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0
,
2843 gc_async
= scm_system_async (after_gc_thunk
); /* protected via scm_asyncs */
2845 scm_c_hook_add (&scm_after_gc_c_hook
, mark_gc_async
, NULL
, 0);
2847 #ifndef SCM_MAGIC_SNARFER
2848 #include "libguile/gc.x"
2852 #endif /*MARK_DEPENDENCIES*/