1 /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
43 /* #define DEBUGINFO */
45 /* SECTION: This code is compiled once.
48 #ifndef MARK_DEPENDENCIES
55 #include "libguile/_scm.h"
56 #include "libguile/eval.h"
57 #include "libguile/stime.h"
58 #include "libguile/stackchk.h"
59 #include "libguile/struct.h"
60 #include "libguile/smob.h"
61 #include "libguile/unif.h"
62 #include "libguile/async.h"
63 #include "libguile/ports.h"
64 #include "libguile/root.h"
65 #include "libguile/strings.h"
66 #include "libguile/vectors.h"
67 #include "libguile/weaks.h"
68 #include "libguile/hashtab.h"
69 #include "libguile/tags.h"
71 #include "libguile/validate.h"
72 #include "libguile/deprecation.h"
73 #include "libguile/gc.h"
75 #ifdef GUILE_DEBUG_MALLOC
76 #include "libguile/debug-malloc.h"
89 #define var_start(x, y) va_start(x, y)
92 #define var_start(x, y) va_start(x)
/* True iff X carries the cell tag (tc3 == scm_tc3_cons), i.e. it may be a
   pointer into the cell heap rather than an immediate value.  */
#define CELL_P(x)  (SCM_ITAG3 (x) == scm_tc3_cons)
/* Flag: non-zero while a garbage collection is in progress.  */
unsigned int scm_gc_running_p = 0;
103 #if (SCM_DEBUG_CELL_ACCESSES == 1)
105 scm_t_bits scm_tc16_allocated
;
107 /* Set this to != 0 if every cell that is accessed shall be checked:
109 unsigned int scm_debug_cell_accesses_p
= 1;
111 /* Set this to 0 if no additional gc's shall be performed, otherwise set it to
112 * the number of cell accesses after which a gc shall be called.
114 static unsigned int debug_cells_gc_interval
= 0;
117 /* Assert that the given object is a valid reference to a valid cell. This
118 * test involves to determine whether the object is a cell pointer, whether
119 * this pointer actually points into a heap segment and whether the cell
120 * pointed to is not a free cell. Further, additional garbage collections may
121 * get executed after a user defined number of cell accesses. This helps to
122 * find places in the C code where references are dropped for extremely short
126 scm_assert_cell_valid (SCM cell
)
128 static unsigned int already_running
= 0;
130 if (scm_debug_cell_accesses_p
&& !already_running
)
132 already_running
= 1; /* set to avoid recursion */
134 if (!scm_cellp (cell
))
136 fprintf (stderr
, "scm_assert_cell_valid: Not a cell object: %lux\n",
137 (unsigned long) SCM_UNPACK (cell
));
140 else if (!scm_gc_running_p
)
142 /* Dirk::FIXME:: During garbage collection there occur references to
143 free cells. This is allright during conservative marking, but
144 should not happen otherwise (I think). The case of free cells
145 accessed during conservative marking is handled in function
146 scm_mark_locations. However, there still occur accesses to free
147 cells during gc. I don't understand why this happens. If it is
148 a bug and gets fixed, the following test should also work while
151 if (SCM_FREE_CELL_P (cell
))
153 fprintf (stderr
, "scm_assert_cell_valid: Accessing free cell: %lux\n",
154 (unsigned long) SCM_UNPACK (cell
));
158 /* If desired, perform additional garbage collections after a user
159 * defined number of cell accesses.
161 if (debug_cells_gc_interval
)
163 static unsigned int counter
= 0;
171 counter
= debug_cells_gc_interval
;
172 scm_igc ("scm_assert_cell_valid");
176 already_running
= 0; /* re-enable */
181 SCM_DEFINE (scm_set_debug_cell_accesses_x
, "set-debug-cell-accesses!", 1, 0, 0,
183 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
184 "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
185 "but no additional calls to garbage collection are issued.\n"
186 "If @var{flag} is a number, cell access checking is enabled,\n"
187 "with an additional garbage collection after the given\n"
188 "number of cell accesses.\n"
189 "This procedure only exists when the compile-time flag\n"
190 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
191 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
193 if (SCM_FALSEP (flag
)) {
194 scm_debug_cell_accesses_p
= 0;
195 } else if (SCM_EQ_P (flag
, SCM_BOOL_T
)) {
196 debug_cells_gc_interval
= 0;
197 scm_debug_cell_accesses_p
= 1;
198 } else if (SCM_INUMP (flag
)) {
199 long int f
= SCM_INUM (flag
);
200 if (f
<= 0) SCM_OUT_OF_RANGE (1, flag
);
201 debug_cells_gc_interval
= f
;
202 scm_debug_cell_accesses_p
= 1;
204 SCM_WRONG_TYPE_ARG (1, flag
);
206 return SCM_UNSPECIFIED
;
210 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
214 /* {heap tuning parameters}
216 * These are parameters for controlling memory allocation. The heap
217 * is the area out of which scm_cons, and object headers are allocated.
219 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
220 * 64 bit machine. The units of the _SIZE parameters are bytes.
221 * Cons pairs and object headers occupy one heap cell.
223 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
224 * allocated initially the heap will grow by half its current size
225 * each subsequent time more heap is needed.
227 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
228 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
229 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type size_t. This code
230 * is in scm_init_storage() and alloc_some_heap() in sys.c
232 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
233 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
235 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
238 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
241 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
242 * reclaimed by a GC triggered by must_malloc. If less than this is
243 * reclaimed, the trigger threshold is raised. [I don't know what a
244 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
245 * work around a oscillation that caused almost constant GC.]
249 * Heap size 45000 and 40% min yield gives quick startup and no extra
250 * heap allocation. Having higher values on min yield may lead to
251 * large heaps, especially if code behaviour is varying its
252 * maximum consumption between different freelists.
255 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
256 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
257 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
258 size_t scm_default_init_heap_size_1
= (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER
- 1)
259 / SCM_CARDS_PER_CLUSTER
) * SCM_GC_CARD_SIZE
);
260 int scm_default_min_yield_1
= 40;
262 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
263 size_t scm_default_init_heap_size_2
= (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER
- 1)
264 / SCM_CARDS_PER_CLUSTER
) * SCM_GC_CARD_SIZE
);
265 /* The following value may seem large, but note that if we get to GC at
266 * all, this means that we have a numerically intensive application
268 int scm_default_min_yield_2
= 40;
270 size_t scm_default_max_segment_size
= 2097000L;/* a little less (adm) than 2 Mb */
/* Smallest acceptable size, in bytes, for a newly allocated heap segment.  */
#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
274 # define SCM_HEAP_SEG_SIZE 32768L
277 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
279 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
/* Make heap grow with factor 1.5: the increment is half the current size.
   (The parameter is parenthesized in the expansion to keep expression
   arguments from binding to the `/ 2' — standard macro hygiene.)  */
#define SCM_EXPHEAP(scm_heap_size) ((scm_heap_size) / 2)

/* Initial amount of must_malloc usage, in bytes, that triggers a GC.  */
#define SCM_INIT_MALLOC_LIMIT 100000

/* If a malloc-triggered GC reclaims less than this many bytes, the trigger
   threshold is raised — works around an oscillation that caused almost
   constant GC.  */
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT / 10)
287 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
288 aligned inner bounds for allocated storage */
291 /*in 386 protected mode we must only adjust the offset */
292 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
293 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
296 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
297 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
299 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
300 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
/* True iff X is aligned on a double-cell (2 * sizeof (scm_cell)) boundary.  */
#define DOUBLECELL_ALIGNED_P(x) \
  (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)

/* Worst-case padding needed to card-align a freshly allocated segment.  */
#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)

/* Number of bytes occupied by one cluster of FREELIST, in whole cards.  */
#define CLUSTER_SIZE_IN_BYTES(freelist) \
  (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) \
   * SCM_GC_CARD_SIZE)
314 typedef struct scm_t_freelist
{
315 /* collected cells */
317 /* number of cells left to collect before cluster is full */
318 unsigned int left_to_collect
;
319 /* number of clusters which have been allocated */
320 unsigned int clusters_allocated
;
321 /* a list of freelists, each of size cluster_size,
322 * except the last one which may be shorter
326 /* this is the number of objects in each cluster, including the spine cell */
327 unsigned int cluster_size
;
328 /* indicates that we should grow heap instead of GC:ing
331 /* minimum yield on this list in order not to grow the heap
334 /* defines min_yield as percent of total heap size
336 int min_yield_fraction
;
337 /* number of cells per object on this list */
339 /* number of collected cells during last GC */
340 unsigned long collected
;
341 /* number of collected cells during penultimate GC */
342 unsigned long collected_1
;
343 /* total number of cells in heap segments
344 * belonging to this list.
346 unsigned long heap_size
;
349 SCM scm_freelist
= SCM_EOL
;
350 scm_t_freelist scm_master_freelist
= {
351 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_1
, 0, 0, 0, 1, 0, 0, 0
353 SCM scm_freelist2
= SCM_EOL
;
354 scm_t_freelist scm_master_freelist2
= {
355 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_2
, 0, 0, 0, 2, 0, 0, 0
359 * is the number of bytes of must_malloc allocation needed to trigger gc.
361 unsigned long scm_mtrigger
;
364 * If set, don't expand the heap. Set only during gc, during which no allocation
365 * is supposed to take place anyway.
367 int scm_gc_heap_lock
= 0;
370 * Don't pause for collection if this is set -- just
373 int scm_block_gc
= 1;
375 /* During collection, this accumulates objects holding
378 SCM scm_weak_vectors
;
380 /* During collection, this accumulates structures which are to be freed.
382 SCM scm_structs_to_free
;
384 /* GC Statistics Keeping
386 unsigned long scm_cells_allocated
= 0;
387 unsigned long scm_mallocated
= 0;
388 unsigned long scm_gc_cells_collected
;
389 unsigned long scm_gc_yield
;
390 static unsigned long scm_gc_yield_1
= 0; /* previous GC yield */
391 unsigned long scm_gc_malloc_collected
;
392 unsigned long scm_gc_ports_collected
;
393 unsigned long scm_gc_time_taken
= 0;
394 static unsigned long t_before_gc
;
395 static unsigned long t_before_sweep
;
396 unsigned long scm_gc_mark_time_taken
= 0;
397 unsigned long scm_gc_sweep_time_taken
= 0;
398 unsigned long scm_gc_times
= 0;
399 unsigned long scm_gc_cells_swept
= 0;
400 double scm_gc_cells_marked_acc
= 0.;
401 double scm_gc_cells_swept_acc
= 0.;
403 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
404 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
405 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
406 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
407 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
408 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
409 SCM_SYMBOL (sym_gc_mark_time_taken
, "gc-mark-time-taken");
410 SCM_SYMBOL (sym_gc_sweep_time_taken
, "gc-sweep-time-taken");
411 SCM_SYMBOL (sym_times
, "gc-times");
412 SCM_SYMBOL (sym_cells_marked
, "cells-marked");
413 SCM_SYMBOL (sym_cells_swept
, "cells-swept");
415 typedef struct scm_t_heap_seg_data
417 /* lower and upper bounds of the segment */
418 SCM_CELLPTR bounds
[2];
420 /* address of the head-of-freelist pointer for this segment's cells.
421 All segments usually point to the same one, scm_freelist. */
422 scm_t_freelist
*freelist
;
424 /* number of cells per object in this segment */
426 } scm_t_heap_seg_data
;
430 static size_t init_heap_seg (SCM_CELLPTR
, size_t, scm_t_freelist
*);
432 typedef enum { return_on_error
, abort_on_error
} policy_on_error
;
433 static void alloc_some_heap (scm_t_freelist
*, policy_on_error
);
/* Total number of cells in both master freelists' heap segments.  */
#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)

#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))

/* Mark-space (bit vector) growth granularity.  */
#define BVEC_GROW_SIZE 256
#define BVEC_GROW_SIZE_IN_LIMBS \
  (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
#define BVEC_GROW_SIZE_IN_BYTES \
  (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))
444 /* mark space allocation */
446 typedef struct scm_t_mark_space
448 scm_t_c_bvec_limb
*bvec_space
;
449 struct scm_t_mark_space
*next
;
452 static scm_t_mark_space
*current_mark_space
;
453 static scm_t_mark_space
**mark_space_ptr
;
454 static ptrdiff_t current_mark_space_offset
;
455 static scm_t_mark_space
*mark_space_head
;
457 static scm_t_c_bvec_limb
*
459 #define FUNC_NAME "get_bvec"
461 scm_t_c_bvec_limb
*res
;
463 if (!current_mark_space
)
465 SCM_SYSCALL (current_mark_space
= (scm_t_mark_space
*) malloc (sizeof (scm_t_mark_space
)));
466 if (!current_mark_space
)
467 SCM_MISC_ERROR ("could not grow heap", SCM_EOL
);
469 current_mark_space
->bvec_space
= NULL
;
470 current_mark_space
->next
= NULL
;
472 *mark_space_ptr
= current_mark_space
;
473 mark_space_ptr
= &(current_mark_space
->next
);
478 if (!(current_mark_space
->bvec_space
))
480 SCM_SYSCALL (current_mark_space
->bvec_space
=
481 (scm_t_c_bvec_limb
*) calloc (BVEC_GROW_SIZE_IN_BYTES
, 1));
482 if (!(current_mark_space
->bvec_space
))
483 SCM_MISC_ERROR ("could not grow heap", SCM_EOL
);
485 current_mark_space_offset
= 0;
490 if (current_mark_space_offset
== BVEC_GROW_SIZE_IN_LIMBS
)
492 current_mark_space
= NULL
;
497 res
= current_mark_space
->bvec_space
+ current_mark_space_offset
;
498 current_mark_space_offset
+= SCM_GC_CARD_BVEC_SIZE_IN_LIMBS
;
508 scm_t_mark_space
*ms
;
510 for (ms
= mark_space_head
; ms
; ms
= ms
->next
)
511 memset (ms
->bvec_space
, 0, BVEC_GROW_SIZE_IN_BYTES
);
516 /* Debugging functions. */
518 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
520 static long int heap_segment (SCM obj
); /* forw decl: non-debugging func */
523 map_free_list (scm_t_freelist
*master
, SCM freelist
)
525 long last_seg
= -1, count
= 0;
528 for (f
= freelist
; !SCM_NULLP (f
); f
= SCM_FREE_CELL_CDR (f
))
530 long int this_seg
= heap_segment (f
);
535 "map_free_list: can't find segment containing cell %lux\n",
536 (unsigned long int) SCM_UNPACK (f
));
539 else if (this_seg
!= last_seg
)
542 fprintf (stderr
, " %5ld %d-cells in segment %ld\n",
543 (long) count
, master
->span
, (long) last_seg
);
550 fprintf (stderr
, " %5ld %d-cells in segment %ld\n",
551 (long) count
, master
->span
, (long) last_seg
);
554 SCM_DEFINE (scm_map_free_list
, "map-free-list", 0, 0, 0,
556 "Print debugging information about the free-list.\n"
557 "@code{map-free-list} is only included in\n"
558 "@code{--enable-guile-debug} builds of Guile.")
559 #define FUNC_NAME s_scm_map_free_list
563 fprintf (stderr
, "%ld segments total (%d:%ld",
564 (long) scm_n_heap_segs
,
565 scm_heap_table
[0].span
,
566 (long) (scm_heap_table
[0].bounds
[1] - scm_heap_table
[0].bounds
[0]));
568 for (i
= 1; i
!= scm_n_heap_segs
; i
++)
569 fprintf (stderr
, ", %d:%ld",
570 scm_heap_table
[i
].span
,
571 (long) (scm_heap_table
[i
].bounds
[1] - scm_heap_table
[i
].bounds
[0]));
572 fprintf (stderr
, ")\n");
573 map_free_list (&scm_master_freelist
, scm_freelist
);
574 map_free_list (&scm_master_freelist2
, scm_freelist2
);
577 return SCM_UNSPECIFIED
;
/* State shared by free_list_length and free_list_lengths while printing
   run-length summaries of equal-sized clusters.  */
static long last_cluster;
static long last_size;
585 free_list_length (char *title
, long i
, SCM freelist
)
589 for (ls
= freelist
; !SCM_NULLP (ls
); ls
= SCM_FREE_CELL_CDR (ls
))
590 if (SCM_FREE_CELL_P (ls
))
594 fprintf (stderr
, "bad cell in %s at position %ld\n", title
, (long) n
);
601 if (last_cluster
== i
- 1)
602 fprintf (stderr
, "\t%ld\n", (long) last_size
);
604 fprintf (stderr
, "-%ld\t%ld\n", (long) (i
- 1), (long) last_size
);
607 fprintf (stderr
, "%s %ld", title
, (long) i
);
609 fprintf (stderr
, "%s\t%ld\n", title
, (long) n
);
617 free_list_lengths (char *title
, scm_t_freelist
*master
, SCM freelist
)
620 long i
= 0, len
, n
= 0;
621 fprintf (stderr
, "%s\n\n", title
);
622 n
+= free_list_length ("free list", -1, freelist
);
623 for (clusters
= master
->clusters
;
624 SCM_NNULLP (clusters
);
625 clusters
= SCM_CDR (clusters
))
627 len
= free_list_length ("cluster", i
++, SCM_CAR (clusters
));
630 if (last_cluster
== i
- 1)
631 fprintf (stderr
, "\t%ld\n", (long) last_size
);
633 fprintf (stderr
, "-%ld\t%ld\n", (long) (i
- 1), (long) last_size
);
634 fprintf (stderr
, "\ntotal %ld objects\n\n", (long) n
);
637 SCM_DEFINE (scm_free_list_length
, "free-list-length", 0, 0, 0,
639 "Print debugging information about the free-list.\n"
640 "@code{free-list-length} is only included in\n"
641 "@code{--enable-guile-debug} builds of Guile.")
642 #define FUNC_NAME s_scm_free_list_length
644 free_list_lengths ("1-cells", &scm_master_freelist
, scm_freelist
);
645 free_list_lengths ("2-cells", &scm_master_freelist2
, scm_freelist2
);
646 return SCM_UNSPECIFIED
;
650 #endif /* defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST) */
652 #ifdef GUILE_DEBUG_FREELIST
654 /* Non-zero if freelist debugging is in effect. Set this via
655 `gc-set-debug-check-freelist!'. */
/* Non-zero if freelist debugging is in effect.  Set this via
   `gc-set-debug-check-freelist!'.  */
static int scm_debug_check_freelist = 0;

/* Number of calls to SCM_NEWCELL / SCM_NEWCELL2 since startup.  */
static unsigned long scm_newcell_count;
static unsigned long scm_newcell2_count;
662 /* Search freelist for anything that isn't marked as a free cell.
663 Abort if we find something. */
665 scm_check_freelist (SCM freelist
)
670 for (f
= freelist
; !SCM_NULLP (f
); f
= SCM_FREE_CELL_CDR (f
), i
++)
671 if (!SCM_FREE_CELL_P (f
))
673 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %lu'th elt\n",
674 (long) scm_newcell_count
, (long) i
);
679 SCM_DEFINE (scm_gc_set_debug_check_freelist_x
, "gc-set-debug-check-freelist!", 1, 0, 0,
681 "If @var{flag} is @code{#t}, check the freelist for consistency\n"
682 "on each cell allocation. This procedure only exists when the\n"
683 "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
684 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
686 /* [cmm] I did a double-take when I read this code the first time.
688 SCM_VALIDATE_BOOL_COPY (1, flag
, scm_debug_check_freelist
);
689 return SCM_UNSPECIFIED
;
695 scm_debug_newcell (void)
700 if (scm_debug_check_freelist
)
702 scm_check_freelist (scm_freelist
);
706 /* The rest of this is supposed to be identical to the SCM_NEWCELL
708 if (SCM_NULLP (scm_freelist
))
710 new = scm_gc_for_newcell (&scm_master_freelist
, &scm_freelist
);
711 SCM_GC_SET_ALLOCATED (new);
716 scm_freelist
= SCM_FREE_CELL_CDR (scm_freelist
);
717 SCM_GC_SET_ALLOCATED (new);
724 scm_debug_newcell2 (void)
728 scm_newcell2_count
++;
729 if (scm_debug_check_freelist
)
731 scm_check_freelist (scm_freelist2
);
735 /* The rest of this is supposed to be identical to the SCM_NEWCELL
737 if (SCM_NULLP (scm_freelist2
))
739 new = scm_gc_for_newcell (&scm_master_freelist2
, &scm_freelist2
);
740 SCM_GC_SET_ALLOCATED (new);
745 scm_freelist2
= SCM_FREE_CELL_CDR (scm_freelist2
);
746 SCM_GC_SET_ALLOCATED (new);
752 #endif /* GUILE_DEBUG_FREELIST */
757 master_cells_allocated (scm_t_freelist
*master
)
759 /* the '- 1' below is to ignore the cluster spine cells. */
760 long objects
= master
->clusters_allocated
* (master
->cluster_size
- 1);
761 if (SCM_NULLP (master
->clusters
))
762 objects
-= master
->left_to_collect
;
763 return master
->span
* objects
;
767 freelist_length (SCM freelist
)
770 for (n
= 0; !SCM_NULLP (freelist
); freelist
= SCM_FREE_CELL_CDR (freelist
))
776 compute_cells_allocated ()
778 return (scm_cells_allocated
779 + master_cells_allocated (&scm_master_freelist
)
780 + master_cells_allocated (&scm_master_freelist2
)
781 - scm_master_freelist
.span
* freelist_length (scm_freelist
)
782 - scm_master_freelist2
.span
* freelist_length (scm_freelist2
));
785 /* {Scheme Interface to GC}
788 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
790 "Return an association list of statistics about Guile's current\n"
792 #define FUNC_NAME s_scm_gc_stats
797 unsigned long int local_scm_mtrigger
;
798 unsigned long int local_scm_mallocated
;
799 unsigned long int local_scm_heap_size
;
800 unsigned long int local_scm_cells_allocated
;
801 unsigned long int local_scm_gc_time_taken
;
802 unsigned long int local_scm_gc_times
;
803 unsigned long int local_scm_gc_mark_time_taken
;
804 unsigned long int local_scm_gc_sweep_time_taken
;
805 double local_scm_gc_cells_swept
;
806 double local_scm_gc_cells_marked
;
816 for (i
= scm_n_heap_segs
; i
--; )
817 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
818 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
820 if (scm_n_heap_segs
!= n
)
825 /* Below, we cons to produce the resulting list. We want a snapshot of
826 * the heap situation before consing.
828 local_scm_mtrigger
= scm_mtrigger
;
829 local_scm_mallocated
= scm_mallocated
;
830 local_scm_heap_size
= SCM_HEAP_SIZE
;
831 local_scm_cells_allocated
= compute_cells_allocated ();
832 local_scm_gc_time_taken
= scm_gc_time_taken
;
833 local_scm_gc_mark_time_taken
= scm_gc_mark_time_taken
;
834 local_scm_gc_sweep_time_taken
= scm_gc_sweep_time_taken
;
835 local_scm_gc_times
= scm_gc_times
;
836 local_scm_gc_cells_swept
= scm_gc_cells_swept_acc
;
837 local_scm_gc_cells_marked
= scm_gc_cells_marked_acc
;
839 answer
= scm_list_n (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
840 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
841 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
842 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
843 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
844 scm_cons (sym_times
, scm_ulong2num (local_scm_gc_times
)),
845 scm_cons (sym_gc_mark_time_taken
, scm_ulong2num (local_scm_gc_mark_time_taken
)),
846 scm_cons (sym_gc_sweep_time_taken
, scm_ulong2num (local_scm_gc_sweep_time_taken
)),
847 scm_cons (sym_cells_marked
, scm_i_dbl2big (local_scm_gc_cells_marked
)),
848 scm_cons (sym_cells_swept
, scm_i_dbl2big (local_scm_gc_cells_swept
)),
849 scm_cons (sym_heap_segments
, heap_segs
),
858 gc_start_stats (const char *what SCM_UNUSED
)
860 t_before_gc
= scm_c_get_internal_run_time ();
861 scm_gc_cells_swept
= 0;
862 scm_gc_cells_collected
= 0;
863 scm_gc_yield_1
= scm_gc_yield
;
864 scm_gc_yield
= (scm_cells_allocated
865 + master_cells_allocated (&scm_master_freelist
)
866 + master_cells_allocated (&scm_master_freelist2
));
867 scm_gc_malloc_collected
= 0;
868 scm_gc_ports_collected
= 0;
875 unsigned long t
= scm_c_get_internal_run_time ();
876 scm_gc_time_taken
+= (t
- t_before_gc
);
877 scm_gc_sweep_time_taken
+= (t
- t_before_sweep
);
880 scm_gc_cells_marked_acc
+= scm_gc_cells_swept
- scm_gc_cells_collected
;
881 scm_gc_cells_swept_acc
+= scm_gc_cells_swept
;
885 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
887 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
888 "returned by this function for @var{obj}")
889 #define FUNC_NAME s_scm_object_address
891 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj
));
896 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
898 "Scans all of SCM objects and reclaims for further use those that are\n"
899 "no longer accessible.")
900 #define FUNC_NAME s_scm_gc
905 return SCM_UNSPECIFIED
;
911 /* {C Interface For When GC is Triggered}
915 adjust_min_yield (scm_t_freelist
*freelist
)
917 /* min yield is adjusted upwards so that next predicted total yield
918 * (allocated cells actually freed by GC) becomes
919 * `min_yield_fraction' of total heap size. Note, however, that
920 * the absolute value of min_yield will correspond to `collected'
921 * on one master (the one which currently is triggering GC).
923 * The reason why we look at total yield instead of cells collected
924 * on one list is that we want to take other freelists into account.
925 * On this freelist, we know that (local) yield = collected cells,
926 * but that's probably not the case on the other lists.
928 * (We might consider computing a better prediction, for example
929 * by computing an average over multiple GC:s.)
931 if (freelist
->min_yield_fraction
)
933 /* Pick largest of last two yields. */
934 long delta
= ((SCM_HEAP_SIZE
* freelist
->min_yield_fraction
/ 100)
935 - (long) SCM_MAX (scm_gc_yield_1
, scm_gc_yield
));
937 fprintf (stderr
, " after GC = %lu, delta = %ld\n",
938 (long) scm_cells_allocated
,
942 freelist
->min_yield
+= delta
;
947 /* When we get POSIX threads support, the master will be global and
948 * common while the freelist will be individual for each thread.
952 scm_gc_for_newcell (scm_t_freelist
*master
, SCM
*freelist
)
958 if (SCM_NULLP (master
->clusters
))
960 if (master
->grow_heap_p
|| scm_block_gc
)
962 /* In order to reduce gc frequency, try to allocate a new heap
963 * segment first, even if gc might find some free cells. If we
964 * can't obtain a new heap segment, we will try gc later.
966 master
->grow_heap_p
= 0;
967 alloc_some_heap (master
, return_on_error
);
969 if (SCM_NULLP (master
->clusters
))
971 /* The heap was not grown, either because it wasn't scheduled to
972 * grow, or because there was not enough memory available. In
973 * both cases we have to try gc to get some free cells.
976 fprintf (stderr
, "allocated = %lu, ",
977 (long) (scm_cells_allocated
978 + master_cells_allocated (&scm_master_freelist
)
979 + master_cells_allocated (&scm_master_freelist2
)));
982 adjust_min_yield (master
);
983 if (SCM_NULLP (master
->clusters
))
985 /* gc could not free any cells. Now, we _must_ allocate a
986 * new heap segment, because there is no other possibility
987 * to provide a new cell for the caller.
989 alloc_some_heap (master
, abort_on_error
);
993 cell
= SCM_CAR (master
->clusters
);
994 master
->clusters
= SCM_CDR (master
->clusters
);
995 ++master
->clusters_allocated
;
997 while (SCM_NULLP (cell
));
999 #ifdef GUILE_DEBUG_FREELIST
1000 scm_check_freelist (cell
);
1003 --scm_ints_disabled
;
1004 *freelist
= SCM_FREE_CELL_CDR (cell
);
1010 /* This is a support routine which can be used to reserve a cluster
1011 * for some special use, such as debugging. It won't be useful until
1012 * free cells are preserved between garbage collections.
1016 scm_alloc_cluster (scm_t_freelist
*master
)
1019 cell
= scm_gc_for_newcell (master
, &freelist
);
1020 SCM_SETCDR (cell
, freelist
);
1026 scm_t_c_hook scm_before_gc_c_hook
;
1027 scm_t_c_hook scm_before_mark_c_hook
;
1028 scm_t_c_hook scm_before_sweep_c_hook
;
1029 scm_t_c_hook scm_after_sweep_c_hook
;
1030 scm_t_c_hook scm_after_gc_c_hook
;
1034 scm_igc (const char *what
)
1039 scm_c_hook_run (&scm_before_gc_c_hook
, 0);
1042 SCM_NULLP (scm_freelist
)
1044 : (SCM_NULLP (scm_freelist2
) ? "o" : "m"));
1046 /* During the critical section, only the current thread may run. */
1047 SCM_CRITICAL_SECTION_START
;
1049 if (!scm_stack_base
|| scm_block_gc
)
1055 gc_start_stats (what
);
1057 if (scm_gc_heap_lock
)
1058 /* We've invoked the collector while a GC is already in progress.
1059 That should never happen. */
1064 scm_c_hook_run (&scm_before_mark_c_hook
, 0);
1066 clear_mark_space ();
1070 /* Mark objects on the C stack. */
1071 SCM_FLUSH_REGISTER_WINDOWS
;
1072 /* This assumes that all registers are saved into the jmp_buf */
1073 setjmp (scm_save_regs_gc_mark
);
1074 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
1075 ( (size_t) (sizeof (SCM_STACKITEM
) - 1 +
1076 sizeof scm_save_regs_gc_mark
)
1077 / sizeof (SCM_STACKITEM
)));
1080 unsigned long stack_len
= scm_stack_size (scm_stack_base
);
1081 #ifdef SCM_STACK_GROWS_UP
1082 scm_mark_locations (scm_stack_base
, stack_len
);
1084 scm_mark_locations (scm_stack_base
- stack_len
, stack_len
);
1088 #else /* USE_THREADS */
1090 /* Mark every thread's stack and registers */
1091 scm_threads_mark_stacks ();
1093 #endif /* USE_THREADS */
1095 j
= SCM_NUM_PROTECTS
;
1097 scm_gc_mark (scm_sys_protects
[j
]);
1099 /* mark the registered roots */
1102 for (i
= 0; i
< SCM_VECTOR_LENGTH (scm_gc_registered_roots
); ++i
) {
1103 SCM l
= SCM_VELTS (scm_gc_registered_roots
)[i
];
1104 for (; !SCM_NULLP (l
); l
= SCM_CDR (l
)) {
1105 SCM
*p
= (SCM
*) (scm_num2long (SCM_CAAR (l
), 0, NULL
));
1111 /* FIXME: we should have a means to register C functions to be run
1112 * in different phases of GC
1114 scm_mark_subr_table ();
1117 scm_gc_mark (scm_root
->handle
);
1120 t_before_sweep
= scm_c_get_internal_run_time ();
1121 scm_gc_mark_time_taken
+= (t_before_sweep
- t_before_gc
);
1123 scm_c_hook_run (&scm_before_sweep_c_hook
, 0);
1127 scm_c_hook_run (&scm_after_sweep_c_hook
, 0);
1132 SCM_CRITICAL_SECTION_END
;
1133 scm_c_hook_run (&scm_after_gc_c_hook
, 0);
1142 #define MARK scm_gc_mark
1143 #define FNAME "scm_gc_mark"
1145 #endif /*!MARK_DEPENDENCIES*/
1147 /* Mark an object precisely.
1151 #define FUNC_NAME FNAME
1155 scm_t_bits cell_type
;
1157 #ifndef MARK_DEPENDENCIES
1158 # define RECURSE scm_gc_mark
1160 /* go through the usual marking, but not for self-cycles. */
1161 # define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1165 #ifdef MARK_DEPENDENCIES
1166 goto gc_mark_loop_first_time
;
1169 /* A simple hack for debugging. Chose the second branch to get a
1170 meaningful backtrace for crashes inside the GC.
1173 #define goto_gc_mark_loop goto gc_mark_loop
1174 #define goto_gc_mark_nimp goto gc_mark_nimp
1176 #define goto_gc_mark_loop RECURSE(ptr); return
1177 #define goto_gc_mark_nimp RECURSE(ptr); return
1186 #ifdef MARK_DEPENDENCIES
1187 if (SCM_EQ_P (ptr
, p
))
1193 gc_mark_loop_first_time
:
1196 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1197 /* We are in debug mode. Check the ptr exhaustively. */
1198 if (!scm_cellp (ptr
))
1199 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL
);
1201 /* In non-debug mode, do at least some cheap testing. */
1203 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL
);
1206 #ifndef MARK_DEPENDENCIES
1208 if (SCM_GCMARKP (ptr
))
1211 SCM_SETGCMARK (ptr
);
1215 cell_type
= SCM_GC_CELL_TYPE (ptr
);
1216 switch (SCM_ITAG7 (cell_type
))
1218 case scm_tcs_cons_nimcar
:
1219 if (SCM_IMP (SCM_CDR (ptr
)))
1221 ptr
= SCM_CAR (ptr
);
1224 RECURSE (SCM_CAR (ptr
));
1225 ptr
= SCM_CDR (ptr
);
1227 case scm_tcs_cons_imcar
:
1228 ptr
= SCM_CDR (ptr
);
1231 RECURSE (SCM_SETTER (ptr
));
1232 ptr
= SCM_PROCEDURE (ptr
);
1234 case scm_tcs_struct
:
1236 /* XXX - use less explicit code. */
1237 scm_t_bits word0
= SCM_CELL_WORD_0 (ptr
) - scm_tc3_struct
;
1238 scm_t_bits
* vtable_data
= (scm_t_bits
*) word0
;
1239 SCM layout
= SCM_PACK (vtable_data
[scm_vtable_index_layout
]);
1240 long len
= SCM_SYMBOL_LENGTH (layout
);
1241 char * fields_desc
= SCM_SYMBOL_CHARS (layout
);
1242 scm_t_bits
* struct_data
= (scm_t_bits
*) SCM_STRUCT_DATA (ptr
);
1244 if (vtable_data
[scm_struct_i_flags
] & SCM_STRUCTF_ENTITY
)
1246 RECURSE (SCM_PACK (struct_data
[scm_struct_i_procedure
]));
1247 RECURSE (SCM_PACK (struct_data
[scm_struct_i_setter
]));
1253 for (x
= 0; x
< len
- 2; x
+= 2, ++struct_data
)
1254 if (fields_desc
[x
] == 'p')
1255 RECURSE (SCM_PACK (*struct_data
));
1256 if (fields_desc
[x
] == 'p')
1258 if (SCM_LAYOUT_TAILP (fields_desc
[x
+ 1]))
1259 for (x
= *struct_data
++; x
; --x
, ++struct_data
)
1260 RECURSE (SCM_PACK (*struct_data
));
1262 RECURSE (SCM_PACK (*struct_data
));
1266 ptr
= SCM_PACK (vtable_data
[scm_vtable_index_vtable
]);
1270 case scm_tcs_closures
:
1271 if (SCM_IMP (SCM_ENV (ptr
)))
1273 ptr
= SCM_CLOSCAR (ptr
);
1276 RECURSE (SCM_CLOSCAR (ptr
));
1277 ptr
= SCM_ENV (ptr
);
1279 case scm_tc7_vector
:
1280 i
= SCM_VECTOR_LENGTH (ptr
);
1284 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
1285 RECURSE (SCM_VELTS (ptr
)[i
]);
1286 ptr
= SCM_VELTS (ptr
)[0];
1291 size_t i
= SCM_CCLO_LENGTH (ptr
);
1293 for (j
= 1; j
!= i
; ++j
)
1295 SCM obj
= SCM_CCLO_REF (ptr
, j
);
1299 ptr
= SCM_CCLO_REF (ptr
, 0);
1305 case scm_tc7_byvect
:
1312 #ifdef HAVE_LONG_LONGS
1313 case scm_tc7_llvect
:
1316 case scm_tc7_string
:
1320 SCM_SET_WVECT_GC_CHAIN (ptr
, scm_weak_vectors
);
1321 scm_weak_vectors
= ptr
;
1322 if (SCM_IS_WHVEC_ANY (ptr
))
1329 len
= SCM_VECTOR_LENGTH (ptr
);
1330 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
1331 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
1333 for (x
= 0; x
< len
; ++x
)
1336 alist
= SCM_VELTS (ptr
)[x
];
1338 /* mark everything on the alist except the keys or
1339 * values, according to weak_values and weak_keys. */
1340 while ( SCM_CONSP (alist
)
1341 && !SCM_GCMARKP (alist
)
1342 && SCM_CONSP (SCM_CAR (alist
)))
1347 kvpair
= SCM_CAR (alist
);
1348 next_alist
= SCM_CDR (alist
);
1351 * SCM_SETGCMARK (alist);
1352 * SCM_SETGCMARK (kvpair);
1354 * It may be that either the key or value is protected by
1355 * an escaped reference to part of the spine of this alist.
1356 * If we mark the spine here, and only mark one or neither of the
1357 * key and value, they may never be properly marked.
1358 * This leads to a horrible situation in which an alist containing
1359 * freelist cells is exported.
1361 * So only mark the spines of these arrays last of all marking.
1362 * If somebody confuses us by constructing a weak vector
1363 * with a circular alist then we are hosed, but at least we
1364 * won't prematurely drop table entries.
1367 RECURSE (SCM_CAR (kvpair
));
1369 RECURSE (SCM_CDR (kvpair
));
1372 if (SCM_NIMP (alist
))
1378 case scm_tc7_symbol
:
1379 ptr
= SCM_PROP_SLOTS (ptr
);
1381 case scm_tc7_variable
:
1382 ptr
= SCM_CELL_OBJECT_1 (ptr
);
1387 i
= SCM_PTOBNUM (ptr
);
1388 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1389 if (!(i
< scm_numptob
))
1390 SCM_MISC_ERROR ("undefined port type", SCM_EOL
);
1392 if (SCM_PTAB_ENTRY(ptr
))
1393 RECURSE (SCM_FILENAME (ptr
));
1394 if (scm_ptobs
[i
].mark
)
1396 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
1403 switch (SCM_TYP16 (ptr
))
1404 { /* should be faster than going through scm_smobs */
1405 case scm_tc_free_cell
:
1406 /* We have detected a free cell. This can happen if non-object data
1407 * on the C stack points into guile's heap and is scanned during
1408 * conservative marking. */
1412 case scm_tc16_complex
:
1415 i
= SCM_SMOBNUM (ptr
);
1416 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1417 if (!(i
< scm_numsmob
))
1418 SCM_MISC_ERROR ("undefined smob type", SCM_EOL
);
1420 if (scm_smobs
[i
].mark
)
1422 ptr
= (scm_smobs
[i
].mark
) (ptr
);
1430 SCM_MISC_ERROR ("unknown type", SCM_EOL
);
1436 #ifndef MARK_DEPENDENCIES
1441 /* And here we define `scm_gc_mark_dependencies', by including this
1442 * same file in itself.
1444 #define MARK scm_gc_mark_dependencies
1445 #define FNAME "scm_gc_mark_dependencies"
1446 #define MARK_DEPENDENCIES
1448 #undef MARK_DEPENDENCIES
1453 /* Determine whether the given value does actually represent a cell in some
1454 * heap segment. If this is the case, the number of the heap segment is
1455 * returned. Otherwise, -1 is returned. Binary search is used in order to
1456 * determine the heap segment that contains the cell.*/
1457 /* FIXME: To be used within scm_mark_locations and scm_cellp this function
1458 * should be an inline function. */
1460 heap_segment (SCM obj
)
1466 SCM_CELLPTR ptr
= SCM2PTR (obj
);
1467 unsigned long int i
= 0;
1468 unsigned long int j
= scm_n_heap_segs
- 1;
1470 if (SCM_PTR_LT (ptr
, scm_heap_table
[i
].bounds
[0]))
1472 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[1], ptr
))
1478 if (SCM_PTR_LT (ptr
, scm_heap_table
[i
].bounds
[1]))
1482 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1489 unsigned long int k
= (i
+ j
) / 2;
1493 else if (SCM_PTR_LT (ptr
, scm_heap_table
[k
].bounds
[1]))
1497 if (SCM_PTR_LT (ptr
, scm_heap_table
[i
].bounds
[0]))
1500 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1504 if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[1], ptr
))
1510 if (!DOUBLECELL_ALIGNED_P (obj
) && scm_heap_table
[i
].span
== 2)
1512 else if (SCM_GC_IN_CARD_HEADERP (ptr
))
1521 /* Mark a region conservatively */
1523 scm_mark_locations (SCM_STACKITEM x
[], unsigned long n
)
1527 for (m
= 0; m
< n
; ++m
)
1529 SCM obj
= * (SCM
*) &x
[m
];
1530 long int segment
= heap_segment (obj
);
1537 /* The function scm_cellp determines whether an SCM value can be regarded as a
1538 * pointer to a cell on the heap.
1541 scm_cellp (SCM value
)
1543 long int segment
= heap_segment (value
);
1544 return (segment
>= 0);
1549 gc_sweep_freelist_start (scm_t_freelist
*freelist
)
1551 freelist
->cells
= SCM_EOL
;
1552 freelist
->left_to_collect
= freelist
->cluster_size
;
1553 freelist
->clusters_allocated
= 0;
1554 freelist
->clusters
= SCM_EOL
;
1555 freelist
->clustertail
= &freelist
->clusters
;
1556 freelist
->collected_1
= freelist
->collected
;
1557 freelist
->collected
= 0;
1561 gc_sweep_freelist_finish (scm_t_freelist
*freelist
)
1564 *freelist
->clustertail
= freelist
->cells
;
1565 if (!SCM_NULLP (freelist
->cells
))
1567 SCM c
= freelist
->cells
;
1568 SCM_SET_CELL_WORD_0 (c
, SCM_FREE_CELL_CDR (c
));
1569 SCM_SET_CELL_WORD_1 (c
, SCM_EOL
);
1570 freelist
->collected
+=
1571 freelist
->span
* (freelist
->cluster_size
- freelist
->left_to_collect
);
1573 scm_gc_cells_collected
+= freelist
->collected
;
1575 /* Although freelist->min_yield is used to test freelist->collected
1576 * (which is the local GC yield for freelist), it is adjusted so
1577 * that *total* yield is freelist->min_yield_fraction of total heap
1578 * size. This means that a too low yield is compensated by more
1579 * heap on the list which is currently doing most work, which is
1580 * just what we want.
1582 collected
= SCM_MAX (freelist
->collected_1
, freelist
->collected
);
1583 freelist
->grow_heap_p
= (collected
< freelist
->min_yield
);
1586 #define NEXT_DATA_CELL(ptr, span) \
1588 scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1589 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1590 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1596 #define FUNC_NAME "scm_gc_sweep"
1598 register SCM_CELLPTR ptr
;
1599 register SCM nfreelist
;
1600 register scm_t_freelist
*freelist
;
1601 register unsigned long m
;
1608 gc_sweep_freelist_start (&scm_master_freelist
);
1609 gc_sweep_freelist_start (&scm_master_freelist2
);
1611 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1613 register long left_to_collect
;
1616 /* Unmarked cells go onto the front of the freelist this heap
1617 segment points to. Rather than updating the real freelist
1618 pointer as we go along, we accumulate the new head in
1619 nfreelist. Then, if it turns out that the entire segment is
1620 free, we free (i.e., malloc's free) the whole segment, and
1621 simply don't assign nfreelist back into the real freelist. */
1622 freelist
= scm_heap_table
[i
].freelist
;
1623 nfreelist
= freelist
->cells
;
1624 left_to_collect
= freelist
->left_to_collect
;
1625 span
= scm_heap_table
[i
].span
;
1627 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0], span
);
1628 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1], span
) - ptr
;
1630 /* use only data cells in seg_size */
1631 seg_size
= (seg_size
/ SCM_GC_CARD_N_CELLS
) * (SCM_GC_CARD_N_DATA_CELLS
/ span
) * span
;
1633 scm_gc_cells_swept
+= seg_size
;
1635 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
1639 if (SCM_GC_IN_CARD_HEADERP (ptr
))
1645 NEXT_DATA_CELL (nxt
, span
);
1652 scmptr
= PTR2SCM (ptr
);
1654 if (SCM_GCMARKP (scmptr
))
1657 switch SCM_TYP7 (scmptr
)
1659 case scm_tcs_struct
:
1661 /* Structs need to be freed in a special order.
1662 * This is handled by GC C hooks in struct.c.
1664 SCM_SET_STRUCT_GC_CHAIN (scmptr
, scm_structs_to_free
);
1665 scm_structs_to_free
= scmptr
;
1668 case scm_tcs_cons_imcar
:
1669 case scm_tcs_cons_nimcar
:
1670 case scm_tcs_closures
:
1674 case scm_tc7_vector
:
1676 unsigned long int length
= SCM_VECTOR_LENGTH (scmptr
);
1679 m
+= length
* sizeof (scm_t_bits
);
1680 scm_must_free (SCM_VECTOR_BASE (scmptr
));
1686 m
+= (SCM_CCLO_LENGTH (scmptr
) * sizeof (SCM
));
1687 scm_must_free (SCM_CCLO_BASE (scmptr
));
1693 unsigned long int length
= SCM_BITVECTOR_LENGTH (scmptr
);
1696 m
+= sizeof (long) * ((length
+ SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
1697 scm_must_free (SCM_BITVECTOR_BASE (scmptr
));
1701 case scm_tc7_byvect
:
1705 #ifdef HAVE_LONG_LONGS
1706 case scm_tc7_llvect
:
1711 m
+= SCM_UVECTOR_LENGTH (scmptr
) * scm_uniform_element_size (scmptr
);
1712 scm_must_free (SCM_UVECTOR_BASE (scmptr
));
1715 case scm_tc7_string
:
1716 m
+= SCM_STRING_LENGTH (scmptr
) + 1;
1717 scm_must_free (SCM_STRING_CHARS (scmptr
));
1719 case scm_tc7_symbol
:
1720 m
+= SCM_SYMBOL_LENGTH (scmptr
) + 1;
1721 scm_must_free (SCM_SYMBOL_CHARS (scmptr
));
1723 case scm_tc7_variable
:
1726 /* the various "subrs" (primitives) are never freed */
1729 if SCM_OPENP (scmptr
)
1731 int k
= SCM_PTOBNUM (scmptr
);
1732 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1733 if (!(k
< scm_numptob
))
1734 SCM_MISC_ERROR ("undefined port type", SCM_EOL
);
1736 /* Keep "revealed" ports alive. */
1737 if (scm_revealed_count (scmptr
) > 0)
1739 /* Yes, I really do mean scm_ptobs[k].free */
1740 /* rather than ftobs[k].close. .close */
1741 /* is for explicit CLOSE-PORT by user */
1742 m
+= (scm_ptobs
[k
].free
) (scmptr
);
1743 SCM_SETSTREAM (scmptr
, 0);
1744 scm_remove_from_port_table (scmptr
);
1745 scm_gc_ports_collected
++;
1746 SCM_CLR_PORT_OPEN_FLAG (scmptr
);
1750 switch SCM_TYP16 (scmptr
)
1752 case scm_tc_free_cell
:
1757 m
+= (SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
/ SCM_CHAR_BIT
);
1758 scm_must_free (SCM_BDIGITS (scmptr
));
1760 #endif /* def SCM_BIGDIG */
1761 case scm_tc16_complex
:
1762 m
+= sizeof (scm_t_complex
);
1763 scm_must_free (SCM_COMPLEX_MEM (scmptr
));
1768 k
= SCM_SMOBNUM (scmptr
);
1769 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1770 if (!(k
< scm_numsmob
))
1771 SCM_MISC_ERROR ("undefined smob type", SCM_EOL
);
1773 if (scm_smobs
[k
].free
)
1774 m
+= (scm_smobs
[k
].free
) (scmptr
);
1780 SCM_MISC_ERROR ("unknown type", SCM_EOL
);
1783 if (!--left_to_collect
)
1785 SCM_SET_CELL_WORD_0 (scmptr
, nfreelist
);
1786 *freelist
->clustertail
= scmptr
;
1787 freelist
->clustertail
= SCM_CDRLOC (scmptr
);
1789 nfreelist
= SCM_EOL
;
1790 freelist
->collected
+= span
* freelist
->cluster_size
;
1791 left_to_collect
= freelist
->cluster_size
;
1795 /* Stick the new cell on the front of nfreelist. It's
1796 critical that we mark this cell as freed; otherwise, the
1797 conservative collector might trace it as some other type
1799 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
1800 SCM_SET_FREE_CELL_CDR (scmptr
, nfreelist
);
1805 #ifdef GC_FREE_SEGMENTS
1810 freelist
->heap_size
-= seg_size
;
1811 free ((char *) scm_heap_table
[i
].bounds
[0]);
1812 scm_heap_table
[i
].bounds
[0] = 0;
1813 for (j
= i
+ 1; j
< scm_n_heap_segs
; j
++)
1814 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1815 scm_n_heap_segs
-= 1;
1816 i
--; /* We need to scan the segment just moved. */
1819 #endif /* ifdef GC_FREE_SEGMENTS */
1821 /* Update the real freelist pointer to point to the head of
1822 the list of free cells we've built for this segment. */
1823 freelist
->cells
= nfreelist
;
1824 freelist
->left_to_collect
= left_to_collect
;
1827 #ifdef GUILE_DEBUG_FREELIST
1828 scm_map_free_list ();
1832 gc_sweep_freelist_finish (&scm_master_freelist
);
1833 gc_sweep_freelist_finish (&scm_master_freelist2
);
1835 /* When we move to POSIX threads private freelists should probably
1836 be GC-protected instead. */
1837 scm_freelist
= SCM_EOL
;
1838 scm_freelist2
= SCM_EOL
;
1840 scm_cells_allocated
= (SCM_HEAP_SIZE
- scm_gc_cells_collected
);
1841 scm_gc_yield
-= scm_cells_allocated
;
1843 if (scm_mallocated
< m
)
1844 /* The byte count of allocated objects has underflowed. This is
1845 probably because you forgot to report the sizes of objects you
1846 have allocated, by calling scm_done_malloc or some such. When
1847 the GC freed them, it subtracted their size from
1848 scm_mallocated, which underflowed. */
1851 scm_mallocated
-= m
;
1852 scm_gc_malloc_collected
= m
;
1858 /* {Front end to malloc}
1860 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1863 * These functions provide services comparable to malloc, realloc, and
1864 * free. They should be used when allocating memory that will be under
1865 * control of the garbage collector, i.e., if the memory may be freed
1866 * during garbage collection.
1870 * Return newly malloced storage or throw an error.
1872 * The parameter WHAT is a string for error reporting.
1873 * If the threshold scm_mtrigger will be passed by this
1874 * allocation, or if the first call to malloc fails,
1875 * garbage collect -- on the presumption that some objects
1876 * using malloced storage may be collected.
1878 * The limit scm_mtrigger may be raised by this allocation.
1881 scm_must_malloc (size_t size
, const char *what
)
1884 unsigned long nm
= scm_mallocated
+ size
;
1887 /* The byte count of allocated objects has overflowed. This is
1888 probably because you forgot to report the correct size of freed
1889 memory in some of your smob free methods. */
1892 if (nm
<= scm_mtrigger
)
1894 SCM_SYSCALL (ptr
= malloc (size
));
1897 scm_mallocated
= nm
;
1898 #ifdef GUILE_DEBUG_MALLOC
1899 scm_malloc_register (ptr
, what
);
1907 nm
= scm_mallocated
+ size
;
1910 /* The byte count of allocated objects has overflowed. This is
1911 probably because you forgot to report the correct size of freed
1912 memory in some of your smob free methods. */
1915 SCM_SYSCALL (ptr
= malloc (size
));
1918 scm_mallocated
= nm
;
1920 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1921 unsigned long old_trigger
= scm_mtrigger
;
1922 if (nm
> scm_mtrigger
)
1923 scm_mtrigger
= nm
+ nm
/ 2;
1925 scm_mtrigger
+= scm_mtrigger
/ 2;
1926 if (scm_mtrigger
< old_trigger
)
1929 #ifdef GUILE_DEBUG_MALLOC
1930 scm_malloc_register (ptr
, what
);
1936 scm_memory_error (what
);
1941 * is similar to scm_must_malloc.
1944 scm_must_realloc (void *where
,
1952 if (size
<= old_size
)
1955 nm
= scm_mallocated
+ size
- old_size
;
1957 if (nm
< (size
- old_size
))
1958 /* The byte count of allocated objects has overflowed. This is
1959 probably because you forgot to report the correct size of freed
1960 memory in some of your smob free methods. */
1963 if (nm
<= scm_mtrigger
)
1965 SCM_SYSCALL (ptr
= realloc (where
, size
));
1968 scm_mallocated
= nm
;
1969 #ifdef GUILE_DEBUG_MALLOC
1970 scm_malloc_reregister (where
, ptr
, what
);
1978 nm
= scm_mallocated
+ size
- old_size
;
1980 if (nm
< (size
- old_size
))
1981 /* The byte count of allocated objects has overflowed. This is
1982 probably because you forgot to report the correct size of freed
1983 memory in some of your smob free methods. */
1986 SCM_SYSCALL (ptr
= realloc (where
, size
));
1989 scm_mallocated
= nm
;
1990 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1991 unsigned long old_trigger
= scm_mtrigger
;
1992 if (nm
> scm_mtrigger
)
1993 scm_mtrigger
= nm
+ nm
/ 2;
1995 scm_mtrigger
+= scm_mtrigger
/ 2;
1996 if (scm_mtrigger
< old_trigger
)
1999 #ifdef GUILE_DEBUG_MALLOC
2000 scm_malloc_reregister (where
, ptr
, what
);
2005 scm_memory_error (what
);
/* Return a freshly allocated, NUL-terminated copy of the first LENGTH
 * bytes of STR.  Storage comes from scm_must_malloc (so allocation
 * failure is handled by the GC machinery) and must be released with
 * scm_must_free.
 *
 * NOTE(review): the extraction dropped the terminating store and the
 * return statement; both are reconstructed below — without them the
 * function would return garbage and the copy would be unterminated.  */
char *
scm_must_strndup (const char *str, size_t length)
{
  char *dst = scm_must_malloc (length + 1, "scm_must_strndup");
  memcpy (dst, str, length);
  dst[length] = 0;
  return dst;
}
/* Return a freshly allocated copy of the NUL-terminated string STR.
 * Thin wrapper around scm_must_strndup; free with scm_must_free.  */
char *
scm_must_strdup (const char *str)
{
  return scm_must_strndup (str, strlen (str));
}
2024 scm_must_free (void *obj
)
2025 #define FUNC_NAME "scm_must_free"
2027 #ifdef GUILE_DEBUG_MALLOC
2028 scm_malloc_unregister (obj
);
2033 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL
);
2038 /* Announce that there has been some malloc done that will be freed
2039 * during gc. A typical use is for a smob that uses some malloced
2040 * memory but can not get it from scm_must_malloc (for whatever
2041 * reason). When a new object of this smob is created you call
2042 * scm_done_malloc with the size of the object. When your smob free
2043 * function is called, be sure to include this size in the return
2046 * If you can't actually free the memory in the smob free function,
2047 * for whatever reason (like reference counting), you still can (and
2048 * should) report the amount of memory freed when you actually free it.
2049 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
2050 * eh? Or even better, call scm_done_free. */
2053 scm_done_malloc (long size
)
2056 if (scm_mallocated
< size
)
2057 /* The byte count of allocated objects has underflowed. This is
2058 probably because you forgot to report the sizes of objects you
2059 have allocated, by calling scm_done_malloc or some such. When
2060 the GC freed them, it subtracted their size from
2061 scm_mallocated, which underflowed. */
2064 unsigned long nm
= scm_mallocated
+ size
;
2066 /* The byte count of allocated objects has overflowed. This is
2067 probably because you forgot to report the correct size of freed
2068 memory in some of your smob free methods. */
2072 scm_mallocated
+= size
;
2074 if (scm_mallocated
> scm_mtrigger
)
2076 scm_igc ("foreign mallocs");
2077 if (scm_mallocated
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
)
2079 if (scm_mallocated
> scm_mtrigger
)
2080 scm_mtrigger
= scm_mallocated
+ scm_mallocated
/ 2;
2082 scm_mtrigger
+= scm_mtrigger
/ 2;
2088 scm_done_free (long size
)
2091 if (scm_mallocated
< size
)
2092 /* The byte count of allocated objects has underflowed. This is
2093 probably because you forgot to report the sizes of objects you
2094 have allocated, by calling scm_done_malloc or some such. When
2095 the GC freed them, it subtracted their size from
2096 scm_mallocated, which underflowed. */
2099 unsigned long nm
= scm_mallocated
- size
;
2101 /* The byte count of allocated objects has overflowed. This is
2102 probably because you forgot to report the correct size of freed
2103 memory in some of your smob free methods. */
2107 scm_mallocated
-= size
;
2114 * Each heap segment is an array of objects of a particular size.
2115 * Every segment has an associated (possibly shared) freelist.
2116 * A table of segment records is kept that records the upper and
2117 * lower extents of the segment; this is used during the conservative
2118 * phase of gc to identify probable gc roots (because they point
2119 * into valid segments at reasonable offsets). */
2122 * is true if the first segment was smaller than INIT_HEAP_SEG.
2123 * If scm_expmem is set to one, subsequent segment allocations will
2124 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2128 size_t scm_max_segment_size
;
2131 * is the lowest base address of any heap segment.
2133 SCM_CELLPTR scm_heap_org
;
2135 scm_t_heap_seg_data
* scm_heap_table
= 0;
2136 static size_t heap_segment_table_size
= 0;
2137 size_t scm_n_heap_segs
= 0;
2140 * initializes a new heap segment and returns the number of objects it contains.
2142 * The segment origin and segment size in bytes are input parameters.
2143 * The freelist is both input and output.
2145 * This function presumes that the scm_heap_table has already been expanded
2146 * to accommodate a new segment record and that the markbit space was reserved
2147 * for all the cards in this segment.
2150 #define INIT_CARD(card, span) \
2152 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2154 SCM_GC_SET_CARD_DOUBLECELL (card); \
2158 init_heap_seg (SCM_CELLPTR seg_org
, size_t size
, scm_t_freelist
*freelist
)
2160 register SCM_CELLPTR ptr
;
2161 SCM_CELLPTR seg_end
;
2162 size_t new_seg_index
;
2163 ptrdiff_t n_new_cells
;
2164 int span
= freelist
->span
;
2166 if (seg_org
== NULL
)
2169 /* Align the begin ptr up.
2171 ptr
= SCM_GC_CARD_UP (seg_org
);
2173 /* Compute the ceiling on valid object pointers w/in this segment.
2175 seg_end
= SCM_GC_CARD_DOWN ((char *)seg_org
+ size
);
2177 /* Find the right place and insert the segment record.
2180 while (new_seg_index
< scm_n_heap_segs
2181 && SCM_PTR_LE (scm_heap_table
[new_seg_index
].bounds
[0], seg_org
))
2186 for (i
= scm_n_heap_segs
; i
> new_seg_index
; --i
)
2187 scm_heap_table
[i
] = scm_heap_table
[i
- 1];
2192 scm_heap_table
[new_seg_index
].span
= span
;
2193 scm_heap_table
[new_seg_index
].freelist
= freelist
;
2194 scm_heap_table
[new_seg_index
].bounds
[0] = ptr
;
2195 scm_heap_table
[new_seg_index
].bounds
[1] = seg_end
;
2198 n_new_cells
= seg_end
- ptr
;
2200 freelist
->heap_size
+= n_new_cells
;
2202 /* Partition objects in this segment into clusters */
2205 SCM
*clusterp
= &clusters
;
2207 NEXT_DATA_CELL (ptr
, span
);
2208 while (ptr
< seg_end
)
2210 scm_cell
*nxt
= ptr
;
2211 scm_cell
*prv
= NULL
;
2212 scm_cell
*last_card
= NULL
;
2213 int n_data_cells
= (SCM_GC_CARD_N_DATA_CELLS
/ span
) * SCM_CARDS_PER_CLUSTER
- 1;
2214 NEXT_DATA_CELL(nxt
, span
);
2216 /* Allocate cluster spine
2218 *clusterp
= PTR2SCM (ptr
);
2219 SCM_SETCAR (*clusterp
, PTR2SCM (nxt
));
2220 clusterp
= SCM_CDRLOC (*clusterp
);
2223 while (n_data_cells
--)
2225 scm_cell
*card
= SCM_GC_CELL_CARD (ptr
);
2226 SCM scmptr
= PTR2SCM (ptr
);
2228 NEXT_DATA_CELL (nxt
, span
);
2231 if (card
!= last_card
)
2233 INIT_CARD (card
, span
);
2237 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
2238 SCM_SET_FREE_CELL_CDR (scmptr
, PTR2SCM (nxt
));
2243 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv
), SCM_EOL
);
2248 scm_cell
*ref
= seg_end
;
2249 NEXT_DATA_CELL (ref
, span
);
2251 /* [cmm] looks like the segment size doesn't divide cleanly by
2252 cluster size. bad cmm! */
2256 /* Patch up the last cluster pointer in the segment
2257 * to join it to the input freelist.
2259 *clusterp
= freelist
->clusters
;
2260 freelist
->clusters
= clusters
;
2264 fprintf (stderr
, "H");
2270 round_to_cluster_size (scm_t_freelist
*freelist
, size_t len
)
2272 size_t cluster_size_in_bytes
= CLUSTER_SIZE_IN_BYTES (freelist
);
2275 (len
+ cluster_size_in_bytes
- 1) / cluster_size_in_bytes
* cluster_size_in_bytes
2276 + ALIGNMENT_SLACK (freelist
);
2280 alloc_some_heap (scm_t_freelist
*freelist
, policy_on_error error_policy
)
2281 #define FUNC_NAME "alloc_some_heap"
2286 if (scm_gc_heap_lock
)
2288 /* Critical code sections (such as the garbage collector) aren't
2289 * supposed to add heap segments.
2291 fprintf (stderr
, "alloc_some_heap: Can not extend locked heap.\n");
2295 if (scm_n_heap_segs
== heap_segment_table_size
)
2297 /* We have to expand the heap segment table to have room for the new
2298 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2299 * init_heap_seg only if the allocation of the segment itself succeeds.
2301 size_t new_table_size
= scm_n_heap_segs
+ 1;
2302 size_t size
= new_table_size
* sizeof (scm_t_heap_seg_data
);
2303 scm_t_heap_seg_data
*new_heap_table
;
2305 SCM_SYSCALL (new_heap_table
= ((scm_t_heap_seg_data
*)
2306 realloc ((char *)scm_heap_table
, size
)));
2307 if (!new_heap_table
)
2309 if (error_policy
== abort_on_error
)
2311 fprintf (stderr
, "alloc_some_heap: Could not grow heap segment table.\n");
2321 scm_heap_table
= new_heap_table
;
2322 heap_segment_table_size
= new_table_size
;
2326 /* Pick a size for the new heap segment.
2327 * The rule for picking the size of a segment is explained in
2331 /* Assure that the new segment is predicted to be large enough.
2333 * New yield should at least equal GC fraction of new heap size, i.e.
2335 * y + dh > f * (h + dh)
2338 * f : min yield fraction
2340 * dh : size of new heap segment
2342 * This gives dh > (f * h - y) / (1 - f)
2344 int f
= freelist
->min_yield_fraction
;
2345 unsigned long h
= SCM_HEAP_SIZE
;
2346 size_t min_cells
= (f
* h
- 100 * (long) scm_gc_yield
) / (99 - f
);
2347 len
= SCM_EXPHEAP (freelist
->heap_size
);
2349 fprintf (stderr
, "(%ld < %ld)", (long) len
, (long) min_cells
);
2351 if (len
< min_cells
)
2352 len
= min_cells
+ freelist
->cluster_size
;
2353 len
*= sizeof (scm_cell
);
2354 /* force new sampling */
2355 freelist
->collected
= LONG_MAX
;
2358 if (len
> scm_max_segment_size
)
2359 len
= scm_max_segment_size
;
2364 smallest
= CLUSTER_SIZE_IN_BYTES (freelist
);
2369 /* Allocate with decaying ambition. */
2370 while ((len
>= SCM_MIN_HEAP_SEG_SIZE
)
2371 && (len
>= smallest
))
2373 size_t rounded_len
= round_to_cluster_size (freelist
, len
);
2374 SCM_SYSCALL (ptr
= (SCM_CELLPTR
) malloc (rounded_len
));
2377 init_heap_seg (ptr
, rounded_len
, freelist
);
2384 if (error_policy
== abort_on_error
)
2386 fprintf (stderr
, "alloc_some_heap: Could not grow heap.\n");
2393 /* {GC Protection Helper Functions}
2398 * If within a function you need to protect one or more scheme objects from
2399 * garbage collection, pass them as parameters to one of the
2400 * scm_remember_upto_here* functions below. These functions don't do
2401 * anything, but since the compiler does not know that they are actually
2402 * no-ops, it will generate code that calls these functions with the given
2403 * parameters. Therefore, you can be sure that the compiler will keep those
2404 * scheme values alive (on the stack or in a register) up to the point where
2405 * scm_remember_upto_here* is called. In other words, place the call to
2406 * scm_remember_upto_here* _behind_ the last code in your function, that
2407 * depends on the scheme object to exist.
2409 * Example: We want to make sure that the string object str does not get
2410 * garbage collected during the execution of 'some_function' in the code
2411 * below, because otherwise the characters belonging to str would be freed and
2412 * 'some_function' might access freed memory. To make sure that the compiler
2413 * keeps str alive on the stack or in a register such that it is visible to
2414 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2415 * call to 'some_function'. Note that this would not be necessary if str was
2416 * used anyway after the call to 'some_function'.
2417 * char *chars = SCM_STRING_CHARS (str);
2418 * some_function (chars);
2419 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2423 scm_remember_upto_here_1 (SCM obj SCM_UNUSED
)
2425 /* Empty. Protects a single object from garbage collection. */
2429 scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED
, SCM obj2 SCM_UNUSED
)
2431 /* Empty. Protects two objects from garbage collection. */
2435 scm_remember_upto_here (SCM obj SCM_UNUSED
, ...)
2437 /* Empty. Protects any number of objects from garbage collection. */
2441 These crazy functions prevent garbage collection
2442 of arguments after the first argument by
2443 ensuring they remain live throughout the
2444 function because they are used in the last
2445 line of the code block.
2446 It'd be better to have a nice compiler hint to
2447 aid the conservative stack-scanning GC. --03/09/00 gjb */
2449 scm_return_first (SCM elt
, ...)
2455 scm_return_first_int (int i
, ...)
2462 scm_permanent_object (SCM obj
)
2465 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
2471 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2472 other references are dropped, until the object is unprotected by calling
2473 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
2474 i. e. it is possible to protect the same object several times, but it is
2475 necessary to unprotect the object the same number of times to actually get
2476 the object unprotected. It is an error to unprotect an object more often
2477 than it has been protected before. The function scm_protect_object returns
2481 /* Implementation note: For every object X, there is a counter which
2482 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
/* Look up OBJ's protection count in the scm_protects hash table
 * (creating an entry with count 0 on first protection) and increment
 * it by one.
 * NOTE(review): fragment — the function's braces, the declaration of
 * `handle', the critical-section begin/end and the return statement
 * fall outside the visible lines of this extraction.  */
2486 scm_gc_protect_object (SCM obj
)
2490 /* This critical section barrier will be replaced by a mutex. */
2493 handle
= scm_hashq_create_handle_x (scm_protects
, obj
, SCM_MAKINUM (0));
/* Increment the protection count stored in the handle's cdr.  */
2494 SCM_SETCDR (handle
, scm_sum (SCM_CDR (handle
), SCM_MAKINUM (1)));
2502 /* Remove any protection for OBJ established by a prior call to
2503 scm_protect_object. This function returns OBJ.
2505 See scm_protect_object for more information. */
/* NOTE(review): fragment — braces, the `handle'/`count' declarations'
 * surrounding scope, the critical-section begin/end, the error path
 * following the fprintf, and the return statement are not visible in
 * this extraction.  */
2507 scm_gc_unprotect_object (SCM obj
)
2511 /* This critical section barrier will be replaced by a mutex. */
2514 handle
= scm_hashq_get_handle (scm_protects
, obj
);
/* A missing handle means OBJ was never protected (or is already fully
 * unprotected) — a caller programming error.  */
2516 if (SCM_FALSEP (handle
))
2518 fprintf (stderr
, "scm_unprotect_object called on unprotected object\n");
/* Otherwise decrement the protection count; remove the table entry
 * entirely once it reaches zero, else store the decremented count.  */
2523 SCM count
= scm_difference (SCM_CDR (handle
), SCM_MAKINUM (1));
2524 if (SCM_EQ_P (count
, SCM_MAKINUM (0)))
2525 scm_hashq_remove_x (scm_protects
, obj
);
2527 SCM_SETCDR (handle
, count
);
2536 scm_gc_register_root (SCM
*p
)
2539 SCM key
= scm_long2num ((long) p
);
2541 /* This critical section barrier will be replaced by a mutex. */
2544 handle
= scm_hashv_create_handle_x (scm_gc_registered_roots
, key
, SCM_MAKINUM (0));
2545 SCM_SETCDR (handle
, scm_sum (SCM_CDR (handle
), SCM_MAKINUM (1)));
2551 scm_gc_unregister_root (SCM
*p
)
2554 SCM key
= scm_long2num ((long) p
);
2556 /* This critical section barrier will be replaced by a mutex. */
2559 handle
= scm_hashv_get_handle (scm_gc_registered_roots
, key
);
2561 if (SCM_FALSEP (handle
))
2563 fprintf (stderr
, "scm_gc_unregister_root called on unregistered root\n");
2568 SCM count
= scm_difference (SCM_CDR (handle
), SCM_MAKINUM (1));
2569 if (SCM_EQ_P (count
, SCM_MAKINUM (0)))
2570 scm_hashv_remove_x (scm_gc_registered_roots
, key
);
2572 SCM_SETCDR (handle
, count
);
2579 scm_gc_register_roots (SCM
*b
, unsigned long n
)
2582 for (; p
< b
+ n
; ++p
)
2583 scm_gc_register_root (p
);
2587 scm_gc_unregister_roots (SCM
*b
, unsigned long n
)
2590 for (; p
< b
+ n
; ++p
)
2591 scm_gc_unregister_root (p
);
2596 /* called on process termination. */
2602 extern int on_exit (void (*procp
) (), int arg
);
2605 cleanup (int status
, void *arg
)
2607 #error Dont know how to setup a cleanup handler on your system.
2612 scm_flush_all_ports ();
2617 make_initial_segment (size_t init_heap_size
, scm_t_freelist
*freelist
)
2619 size_t rounded_size
= round_to_cluster_size (freelist
, init_heap_size
);
2621 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2625 rounded_size
= round_to_cluster_size (freelist
, SCM_HEAP_SEG_SIZE
);
2626 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2634 if (freelist
->min_yield_fraction
)
2635 freelist
->min_yield
= (freelist
->heap_size
* freelist
->min_yield_fraction
2637 freelist
->grow_heap_p
= (freelist
->heap_size
< freelist
->min_yield
);
2644 init_freelist (scm_t_freelist
*freelist
,
2649 freelist
->clusters
= SCM_EOL
;
2650 freelist
->cluster_size
= cluster_size
+ 1;
2651 freelist
->left_to_collect
= 0;
2652 freelist
->clusters_allocated
= 0;
2653 freelist
->min_yield
= 0;
2654 freelist
->min_yield_fraction
= min_yield
;
2655 freelist
->span
= span
;
2656 freelist
->collected
= 0;
2657 freelist
->collected_1
= 0;
2658 freelist
->heap_size
= 0;
/* Get an integer from an environment variable.  Returns DEF when VAR
   is unset or does not begin with a decimal number; otherwise returns
   the parsed value (trailing garbage after the digits is ignored).  */
static int
scm_i_getenv_int (const char *var, int def)
{
  char *end, *val = getenv (var);
  long res;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  /* strtol consumed no characters: the value was not a number.  */
  if (end == val)
    return def;
  return res;
}
/* NOTE(review): garbled extraction of the body of the storage
 * initialisation routine (its function header falls in a gap of this
 * excerpt, as do several statements — see the jumps in the embedded
 * line numbers).  Only comments are added here; code bytes are left
 * untouched.  */
/* Heap sizes and GC-yield thresholds, read from the environment
   further below.  */
2680 unsigned long gc_trigger_1
;
2681 unsigned long gc_trigger_2
;
2682 size_t init_heap_size_1
;
2683 size_t init_heap_size_2
;
/* With cell-access debugging compiled in, register a smob type used
   to tag freshly allocated cells.  */
2686 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2687 scm_tc16_allocated
= scm_make_smob_type ("allocated cell", 0);
2688 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
/* Initialise every slot of scm_sys_protects to #f (the loop header
   falls in a gap of this excerpt).  */
2690 j
= SCM_NUM_PROTECTS
;
2692 scm_sys_protects
[--j
] = SCM_BOOL_F
;
/* Start both freelists empty, then configure the two master freelists
   (1-cell and 2-cell spans) with environment-overridable yields.  */
2695 scm_freelist
= SCM_EOL
;
2696 scm_freelist2
= SCM_EOL
;
2697 gc_trigger_1
= scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1
);
2698 init_freelist (&scm_master_freelist
, 1, SCM_CLUSTER_SIZE_1
, gc_trigger_1
);
2699 gc_trigger_2
= scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2
);
2700 init_freelist (&scm_master_freelist2
, 2, SCM_CLUSTER_SIZE_2
, gc_trigger_2
);
2701 scm_max_segment_size
= scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size
);
2705 j
= SCM_HEAP_SEG_SIZE
;
2706 scm_mtrigger
= SCM_INIT_MALLOC_LIMIT
;
/* Room for two heap-segment descriptors up front.  */
2707 scm_heap_table
= ((scm_t_heap_seg_data
*)
2708 scm_must_malloc (sizeof (scm_t_heap_seg_data
) * 2, "hplims"));
2709 heap_segment_table_size
= 2;
2711 mark_space_ptr
= &mark_space_head
;
/* Allocate the two initial heap segments; sizes may be overridden via
   the environment.  (The failure branch of the `if' falls in a gap of
   this excerpt.)  */
2713 init_heap_size_1
= scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1
);
2714 init_heap_size_2
= scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2
);
2715 if (make_initial_segment (init_heap_size_1
, &scm_master_freelist
) ||
2716 make_initial_segment (init_heap_size_2
, &scm_master_freelist2
))
2719 /* scm_hplims[0] can change. do not remove scm_heap_org */
2720 scm_heap_org
= CELL_UP (scm_heap_table
[0].bounds
[0], 1);
/* Initialise the C-level hooks that bracket the phases of each
   garbage collection.  */
2722 scm_c_hook_init (&scm_before_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2723 scm_c_hook_init (&scm_before_mark_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2724 scm_c_hook_init (&scm_before_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2725 scm_c_hook_init (&scm_after_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2726 scm_c_hook_init (&scm_after_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2728 /* Initialise the list of ports. */
2729 scm_port_table
= (scm_t_port
**)
2730 malloc (sizeof (scm_t_port
*) * scm_port_table_room
);
/* (The out-of-memory branch for the port table falls in a gap of this
   excerpt.)  */
2731 if (!scm_port_table
)
/* Install the exit-time cleanup handler defined above.  */
2738 on_exit (cleanup
, 0);
/* Final set-up of the GC-protected global tables, including the
   object-protection and registered-root hash tables used by
   scm_gc_unprotect_object / scm_gc_register_root above.  */
2742 scm_stand_in_procs
= SCM_EOL
;
2743 scm_permobjs
= SCM_EOL
;
2744 scm_protects
= scm_c_make_hash_table (31);
2745 scm_gc_registered_roots
= scm_c_make_hash_table (31);
/* Scheme-level hook run after each garbage collection; defined to
   Scheme as "after-gc-hook" during GC initialisation below.  */
2752 SCM scm_after_gc_hook
;
/* Async whose thunk runs the after-gc-hook; marked by mark_gc_async
   and executed once the evaluator handles pending asyncs.  */
2754 static SCM gc_async
;
2756 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2757 * is run after the gc, as soon as the asynchronous events are handled by the
2761 gc_async_thunk (void)
2763 scm_c_run_hook (scm_after_gc_hook
, SCM_EOL
);
2764 return SCM_UNSPECIFIED
;
2768 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2769 * the garbage collection. The only purpose of this function is to mark the
2770 * gc_async (which will eventually lead to the execution of the
2774 mark_gc_async (void * hook_data SCM_UNUSED
,
2775 void *func_data SCM_UNUSED
,
2776 void *data SCM_UNUSED
)
2778 /* If cell access debugging is enabled, the user may choose to perform
2779 * additional garbage collections after an arbitrary number of cell
2780 * accesses. We don't want the scheme level after-gc-hook to be performed
2781 * for each of these garbage collections for the following reason: The
2782 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
2783 * after-gc-hook was performed with every gc, and if the gc was performed
2784 * after a very small number of cell accesses, then the number of cell
2785 * accesses during the execution of the after-gc-hook will suffice to cause
2786 * the execution of the next gc. Then, guile would keep executing the
2787 * after-gc-hook over and over again, and would never come to do other
2790 * To overcome this problem, if cell access debugging with additional
2791 * garbage collections is enabled, the after-gc-hook is never run by the
2792 * garbage collecter. When running guile with cell access debugging and the
2793 * execution of the after-gc-hook is desired, then it is necessary to run
2794 * the hook explicitly from the user code. This has the effect, that from
2795 * the scheme level point of view it seems that garbage collection is
2796 * performed with a much lower frequency than it actually is. Obviously,
2797 * this will not work for code that depends on a fixed one to one
2798 * relationship between the execution counts of the C level garbage
2799 * collection hooks and the execution count of the scheme level
2802 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2803 if (debug_cells_gc_interval
== 0)
2804 scm_system_async_mark (gc_async
);
2806 scm_system_async_mark (gc_async
);
/* NOTE(review): garbled extraction of the tail of the GC
 * initialisation routine (its function header falls in a gap of this
 * excerpt).  Only comments are added; code bytes are untouched.  */
/* Create the Scheme-visible after-gc hook and the subr/async pair
   that runs it; the async is marked by mark_gc_async after each
   collection and executed by the evaluator.  */
2818 scm_after_gc_hook
= scm_permanent_object (scm_make_hook (SCM_INUM0
));
2819 scm_c_define ("after-gc-hook", scm_after_gc_hook
);
2821 after_gc_thunk
= scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0
,
2823 gc_async
= scm_system_async (after_gc_thunk
); /* protected via scm_asyncs */
/* Arrange for mark_gc_async to run at the end of every collection.  */
2825 scm_c_hook_add (&scm_after_gc_c_hook
, mark_gc_async
, NULL
, 0);
/* Snarfed initialisation code, generated at build time from this
   file's SCM_DEFINE forms.  */
2827 #ifndef SCM_MAGIC_SNARFER
2828 #include "libguile/gc.x"
2832 #endif /*MARK_DEPENDENCIES*/