1 /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
43 /* #define DEBUGINFO */
45 /* SECTION: This code is compiled once.
48 #ifndef MARK_DEPENDENCIES
57 extern unsigned long * __libc_ia64_register_backing_store_base
;
60 #include "libguile/_scm.h"
61 #include "libguile/eval.h"
62 #include "libguile/stime.h"
63 #include "libguile/stackchk.h"
64 #include "libguile/struct.h"
65 #include "libguile/smob.h"
66 #include "libguile/unif.h"
67 #include "libguile/async.h"
68 #include "libguile/ports.h"
69 #include "libguile/root.h"
70 #include "libguile/strings.h"
71 #include "libguile/vectors.h"
72 #include "libguile/weaks.h"
73 #include "libguile/hashtab.h"
74 #include "libguile/tags.h"
76 #include "libguile/validate.h"
77 #include "libguile/deprecation.h"
78 #include "libguile/gc.h"
80 #ifdef GUILE_DEBUG_MALLOC
81 #include "libguile/debug-malloc.h"
94 #define var_start(x, y) va_start(x, y)
97 #define var_start(x, y) va_start(x)
102 #define CELL_P(x) (SCM_ITAG3 (x) == scm_tc3_cons)
104 unsigned int scm_gc_running_p
= 0;
108 #if (SCM_DEBUG_CELL_ACCESSES == 1)
110 /* Set this to != 0 if every cell that is accessed shall be checked:
112 unsigned int scm_debug_cell_accesses_p
= 1;
114 /* Set this to 0 if no additional gc's shall be performed, otherwise set it to
115 * the number of cell accesses after which a gc shall be called.
117 static unsigned int debug_cells_gc_interval
= 0;
120 /* Assert that the given object is a valid reference to a valid cell. This
121 * test involves to determine whether the object is a cell pointer, whether
122 * this pointer actually points into a heap segment and whether the cell
123 * pointed to is not a free cell. Further, additional garbage collections may
124 * get executed after a user defined number of cell accesses. This helps to
125 * find places in the C code where references are dropped for extremely short
/* Assert that CELL is a valid reference to a live heap cell: it must pass
 * scm_cellp, and — when GC is not running — must not be a free cell.  If
 * `debug_cells_gc_interval' is set, an extra GC is triggered after that
 * many accesses.  `already_running' guards against recursion, since the
 * checks below may themselves access cells.
 * NOTE(review): this extract has lost brace/blank lines from the original
 * file; the surviving token stream is preserved verbatim below. */
129 scm_assert_cell_valid (SCM cell
)
131 static unsigned int already_running
= 0;
133 if (scm_debug_cell_accesses_p
&& !already_running
)
135 already_running
= 1; /* set to avoid recursion */
137 if (!scm_cellp (cell
))
139 fprintf (stderr
, "scm_assert_cell_valid: Not a cell object: %lux\n",
140 (unsigned long) SCM_UNPACK (cell
));
143 else if (!scm_gc_running_p
)
145 /* Dirk::FIXME:: During garbage collection there occur references to
146 free cells. This is all right during conservative marking, but
147 should not happen otherwise (I think). The case of free cells
148 accessed during conservative marking is handled in function
149 scm_mark_locations. However, there still occur accesses to free
150 cells during gc. I don't understand why this happens. If it is
151 a bug and gets fixed, the following test should also work while
154 if (SCM_FREE_CELL_P (cell
))
156 fprintf (stderr
, "scm_assert_cell_valid: Accessing free cell: %lux\n",
157 (unsigned long) SCM_UNPACK (cell
));
161 /* If desired, perform additional garbage collections after a user
162 * defined number of cell accesses.
164 if (debug_cells_gc_interval
)
166 static unsigned int counter
= 0;
174 counter
= debug_cells_gc_interval
;
175 scm_igc ("scm_assert_cell_valid");
179 already_running
= 0; /* re-enable */
184 SCM_DEFINE (scm_set_debug_cell_accesses_x
, "set-debug-cell-accesses!", 1, 0, 0,
186 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
187 "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
188 "but no additional calls to garbage collection are issued.\n"
189 "If @var{flag} is a number, cell access checking is enabled,\n"
190 "with an additional garbage collection after the given\n"
191 "number of cell accesses.\n"
192 "This procedure only exists when the compile-time flag\n"
193 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
194 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
196 if (SCM_FALSEP (flag
)) {
197 scm_debug_cell_accesses_p
= 0;
198 } else if (SCM_EQ_P (flag
, SCM_BOOL_T
)) {
199 debug_cells_gc_interval
= 0;
200 scm_debug_cell_accesses_p
= 1;
201 } else if (SCM_INUMP (flag
)) {
202 long int f
= SCM_INUM (flag
);
203 if (f
<= 0) SCM_OUT_OF_RANGE (1, flag
);
204 debug_cells_gc_interval
= f
;
205 scm_debug_cell_accesses_p
= 1;
207 SCM_WRONG_TYPE_ARG (1, flag
);
209 return SCM_UNSPECIFIED
;
213 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
217 /* {heap tuning parameters}
219 * These are parameters for controlling memory allocation. The heap
220 * is the area out of which scm_cons, and object headers are allocated.
222 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
223 * 64 bit machine. The units of the _SIZE parameters are bytes.
224 * Cons pairs and object headers occupy one heap cell.
226 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
227 * allocated initially the heap will grow by half its current size
228 * each subsequent time more heap is needed.
230 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
231 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
232 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type size_t. This code
233 * is in scm_init_storage() and alloc_some_heap() in sys.c
235 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
236 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
238 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
241 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
244 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must
245 * be reclaimed by a GC triggered by a malloc. If less than this is
246 * reclaimed, the trigger threshold is raised. [I don't know what a
247 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
248 * work around an oscillation that caused almost constant GC.]
252 * Heap size 45000 and 40% min yield gives quick startup and no extra
253 * heap allocation. Having higher values on min yield may lead to
254 * large heaps, especially if code behaviour is varying its
255 * maximum consumption between different freelists.
258 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
259 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
260 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
261 size_t scm_default_init_heap_size_1
= (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER
- 1)
262 / SCM_CARDS_PER_CLUSTER
) * SCM_GC_CARD_SIZE
);
263 int scm_default_min_yield_1
= 40;
265 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
266 size_t scm_default_init_heap_size_2
= (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER
- 1)
267 / SCM_CARDS_PER_CLUSTER
) * SCM_GC_CARD_SIZE
);
268 /* The following value may seem large, but note that if we get to GC at
269 * all, this means that we have a numerically intensive application
271 int scm_default_min_yield_2
= 40;
273 size_t scm_default_max_segment_size
= 2097000L;/* a little less (adm) than 2 Mb */
275 #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
277 # define SCM_HEAP_SEG_SIZE 32768L
280 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_t_cell))
282 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_t_cell))
285 /* Make heap grow with factor 1.5 */
286 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
287 #define SCM_INIT_MALLOC_LIMIT 100000
288 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
290 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_t_cell * span)
291 aligned inner bounds for allocated storage */
294 /*in 386 protected mode we must only adjust the offset */
295 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
296 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
299 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
300 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
302 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_t_cell)*(span)-1L) & ((long)(p)+sizeof(scm_t_cell)*(span)-1L))
303 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_t_cell)*(span)-1L) & (long)(p))
307 #define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_t_cell) - 1) & SCM_UNPACK (x)) == 0)
309 #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
310 #define CLUSTER_SIZE_IN_BYTES(freelist) \
311 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
317 typedef struct scm_t_freelist
{
318 /* collected cells */
320 /* number of cells left to collect before cluster is full */
321 unsigned int left_to_collect
;
322 /* number of clusters which have been allocated */
323 unsigned int clusters_allocated
;
324 /* a list of freelists, each of size cluster_size,
325 * except the last one which may be shorter
329 /* this is the number of objects in each cluster, including the spine cell */
330 unsigned int cluster_size
;
331 /* indicates that we should grow heap instead of GC:ing
334 /* minimum yield on this list in order not to grow the heap
337 /* defines min_yield as percent of total heap size
339 int min_yield_fraction
;
340 /* number of cells per object on this list */
342 /* number of collected cells during last GC */
343 unsigned long collected
;
344 /* number of collected cells during penultimate GC */
345 unsigned long collected_1
;
346 /* total number of cells in heap segments
347 * belonging to this list.
349 unsigned long heap_size
;
352 SCM scm_freelist
= SCM_EOL
;
353 scm_t_freelist scm_master_freelist
= {
354 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_1
, 0, 0, 0, 1, 0, 0, 0
356 SCM scm_freelist2
= SCM_EOL
;
357 scm_t_freelist scm_master_freelist2
= {
358 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_2
, 0, 0, 0, 2, 0, 0, 0
362 * is the number of bytes of malloc allocation needed to trigger gc.
364 unsigned long scm_mtrigger
;
367 * If set, don't expand the heap. Set only during gc, during which no allocation
368 * is supposed to take place anyway.
370 int scm_gc_heap_lock
= 0;
373 * Don't pause for collection if this is set -- just
376 int scm_block_gc
= 1;
378 /* During collection, this accumulates objects holding
381 SCM scm_weak_vectors
;
383 /* During collection, this accumulates structures which are to be freed.
385 SCM scm_structs_to_free
;
387 /* GC Statistics Keeping
389 unsigned long scm_cells_allocated
= 0;
390 unsigned long scm_mallocated
= 0;
391 unsigned long scm_gc_cells_collected
;
392 unsigned long scm_gc_yield
;
393 static unsigned long scm_gc_yield_1
= 0; /* previous GC yield */
394 unsigned long scm_gc_malloc_collected
;
395 unsigned long scm_gc_ports_collected
;
396 unsigned long scm_gc_time_taken
= 0;
397 static unsigned long t_before_gc
;
398 static unsigned long t_before_sweep
;
399 unsigned long scm_gc_mark_time_taken
= 0;
400 unsigned long scm_gc_sweep_time_taken
= 0;
401 unsigned long scm_gc_times
= 0;
402 unsigned long scm_gc_cells_swept
= 0;
403 double scm_gc_cells_marked_acc
= 0.;
404 double scm_gc_cells_swept_acc
= 0.;
406 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
407 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
408 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
409 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
410 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
411 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
412 SCM_SYMBOL (sym_gc_mark_time_taken
, "gc-mark-time-taken");
413 SCM_SYMBOL (sym_gc_sweep_time_taken
, "gc-sweep-time-taken");
414 SCM_SYMBOL (sym_times
, "gc-times");
415 SCM_SYMBOL (sym_cells_marked
, "cells-marked");
416 SCM_SYMBOL (sym_cells_swept
, "cells-swept");
418 typedef struct scm_t_heap_seg_data
420 /* lower and upper bounds of the segment */
421 SCM_CELLPTR bounds
[2];
423 /* address of the head-of-freelist pointer for this segment's cells.
424 All segments usually point to the same one, scm_freelist. */
425 scm_t_freelist
*freelist
;
427 /* number of cells per object in this segment */
429 } scm_t_heap_seg_data
;
433 static size_t init_heap_seg (SCM_CELLPTR
, size_t, scm_t_freelist
*);
435 typedef enum { return_on_error
, abort_on_error
} policy_on_error
;
436 static void alloc_some_heap (scm_t_freelist
*, policy_on_error
);
439 #define SCM_HEAP_SIZE \
440 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
441 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
443 #define BVEC_GROW_SIZE 256
444 #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
445 #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))
447 /* mark space allocation */
449 typedef struct scm_t_mark_space
451 scm_t_c_bvec_limb
*bvec_space
;
452 struct scm_t_mark_space
*next
;
455 static scm_t_mark_space
*current_mark_space
;
456 static scm_t_mark_space
**mark_space_ptr
;
457 static ptrdiff_t current_mark_space_offset
;
458 static scm_t_mark_space
*mark_space_head
;
460 static scm_t_c_bvec_limb
*
462 #define FUNC_NAME "get_bvec"
464 scm_t_c_bvec_limb
*res
;
466 if (!current_mark_space
)
468 SCM_SYSCALL (current_mark_space
= (scm_t_mark_space
*) malloc (sizeof (scm_t_mark_space
)));
469 if (!current_mark_space
)
470 SCM_MISC_ERROR ("could not grow heap", SCM_EOL
);
472 current_mark_space
->bvec_space
= NULL
;
473 current_mark_space
->next
= NULL
;
475 *mark_space_ptr
= current_mark_space
;
476 mark_space_ptr
= &(current_mark_space
->next
);
481 if (!(current_mark_space
->bvec_space
))
483 SCM_SYSCALL (current_mark_space
->bvec_space
=
484 (scm_t_c_bvec_limb
*) calloc (BVEC_GROW_SIZE_IN_BYTES
, 1));
485 if (!(current_mark_space
->bvec_space
))
486 SCM_MISC_ERROR ("could not grow heap", SCM_EOL
);
488 current_mark_space_offset
= 0;
493 if (current_mark_space_offset
== BVEC_GROW_SIZE_IN_LIMBS
)
495 current_mark_space
= NULL
;
500 res
= current_mark_space
->bvec_space
+ current_mark_space_offset
;
501 current_mark_space_offset
+= SCM_GC_CARD_BVEC_SIZE_IN_LIMBS
;
511 scm_t_mark_space
*ms
;
513 for (ms
= mark_space_head
; ms
; ms
= ms
->next
)
514 memset (ms
->bvec_space
, 0, BVEC_GROW_SIZE_IN_BYTES
);
519 /* Debugging functions. */
521 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
523 static long int heap_segment (SCM obj
); /* forw decl: non-debugging func */
526 map_free_list (scm_t_freelist
*master
, SCM freelist
)
528 long last_seg
= -1, count
= 0;
531 for (f
= freelist
; !SCM_NULLP (f
); f
= SCM_FREE_CELL_CDR (f
))
533 long int this_seg
= heap_segment (f
);
538 "map_free_list: can't find segment containing cell %lux\n",
539 (unsigned long int) SCM_UNPACK (f
));
542 else if (this_seg
!= last_seg
)
545 fprintf (stderr
, " %5ld %d-cells in segment %ld\n",
546 (long) count
, master
->span
, (long) last_seg
);
553 fprintf (stderr
, " %5ld %d-cells in segment %ld\n",
554 (long) count
, master
->span
, (long) last_seg
);
557 SCM_DEFINE (scm_map_free_list
, "map-free-list", 0, 0, 0,
559 "Print debugging information about the free-list.\n"
560 "@code{map-free-list} is only included in\n"
561 "@code{--enable-guile-debug} builds of Guile.")
562 #define FUNC_NAME s_scm_map_free_list
566 fprintf (stderr
, "%ld segments total (%d:%ld",
567 (long) scm_n_heap_segs
,
568 scm_heap_table
[0].span
,
569 (long) (scm_heap_table
[0].bounds
[1] - scm_heap_table
[0].bounds
[0]));
571 for (i
= 1; i
!= scm_n_heap_segs
; i
++)
572 fprintf (stderr
, ", %d:%ld",
573 scm_heap_table
[i
].span
,
574 (long) (scm_heap_table
[i
].bounds
[1] - scm_heap_table
[i
].bounds
[0]));
575 fprintf (stderr
, ")\n");
576 map_free_list (&scm_master_freelist
, scm_freelist
);
577 map_free_list (&scm_master_freelist2
, scm_freelist2
);
580 return SCM_UNSPECIFIED
;
584 static long last_cluster
;
585 static long last_size
;
588 free_list_length (char *title
, long i
, SCM freelist
)
592 for (ls
= freelist
; !SCM_NULLP (ls
); ls
= SCM_FREE_CELL_CDR (ls
))
593 if (SCM_FREE_CELL_P (ls
))
597 fprintf (stderr
, "bad cell in %s at position %ld\n", title
, (long) n
);
604 if (last_cluster
== i
- 1)
605 fprintf (stderr
, "\t%ld\n", (long) last_size
);
607 fprintf (stderr
, "-%ld\t%ld\n", (long) (i
- 1), (long) last_size
);
610 fprintf (stderr
, "%s %ld", title
, (long) i
);
612 fprintf (stderr
, "%s\t%ld\n", title
, (long) n
);
620 free_list_lengths (char *title
, scm_t_freelist
*master
, SCM freelist
)
623 long i
= 0, len
, n
= 0;
624 fprintf (stderr
, "%s\n\n", title
);
625 n
+= free_list_length ("free list", -1, freelist
);
626 for (clusters
= master
->clusters
;
627 SCM_NNULLP (clusters
);
628 clusters
= SCM_CDR (clusters
))
630 len
= free_list_length ("cluster", i
++, SCM_CAR (clusters
));
633 if (last_cluster
== i
- 1)
634 fprintf (stderr
, "\t%ld\n", (long) last_size
);
636 fprintf (stderr
, "-%ld\t%ld\n", (long) (i
- 1), (long) last_size
);
637 fprintf (stderr
, "\ntotal %ld objects\n\n", (long) n
);
640 SCM_DEFINE (scm_free_list_length
, "free-list-length", 0, 0, 0,
642 "Print debugging information about the free-list.\n"
643 "@code{free-list-length} is only included in\n"
644 "@code{--enable-guile-debug} builds of Guile.")
645 #define FUNC_NAME s_scm_free_list_length
647 free_list_lengths ("1-cells", &scm_master_freelist
, scm_freelist
);
648 free_list_lengths ("2-cells", &scm_master_freelist2
, scm_freelist2
);
649 return SCM_UNSPECIFIED
;
653 #endif /* defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST) */
655 #ifdef GUILE_DEBUG_FREELIST
657 /* Non-zero if freelist debugging is in effect. Set this via
658 `gc-set-debug-check-freelist!'. */
659 static int scm_debug_check_freelist
= 0;
661 /* Number of calls to SCM_NEWCELL since startup. */
662 static unsigned long scm_newcell_count
;
663 static unsigned long scm_newcell2_count
;
665 /* Search freelist for anything that isn't marked as a free cell.
666 Abort if we find something. */
668 scm_check_freelist (SCM freelist
)
673 for (f
= freelist
; !SCM_NULLP (f
); f
= SCM_FREE_CELL_CDR (f
), i
++)
674 if (!SCM_FREE_CELL_P (f
))
676 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %lu'th elt\n",
677 (long) scm_newcell_count
, (long) i
);
682 SCM_DEFINE (scm_gc_set_debug_check_freelist_x
, "gc-set-debug-check-freelist!", 1, 0, 0,
684 "If @var{flag} is @code{#t}, check the freelist for consistency\n"
685 "on each cell allocation. This procedure only exists when the\n"
686 "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
687 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
689 /* [cmm] I did a double-take when I read this code the first time.
691 SCM_VALIDATE_BOOL_COPY (1, flag
, scm_debug_check_freelist
);
692 return SCM_UNSPECIFIED
;
696 #endif /* GUILE_DEBUG_FREELIST */
/* Return the number of cells handed out from MASTER's clusters, in
 * single-cell units (hence the final multiplication by `span').  When
 * no unallocated clusters remain, the cells still waiting to be
 * collected in the current cluster are subtracted.
 * NOTE(review): braces and the return-type line appear to be missing
 * from this extract; text preserved verbatim. */
701 master_cells_allocated (scm_t_freelist
*master
)
703 /* the '- 1' below is to ignore the cluster spine cells. */
704 long objects
= master
->clusters_allocated
* (master
->cluster_size
- 1);
705 if (SCM_NULLP (master
->clusters
))
706 objects
-= master
->left_to_collect
;
707 return master
->span
* objects
;
/* Count the cells on FREELIST by walking its CDR chain.
 * NOTE(review): the counter declaration and return statement are
 * missing from this extract; surviving text preserved verbatim. */
711 freelist_length (SCM freelist
)
714 for (n
= 0; !SCM_NULLP (freelist
); freelist
= SCM_FREE_CELL_CDR (freelist
))
/* Compute the number of cells currently in use: cells swept into use,
 * plus cells handed out from both master freelists, minus the cells
 * still sitting unused on the per-thread freelists (scaled by each
 * master's span, since freelist_length counts objects, not cells). */
720 compute_cells_allocated ()
722 return (scm_cells_allocated
723 + master_cells_allocated (&scm_master_freelist
)
724 + master_cells_allocated (&scm_master_freelist2
)
725 - scm_master_freelist
.span
* freelist_length (scm_freelist
)
726 - scm_master_freelist2
.span
* freelist_length (scm_freelist2
));
729 /* {Scheme Interface to GC}
732 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
734 "Return an association list of statistics about Guile's current\n"
736 #define FUNC_NAME s_scm_gc_stats
741 unsigned long int local_scm_mtrigger
;
742 unsigned long int local_scm_mallocated
;
743 unsigned long int local_scm_heap_size
;
744 unsigned long int local_scm_cells_allocated
;
745 unsigned long int local_scm_gc_time_taken
;
746 unsigned long int local_scm_gc_times
;
747 unsigned long int local_scm_gc_mark_time_taken
;
748 unsigned long int local_scm_gc_sweep_time_taken
;
749 double local_scm_gc_cells_swept
;
750 double local_scm_gc_cells_marked
;
760 for (i
= scm_n_heap_segs
; i
--; )
761 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
762 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
764 if (scm_n_heap_segs
!= n
)
769 /* Below, we cons to produce the resulting list. We want a snapshot of
770 * the heap situation before consing.
772 local_scm_mtrigger
= scm_mtrigger
;
773 local_scm_mallocated
= scm_mallocated
;
774 local_scm_heap_size
= SCM_HEAP_SIZE
;
775 local_scm_cells_allocated
= compute_cells_allocated ();
776 local_scm_gc_time_taken
= scm_gc_time_taken
;
777 local_scm_gc_mark_time_taken
= scm_gc_mark_time_taken
;
778 local_scm_gc_sweep_time_taken
= scm_gc_sweep_time_taken
;
779 local_scm_gc_times
= scm_gc_times
;
780 local_scm_gc_cells_swept
= scm_gc_cells_swept_acc
;
781 local_scm_gc_cells_marked
= scm_gc_cells_marked_acc
;
783 answer
= scm_list_n (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
784 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
785 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
786 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
787 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
788 scm_cons (sym_times
, scm_ulong2num (local_scm_gc_times
)),
789 scm_cons (sym_gc_mark_time_taken
, scm_ulong2num (local_scm_gc_mark_time_taken
)),
790 scm_cons (sym_gc_sweep_time_taken
, scm_ulong2num (local_scm_gc_sweep_time_taken
)),
791 scm_cons (sym_cells_marked
, scm_i_dbl2big (local_scm_gc_cells_marked
)),
792 scm_cons (sym_cells_swept
, scm_i_dbl2big (local_scm_gc_cells_swept
)),
793 scm_cons (sym_heap_segments
, heap_segs
),
/* Reset the per-collection statistics counters at the start of a GC:
 * record the start time, zero the sweep/collect counters, and roll the
 * previous yield into scm_gc_yield_1 before recomputing the current
 * yield from the allocation counters.  WHAT (the GC trigger reason) is
 * unused here, hence SCM_UNUSED. */
802 gc_start_stats (const char *what SCM_UNUSED
)
804 t_before_gc
= scm_c_get_internal_run_time ();
805 scm_gc_cells_swept
= 0;
806 scm_gc_cells_collected
= 0;
807 scm_gc_yield_1
= scm_gc_yield
;
808 scm_gc_yield
= (scm_cells_allocated
809 + master_cells_allocated (&scm_master_freelist
)
810 + master_cells_allocated (&scm_master_freelist2
));
811 scm_gc_malloc_collected
= 0;
812 scm_gc_ports_collected
= 0;
819 unsigned long t
= scm_c_get_internal_run_time ();
820 scm_gc_time_taken
+= (t
- t_before_gc
);
821 scm_gc_sweep_time_taken
+= (t
- t_before_sweep
);
824 scm_gc_cells_marked_acc
+= scm_gc_cells_swept
- scm_gc_cells_collected
;
825 scm_gc_cells_swept_acc
+= scm_gc_cells_swept
;
829 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
831 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
832 "returned by this function for @var{obj}")
833 #define FUNC_NAME s_scm_object_address
835 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj
));
840 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
842 "Scans all of SCM objects and reclaims for further use those that are\n"
843 "no longer accessible.")
844 #define FUNC_NAME s_scm_gc
849 return SCM_UNSPECIFIED
;
855 /* {C Interface For When GC is Triggered}
/* Raise FREELIST->min_yield when recent GCs did not reclaim the
 * configured fraction of the heap, so that future collections grow the
 * heap instead of thrashing.  `delta' is the shortfall between the
 * target yield (min_yield_fraction percent of the whole heap) and the
 * better of the last two observed yields.
 * NOTE(review): braces and some statements are missing from this
 * extract; surviving text preserved verbatim. */
859 adjust_min_yield (scm_t_freelist
*freelist
)
861 /* min yield is adjusted upwards so that next predicted total yield
862 * (allocated cells actually freed by GC) becomes
863 * `min_yield_fraction' of total heap size. Note, however, that
864 * the absolute value of min_yield will correspond to `collected'
865 * on one master (the one which currently is triggering GC).
867 * The reason why we look at total yield instead of cells collected
868 * on one list is that we want to take other freelists into account.
869 * On this freelist, we know that (local) yield = collected cells,
870 * but that's probably not the case on the other lists.
872 * (We might consider computing a better prediction, for example
873 * by computing an average over multiple GC:s.)
875 if (freelist
->min_yield_fraction
)
877 /* Pick largest of last two yields. */
878 long delta
= ((SCM_HEAP_SIZE
* freelist
->min_yield_fraction
/ 100)
879 - (long) SCM_MAX (scm_gc_yield_1
, scm_gc_yield
));
881 fprintf (stderr
, " after GC = %lu, delta = %ld\n",
882 (long) scm_cells_allocated
,
886 freelist
->min_yield
+= delta
;
891 /* When we get POSIX threads support, the master will be global and
892 * common while the freelist will be individual for each thread.
896 scm_gc_for_newcell (scm_t_freelist
*master
, SCM
*freelist
)
902 if (SCM_NULLP (master
->clusters
))
904 if (master
->grow_heap_p
|| scm_block_gc
)
906 /* In order to reduce gc frequency, try to allocate a new heap
907 * segment first, even if gc might find some free cells. If we
908 * can't obtain a new heap segment, we will try gc later.
910 master
->grow_heap_p
= 0;
911 alloc_some_heap (master
, return_on_error
);
913 if (SCM_NULLP (master
->clusters
))
915 /* The heap was not grown, either because it wasn't scheduled to
916 * grow, or because there was not enough memory available. In
917 * both cases we have to try gc to get some free cells.
920 fprintf (stderr
, "allocated = %lu, ",
921 (long) (scm_cells_allocated
922 + master_cells_allocated (&scm_master_freelist
)
923 + master_cells_allocated (&scm_master_freelist2
)));
926 adjust_min_yield (master
);
927 if (SCM_NULLP (master
->clusters
))
929 /* gc could not free any cells. Now, we _must_ allocate a
930 * new heap segment, because there is no other possibility
931 * to provide a new cell for the caller.
933 alloc_some_heap (master
, abort_on_error
);
937 cell
= SCM_CAR (master
->clusters
);
938 master
->clusters
= SCM_CDR (master
->clusters
);
939 ++master
->clusters_allocated
;
941 while (SCM_NULLP (cell
));
943 #ifdef GUILE_DEBUG_FREELIST
944 scm_check_freelist (cell
);
948 *freelist
= SCM_FREE_CELL_CDR (cell
);
954 /* This is a support routine which can be used to reserve a cluster
955 * for some special use, such as debugging. It won't be useful until
956 * free cells are preserved between garbage collections.
960 scm_alloc_cluster (scm_t_freelist
*master
)
963 cell
= scm_gc_for_newcell (master
, &freelist
);
964 SCM_SETCDR (cell
, freelist
);
970 scm_t_c_hook scm_before_gc_c_hook
;
971 scm_t_c_hook scm_before_mark_c_hook
;
972 scm_t_c_hook scm_before_sweep_c_hook
;
973 scm_t_c_hook scm_after_sweep_c_hook
;
974 scm_t_c_hook scm_after_gc_c_hook
;
977 # define SCM_MARK_BACKING_STORE() do { \
979 SCM_STACKITEM * top, * bot; \
981 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
982 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
983 / sizeof (SCM_STACKITEM))); \
984 bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base; \
985 top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp; \
986 scm_mark_locations (bot, top - bot); } while (0)
988 # define SCM_MARK_BACKING_STORE()
992 scm_igc (const char *what
)
997 scm_c_hook_run (&scm_before_gc_c_hook
, 0);
1000 SCM_NULLP (scm_freelist
)
1002 : (SCM_NULLP (scm_freelist2
) ? "o" : "m"));
1004 /* During the critical section, only the current thread may run. */
1005 SCM_CRITICAL_SECTION_START
;
1007 if (!scm_stack_base
|| scm_block_gc
)
1013 gc_start_stats (what
);
1015 if (scm_gc_heap_lock
)
1016 /* We've invoked the collector while a GC is already in progress.
1017 That should never happen. */
1022 scm_c_hook_run (&scm_before_mark_c_hook
, 0);
1024 clear_mark_space ();
1028 /* Mark objects on the C stack. */
1029 SCM_FLUSH_REGISTER_WINDOWS
;
1030 /* This assumes that all registers are saved into the jmp_buf */
1031 setjmp (scm_save_regs_gc_mark
);
1032 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
1033 ( (size_t) (sizeof (SCM_STACKITEM
) - 1 +
1034 sizeof scm_save_regs_gc_mark
)
1035 / sizeof (SCM_STACKITEM
)));
1038 unsigned long stack_len
= scm_stack_size (scm_stack_base
);
1039 #ifdef SCM_STACK_GROWS_UP
1040 scm_mark_locations (scm_stack_base
, stack_len
);
1042 scm_mark_locations (scm_stack_base
- stack_len
, stack_len
);
1045 SCM_MARK_BACKING_STORE();
1047 #else /* USE_THREADS */
1049 /* Mark every thread's stack and registers */
1050 scm_threads_mark_stacks ();
1052 #endif /* USE_THREADS */
1054 j
= SCM_NUM_PROTECTS
;
1056 scm_gc_mark (scm_sys_protects
[j
]);
1058 /* mark the registered roots */
1061 for (i
= 0; i
< SCM_VECTOR_LENGTH (scm_gc_registered_roots
); ++i
) {
1062 SCM l
= SCM_VELTS (scm_gc_registered_roots
)[i
];
1063 for (; !SCM_NULLP (l
); l
= SCM_CDR (l
)) {
1064 SCM
*p
= (SCM
*) (scm_num2long (SCM_CAAR (l
), 0, NULL
));
1070 /* FIXME: we should have a means to register C functions to be run
1071 * in different phases of GC
1073 scm_mark_subr_table ();
1076 scm_gc_mark (scm_root
->handle
);
1079 t_before_sweep
= scm_c_get_internal_run_time ();
1080 scm_gc_mark_time_taken
+= (t_before_sweep
- t_before_gc
);
1082 scm_c_hook_run (&scm_before_sweep_c_hook
, 0);
1086 scm_c_hook_run (&scm_after_sweep_c_hook
, 0);
1091 SCM_CRITICAL_SECTION_END
;
1092 scm_c_hook_run (&scm_after_gc_c_hook
, 0);
1101 #define MARK scm_gc_mark
1102 #define FNAME "scm_gc_mark"
1104 #endif /*!MARK_DEPENDENCIES*/
1106 /* Mark an object precisely.
1110 #define FUNC_NAME FNAME
1114 scm_t_bits cell_type
;
1116 #ifndef MARK_DEPENDENCIES
1117 # define RECURSE scm_gc_mark
1119 /* go through the usual marking, but not for self-cycles. */
1120 # define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1124 #ifdef MARK_DEPENDENCIES
1125 goto gc_mark_loop_first_time
;
1128 /* A simple hack for debugging. Chose the second branch to get a
1129 meaningful backtrace for crashes inside the GC.
1132 #define goto_gc_mark_loop goto gc_mark_loop
1133 #define goto_gc_mark_nimp goto gc_mark_nimp
1135 #define goto_gc_mark_loop RECURSE(ptr); return
1136 #define goto_gc_mark_nimp RECURSE(ptr); return
1145 #ifdef MARK_DEPENDENCIES
1146 if (SCM_EQ_P (ptr
, p
))
1152 gc_mark_loop_first_time
:
1155 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1156 /* We are in debug mode. Check the ptr exhaustively. */
1157 if (!scm_cellp (ptr
))
1158 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL
);
1160 /* In non-debug mode, do at least some cheap testing. */
1162 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL
);
1165 #ifndef MARK_DEPENDENCIES
1167 if (SCM_GCMARKP (ptr
))
1170 SCM_SETGCMARK (ptr
);
1174 cell_type
= SCM_GC_CELL_TYPE (ptr
);
1175 switch (SCM_ITAG7 (cell_type
))
1177 case scm_tcs_cons_nimcar
:
1178 if (SCM_IMP (SCM_CDR (ptr
)))
1180 ptr
= SCM_CAR (ptr
);
1183 RECURSE (SCM_CAR (ptr
));
1184 ptr
= SCM_CDR (ptr
);
1186 case scm_tcs_cons_imcar
:
1187 ptr
= SCM_CDR (ptr
);
1190 RECURSE (SCM_SETTER (ptr
));
1191 ptr
= SCM_PROCEDURE (ptr
);
1193 case scm_tcs_struct
:
1195 /* XXX - use less explicit code. */
1196 scm_t_bits word0
= SCM_CELL_WORD_0 (ptr
) - scm_tc3_struct
;
1197 scm_t_bits
* vtable_data
= (scm_t_bits
*) word0
;
1198 SCM layout
= SCM_PACK (vtable_data
[scm_vtable_index_layout
]);
1199 long len
= SCM_SYMBOL_LENGTH (layout
);
1200 char * fields_desc
= SCM_SYMBOL_CHARS (layout
);
1201 scm_t_bits
* struct_data
= (scm_t_bits
*) SCM_STRUCT_DATA (ptr
);
1203 if (vtable_data
[scm_struct_i_flags
] & SCM_STRUCTF_ENTITY
)
1205 RECURSE (SCM_PACK (struct_data
[scm_struct_i_procedure
]));
1206 RECURSE (SCM_PACK (struct_data
[scm_struct_i_setter
]));
1212 for (x
= 0; x
< len
- 2; x
+= 2, ++struct_data
)
1213 if (fields_desc
[x
] == 'p')
1214 RECURSE (SCM_PACK (*struct_data
));
1215 if (fields_desc
[x
] == 'p')
1217 if (SCM_LAYOUT_TAILP (fields_desc
[x
+ 1]))
1218 for (x
= *struct_data
++; x
; --x
, ++struct_data
)
1219 RECURSE (SCM_PACK (*struct_data
));
1221 RECURSE (SCM_PACK (*struct_data
));
1225 ptr
= SCM_PACK (vtable_data
[scm_vtable_index_vtable
]);
1229 case scm_tcs_closures
:
1230 if (SCM_IMP (SCM_ENV (ptr
)))
1232 ptr
= SCM_CLOSCAR (ptr
);
1235 RECURSE (SCM_CLOSCAR (ptr
));
1236 ptr
= SCM_ENV (ptr
);
1238 case scm_tc7_vector
:
1239 i
= SCM_VECTOR_LENGTH (ptr
);
1243 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
1244 RECURSE (SCM_VELTS (ptr
)[i
]);
1245 ptr
= SCM_VELTS (ptr
)[0];
1250 size_t i
= SCM_CCLO_LENGTH (ptr
);
1252 for (j
= 1; j
!= i
; ++j
)
1254 SCM obj
= SCM_CCLO_REF (ptr
, j
);
1258 ptr
= SCM_CCLO_REF (ptr
, 0);
1264 case scm_tc7_byvect
:
1271 #ifdef HAVE_LONG_LONGS
1272 case scm_tc7_llvect
:
1275 case scm_tc7_string
:
1279 SCM_SET_WVECT_GC_CHAIN (ptr
, scm_weak_vectors
);
1280 scm_weak_vectors
= ptr
;
1281 if (SCM_IS_WHVEC_ANY (ptr
))
1288 len
= SCM_VECTOR_LENGTH (ptr
);
1289 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
1290 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
1292 for (x
= 0; x
< len
; ++x
)
1295 alist
= SCM_VELTS (ptr
)[x
];
1297 /* mark everything on the alist except the keys or
1298 * values, according to weak_values and weak_keys. */
1299 while ( SCM_CONSP (alist
)
1300 && !SCM_GCMARKP (alist
)
1301 && SCM_CONSP (SCM_CAR (alist
)))
1306 kvpair
= SCM_CAR (alist
);
1307 next_alist
= SCM_CDR (alist
);
1310 * SCM_SETGCMARK (alist);
1311 * SCM_SETGCMARK (kvpair);
1313 * It may be that either the key or value is protected by
1314 * an escaped reference to part of the spine of this alist.
1315 * If we mark the spine here, and only mark one or neither of the
1316 * key and value, they may never be properly marked.
1317 * This leads to a horrible situation in which an alist containing
1318 * freelist cells is exported.
1320 * So only mark the spines of these arrays last of all marking.
1321 * If somebody confuses us by constructing a weak vector
1322 * with a circular alist then we are hosed, but at least we
1323 * won't prematurely drop table entries.
1326 RECURSE (SCM_CAR (kvpair
));
1328 RECURSE (SCM_CDR (kvpair
));
1331 if (SCM_NIMP (alist
))
1337 case scm_tc7_symbol
:
1338 ptr
= SCM_PROP_SLOTS (ptr
);
1340 case scm_tc7_variable
:
1341 ptr
= SCM_CELL_OBJECT_1 (ptr
);
1346 i
= SCM_PTOBNUM (ptr
);
1347 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1348 if (!(i
< scm_numptob
))
1349 SCM_MISC_ERROR ("undefined port type", SCM_EOL
);
1351 if (SCM_PTAB_ENTRY(ptr
))
1352 RECURSE (SCM_FILENAME (ptr
));
1353 if (scm_ptobs
[i
].mark
)
1355 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
1362 switch (SCM_TYP16 (ptr
))
1363 { /* should be faster than going through scm_smobs */
1364 case scm_tc_free_cell
:
1365 /* We have detected a free cell. This can happen if non-object data
1366 * on the C stack points into guile's heap and is scanned during
1367 * conservative marking. */
1371 case scm_tc16_complex
:
1374 i
= SCM_SMOBNUM (ptr
);
1375 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1376 if (!(i
< scm_numsmob
))
1377 SCM_MISC_ERROR ("undefined smob type", SCM_EOL
);
1379 if (scm_smobs
[i
].mark
)
1381 ptr
= (scm_smobs
[i
].mark
) (ptr
);
1389 SCM_MISC_ERROR ("unknown type", SCM_EOL
);
1395 #ifndef MARK_DEPENDENCIES
1400 /* And here we define `scm_gc_mark_dependencies', by including this
1401 * same file in itself.
1403 #define MARK scm_gc_mark_dependencies
1404 #define FNAME "scm_gc_mark_dependencies"
1405 #define MARK_DEPENDENCIES
1407 #undef MARK_DEPENDENCIES
1412 /* Determine whether the given value does actually represent a cell in some
1413 * heap segment. If this is the case, the number of the heap segment is
1414 * returned. Otherwise, -1 is returned. Binary search is used in order to
1415 * determine the heap segment that contains the cell.*/
1416 /* FIXME: To be used within scm_mark_locations and scm_cellp this function
1417 * should be an inline function. */
1419 heap_segment (SCM obj
)
1425 SCM_CELLPTR ptr
= SCM2PTR (obj
);
1426 unsigned long int i
= 0;
1427 unsigned long int j
= scm_n_heap_segs
- 1;
1429 if (SCM_PTR_LT (ptr
, scm_heap_table
[i
].bounds
[0]))
1431 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[1], ptr
))
1437 if (SCM_PTR_LT (ptr
, scm_heap_table
[i
].bounds
[1]))
1441 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1448 unsigned long int k
= (i
+ j
) / 2;
1452 else if (SCM_PTR_LT (ptr
, scm_heap_table
[k
].bounds
[1]))
1456 if (SCM_PTR_LT (ptr
, scm_heap_table
[i
].bounds
[0]))
1459 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1463 if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[1], ptr
))
1469 if (!DOUBLECELL_ALIGNED_P (obj
) && scm_heap_table
[i
].span
== 2)
1471 else if (SCM_GC_IN_CARD_HEADERP (ptr
))
1480 /* Mark a region conservatively */
1482 scm_mark_locations (SCM_STACKITEM x
[], unsigned long n
)
1486 for (m
= 0; m
< n
; ++m
)
1488 SCM obj
= * (SCM
*) &x
[m
];
1489 long int segment
= heap_segment (obj
);
1496 /* The function scm_cellp determines whether an SCM value can be regarded as a
1497 * pointer to a cell on the heap.
1500 scm_cellp (SCM value
)
1502 long int segment
= heap_segment (value
);
1503 return (segment
>= 0);
1508 gc_sweep_freelist_start (scm_t_freelist
*freelist
)
1510 freelist
->cells
= SCM_EOL
;
1511 freelist
->left_to_collect
= freelist
->cluster_size
;
1512 freelist
->clusters_allocated
= 0;
1513 freelist
->clusters
= SCM_EOL
;
1514 freelist
->clustertail
= &freelist
->clusters
;
1515 freelist
->collected_1
= freelist
->collected
;
1516 freelist
->collected
= 0;
1520 gc_sweep_freelist_finish (scm_t_freelist
*freelist
)
1523 *freelist
->clustertail
= freelist
->cells
;
1524 if (!SCM_NULLP (freelist
->cells
))
1526 SCM c
= freelist
->cells
;
1527 SCM_SET_CELL_WORD_0 (c
, SCM_FREE_CELL_CDR (c
));
1528 SCM_SET_CELL_WORD_1 (c
, SCM_EOL
);
1529 freelist
->collected
+=
1530 freelist
->span
* (freelist
->cluster_size
- freelist
->left_to_collect
);
1532 scm_gc_cells_collected
+= freelist
->collected
;
1534 /* Although freelist->min_yield is used to test freelist->collected
1535 * (which is the local GC yield for freelist), it is adjusted so
1536 * that *total* yield is freelist->min_yield_fraction of total heap
1537 * size. This means that a too low yield is compensated by more
1538 * heap on the list which is currently doing most work, which is
1539 * just what we want.
1541 collected
= SCM_MAX (freelist
->collected_1
, freelist
->collected
);
1542 freelist
->grow_heap_p
= (collected
< freelist
->min_yield
);
1545 #define NEXT_DATA_CELL(ptr, span) \
1547 scm_t_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1548 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1549 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1555 #define FUNC_NAME "scm_gc_sweep"
1557 register SCM_CELLPTR ptr
;
1558 register SCM nfreelist
;
1559 register scm_t_freelist
*freelist
;
1560 register unsigned long m
;
1567 gc_sweep_freelist_start (&scm_master_freelist
);
1568 gc_sweep_freelist_start (&scm_master_freelist2
);
1570 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1572 register long left_to_collect
;
1575 /* Unmarked cells go onto the front of the freelist this heap
1576 segment points to. Rather than updating the real freelist
1577 pointer as we go along, we accumulate the new head in
1578 nfreelist. Then, if it turns out that the entire segment is
1579 free, we free (i.e., malloc's free) the whole segment, and
1580 simply don't assign nfreelist back into the real freelist. */
1581 freelist
= scm_heap_table
[i
].freelist
;
1582 nfreelist
= freelist
->cells
;
1583 left_to_collect
= freelist
->left_to_collect
;
1584 span
= scm_heap_table
[i
].span
;
1586 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0], span
);
1587 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1], span
) - ptr
;
1589 /* use only data cells in seg_size */
1590 seg_size
= (seg_size
/ SCM_GC_CARD_N_CELLS
) * (SCM_GC_CARD_N_DATA_CELLS
/ span
) * span
;
1592 scm_gc_cells_swept
+= seg_size
;
1594 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
1598 if (SCM_GC_IN_CARD_HEADERP (ptr
))
1604 NEXT_DATA_CELL (nxt
, span
);
1611 scmptr
= PTR2SCM (ptr
);
1613 if (SCM_GCMARKP (scmptr
))
1616 switch SCM_TYP7 (scmptr
)
1618 case scm_tcs_struct
:
1620 /* Structs need to be freed in a special order.
1621 * This is handled by GC C hooks in struct.c.
1623 SCM_SET_STRUCT_GC_CHAIN (scmptr
, scm_structs_to_free
);
1624 scm_structs_to_free
= scmptr
;
1627 case scm_tcs_cons_imcar
:
1628 case scm_tcs_cons_nimcar
:
1629 case scm_tcs_closures
:
1633 case scm_tc7_vector
:
1635 unsigned long int length
= SCM_VECTOR_LENGTH (scmptr
);
1638 scm_gc_free (SCM_VECTOR_BASE (scmptr
),
1639 length
* sizeof (scm_t_bits
),
1646 scm_gc_free (SCM_CCLO_BASE (scmptr
),
1647 SCM_CCLO_LENGTH (scmptr
) * sizeof (SCM
),
1648 "compiled closure");
1654 unsigned long int length
= SCM_BITVECTOR_LENGTH (scmptr
);
1657 scm_gc_free (SCM_BITVECTOR_BASE (scmptr
),
1659 * ((length
+SCM_LONG_BIT
-1) / SCM_LONG_BIT
)),
1664 case scm_tc7_byvect
:
1668 #ifdef HAVE_LONG_LONGS
1669 case scm_tc7_llvect
:
1674 scm_gc_free (SCM_UVECTOR_BASE (scmptr
),
1675 (SCM_UVECTOR_LENGTH (scmptr
)
1676 * scm_uniform_element_size (scmptr
)),
1680 case scm_tc7_string
:
1681 scm_gc_free (SCM_STRING_CHARS (scmptr
),
1682 SCM_STRING_LENGTH (scmptr
) + 1, "string");
1684 case scm_tc7_symbol
:
1685 scm_gc_free (SCM_SYMBOL_CHARS (scmptr
),
1686 SCM_SYMBOL_LENGTH (scmptr
) + 1, "symbol");
1688 case scm_tc7_variable
:
1691 /* the various "subrs" (primitives) are never freed */
1694 if SCM_OPENP (scmptr
)
1696 int k
= SCM_PTOBNUM (scmptr
);
1698 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1699 if (!(k
< scm_numptob
))
1700 SCM_MISC_ERROR ("undefined port type", SCM_EOL
);
1702 /* Keep "revealed" ports alive. */
1703 if (scm_revealed_count (scmptr
) > 0)
1705 /* Yes, I really do mean scm_ptobs[k].free */
1706 /* rather than scm_ptobs[k].close.  .close */
1707 /* is for explicit CLOSE-PORT by user */
1708 mm
= scm_ptobs
[k
].free (scmptr
);
1712 #if SCM_ENABLE_DEPRECATED == 1
1713 scm_c_issue_deprecation_warning
1714 ("Returning non-0 from a port free function is "
1715 "deprecated. Use scm_gc_free et al instead.");
1716 scm_c_issue_deprecation_warning_fmt
1717 ("(You just returned non-0 while freeing a %s.)",
1725 SCM_SETSTREAM (scmptr
, 0);
1726 scm_remove_from_port_table (scmptr
);
1727 scm_gc_ports_collected
++;
1728 SCM_CLR_PORT_OPEN_FLAG (scmptr
);
1732 switch SCM_TYP16 (scmptr
)
1734 case scm_tc_free_cell
:
1739 scm_gc_free (SCM_BDIGITS (scmptr
),
1740 ((SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
1741 / SCM_CHAR_BIT
)), "bignum");
1743 #endif /* def SCM_BIGDIG */
1744 case scm_tc16_complex
:
1745 scm_gc_free (SCM_COMPLEX_MEM (scmptr
), 2*sizeof (double),
1751 k
= SCM_SMOBNUM (scmptr
);
1752 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1753 if (!(k
< scm_numsmob
))
1754 SCM_MISC_ERROR ("undefined smob type", SCM_EOL
);
1756 if (scm_smobs
[k
].free
)
1759 mm
= scm_smobs
[k
].free (scmptr
);
1762 #if SCM_ENABLE_DEPRECATED == 1
1763 scm_c_issue_deprecation_warning
1764 ("Returning non-0 from a smob free function is "
1765 "deprecated. Use scm_gc_free et al instead.");
1766 scm_c_issue_deprecation_warning_fmt
1767 ("(You just returned non-0 while freeing a %s.)",
1780 SCM_MISC_ERROR ("unknown type", SCM_EOL
);
1783 if (!--left_to_collect
)
1785 SCM_SET_CELL_WORD_0 (scmptr
, nfreelist
);
1786 *freelist
->clustertail
= scmptr
;
1787 freelist
->clustertail
= SCM_CDRLOC (scmptr
);
1789 nfreelist
= SCM_EOL
;
1790 freelist
->collected
+= span
* freelist
->cluster_size
;
1791 left_to_collect
= freelist
->cluster_size
;
1795 /* Stick the new cell on the front of nfreelist. It's
1796 critical that we mark this cell as freed; otherwise, the
1797 conservative collector might trace it as some other type
1799 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
1800 SCM_SET_FREE_CELL_CDR (scmptr
, nfreelist
);
1805 #ifdef GC_FREE_SEGMENTS
1810 freelist
->heap_size
-= seg_size
;
1811 free ((char *) scm_heap_table
[i
].bounds
[0]);
1812 scm_heap_table
[i
].bounds
[0] = 0;
1813 for (j
= i
+ 1; j
< scm_n_heap_segs
; j
++)
1814 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1815 scm_n_heap_segs
-= 1;
1816 i
--; /* We need to scan the segment just moved. */
1819 #endif /* ifdef GC_FREE_SEGMENTS */
1821 /* Update the real freelist pointer to point to the head of
1822 the list of free cells we've built for this segment. */
1823 freelist
->cells
= nfreelist
;
1824 freelist
->left_to_collect
= left_to_collect
;
1827 #ifdef GUILE_DEBUG_FREELIST
1828 scm_map_free_list ();
1832 gc_sweep_freelist_finish (&scm_master_freelist
);
1833 gc_sweep_freelist_finish (&scm_master_freelist2
);
1835 /* When we move to POSIX threads private freelists should probably
1836 be GC-protected instead. */
1837 scm_freelist
= SCM_EOL
;
1838 scm_freelist2
= SCM_EOL
;
1840 scm_cells_allocated
= (SCM_HEAP_SIZE
- scm_gc_cells_collected
);
1841 scm_gc_yield
-= scm_cells_allocated
;
1843 if (scm_mallocated
< m
)
1845 /* The byte count of allocated objects has underflowed. This is
1846 probably because you forgot to report the sizes of objects you
1847 have allocated, by calling scm_done_malloc or some such. When
1848 the GC freed them, it subtracted their size from
1849 scm_mallocated, which underflowed. */
1851 "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
1852 "This is probably because the GC hasn't been correctly informed\n"
1853 "about object sizes\n");
1857 scm_mallocated
-= m
;
1858 scm_gc_malloc_collected
= m
;
1864 /* Function for non-cell memory management.
1868 scm_malloc (size_t size
)
1875 SCM_SYSCALL (ptr
= malloc (size
));
1880 SCM_SYSCALL (ptr
= malloc (size
));
1884 scm_memory_error ("malloc");
1888 scm_realloc (void *mem
, size_t size
)
1892 SCM_SYSCALL (ptr
= realloc (mem
, size
));
1896 scm_igc ("realloc");
1897 SCM_SYSCALL (ptr
= realloc (mem
, size
));
1901 scm_memory_error ("realloc");
1905 scm_strndup (const char *str
, size_t n
)
1907 char *dst
= scm_malloc (n
+1);
1908 memcpy (dst
, str
, n
);
1914 scm_strdup (const char *str
)
1916 return scm_strndup (str
, strlen (str
));
1920 scm_gc_register_collectable_memory (void *mem
, size_t size
, const char *what
)
1922 scm_mallocated
+= size
;
1924 if (scm_mallocated
> scm_mtrigger
)
1927 if (scm_mallocated
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
)
1929 if (scm_mallocated
> scm_mtrigger
)
1930 scm_mtrigger
= scm_mallocated
+ scm_mallocated
/ 2;
1932 scm_mtrigger
+= scm_mtrigger
/ 2;
1936 #ifdef GUILE_DEBUG_MALLOC
1938 scm_malloc_register (mem
, what
);
1943 scm_gc_unregister_collectable_memory (void *mem
, size_t size
, const char *what
)
1945 scm_mallocated
-= size
;
1947 #ifdef GUILE_DEBUG_MALLOC
1949 scm_malloc_unregister (mem
);
1954 scm_gc_malloc (size_t size
, const char *what
)
1956 /* XXX - The straightforward implementation below has the problem
1957 that it might call the GC twice, once in scm_malloc and then
1958 again in scm_gc_register_collectable_memory. We don't really
1959 want the second GC since it will not find new garbage.
1962 void *ptr
= scm_malloc (size
);
1963 scm_gc_register_collectable_memory (ptr
, size
, what
);
1968 scm_gc_realloc (void *mem
, size_t old_size
, size_t new_size
, const char *what
)
1970 /* XXX - see scm_gc_malloc. */
1972 void *ptr
= scm_realloc (mem
, new_size
);
1973 scm_gc_unregister_collectable_memory (mem
, old_size
, what
);
1974 scm_gc_register_collectable_memory (ptr
, new_size
, what
);
1979 scm_gc_free (void *mem
, size_t size
, const char *what
)
1981 scm_gc_unregister_collectable_memory (mem
, size
, what
);
1986 scm_gc_strndup (const char *str
, size_t n
, const char *what
)
1988 char *dst
= scm_gc_malloc (n
+1, what
);
1989 memcpy (dst
, str
, n
);
1995 scm_gc_strdup (const char *str
, const char *what
)
1997 return scm_gc_strndup (str
, strlen (str
), what
);
2000 #if SCM_ENABLE_DEPRECATED == 1
2002 /* {Deprecated front end to malloc}
2004 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
2007 * These functions provide services comparable to malloc, realloc, and
2008 * free. They should be used when allocating memory that will be under
2009 * control of the garbage collector, i.e., if the memory may be freed
2010 * during garbage collection.
2012 * They are deprecated because they weren't really used the way
2013 * outlined above, and making sure to return the right amount from
2014 * smob free routines was sometimes difficult when dealing with nested
2015 * data structures. We basically want everybody to review their code
2016 * and use the more symmetrical scm_gc_malloc/scm_gc_free functions
2017 * instead. In some cases, where scm_must_malloc has been used
2018 * incorrectly (i.e. for non-GC-able memory), use scm_malloc/free.
2022 scm_must_malloc (size_t size
, const char *what
)
2024 scm_c_issue_deprecation_warning
2025 ("scm_must_malloc is deprecated. "
2026 "Use scm_gc_malloc and scm_gc_free instead.");
2028 return scm_gc_malloc (size
, what
);
2032 scm_must_realloc (void *where
,
2037 scm_c_issue_deprecation_warning
2038 ("scm_must_realloc is deprecated. "
2039 "Use scm_gc_realloc and scm_gc_free instead.");
2041 return scm_gc_realloc (where
, old_size
, size
, what
);
2045 scm_must_strndup (const char *str
, size_t length
)
2047 scm_c_issue_deprecation_warning
2048 ("scm_must_strndup is deprecated. "
2049 "Use scm_gc_strndup and scm_gc_free instead.");
2051 return scm_gc_strndup (str
, length
, "string");
2055 scm_must_strdup (const char *str
)
2057 scm_c_issue_deprecation_warning
2058 ("scm_must_strdup is deprecated. "
2059 "Use scm_gc_strdup and scm_gc_free instead.");
2061 return scm_gc_strdup (str
, "string");
2065 scm_must_free (void *obj
)
2066 #define FUNC_NAME "scm_must_free"
2068 scm_c_issue_deprecation_warning
2069 ("scm_must_free is deprecated. "
2070 "Use scm_gc_malloc and scm_gc_free instead.");
2072 #ifdef GUILE_DEBUG_MALLOC
2073 scm_malloc_unregister (obj
);
2078 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL
);
2084 scm_done_malloc (long size
)
2086 scm_c_issue_deprecation_warning
2087 ("scm_done_malloc is deprecated. "
2088 "Use scm_gc_register_collectable_memory instead.");
2090 scm_gc_register_collectable_memory (NULL
, size
, "foreign mallocs");
2094 scm_done_free (long size
)
2096 scm_c_issue_deprecation_warning
2097 ("scm_done_free is deprecated. "
2098 "Use scm_gc_unregister_collectable_memory instead.");
2100 scm_gc_unregister_collectable_memory (NULL
, size
, "foreign mallocs");
2103 #endif /* SCM_ENABLE_DEPRECATED == 1 */
2108 * Each heap segment is an array of objects of a particular size.
2109 * Every segment has an associated (possibly shared) freelist.
2110 * A table of segment records is kept that records the upper and
2111 * lower extents of the segment; this is used during the conservative
2112 * phase of gc to identify probable gc roots (because they point
2113 * into valid segments at reasonable offsets). */
2116 * is true if the first segment was smaller than INIT_HEAP_SEG.
2117 * If scm_expmem is set to one, subsequent segment allocations will
2118 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2122 size_t scm_max_segment_size
;
2125 * is the lowest base address of any heap segment.
2127 SCM_CELLPTR scm_heap_org
;
2129 scm_t_heap_seg_data
* scm_heap_table
= 0;
2130 static size_t heap_segment_table_size
= 0;
2131 size_t scm_n_heap_segs
= 0;
2134 * initializes a new heap segment and returns the number of objects it contains.
2136 * The segment origin and segment size in bytes are input parameters.
2137 * The freelist is both input and output.
2139 * This function presumes that the scm_heap_table has already been expanded
2140 * to accommodate a new segment record and that the markbit space was reserved
2141 * for all the cards in this segment.
2144 #define INIT_CARD(card, span) \
2146 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2148 SCM_GC_SET_CARD_DOUBLECELL (card); \
2152 init_heap_seg (SCM_CELLPTR seg_org
, size_t size
, scm_t_freelist
*freelist
)
2154 register SCM_CELLPTR ptr
;
2155 SCM_CELLPTR seg_end
;
2156 size_t new_seg_index
;
2157 ptrdiff_t n_new_cells
;
2158 int span
= freelist
->span
;
2160 if (seg_org
== NULL
)
2163 /* Align the begin ptr up.
2165 ptr
= SCM_GC_CARD_UP (seg_org
);
2167 /* Compute the ceiling on valid object pointers w/in this segment.
2169 seg_end
= SCM_GC_CARD_DOWN ((char *)seg_org
+ size
);
2171 /* Find the right place and insert the segment record.
2174 while (new_seg_index
< scm_n_heap_segs
2175 && SCM_PTR_LE (scm_heap_table
[new_seg_index
].bounds
[0], seg_org
))
2180 for (i
= scm_n_heap_segs
; i
> new_seg_index
; --i
)
2181 scm_heap_table
[i
] = scm_heap_table
[i
- 1];
2186 scm_heap_table
[new_seg_index
].span
= span
;
2187 scm_heap_table
[new_seg_index
].freelist
= freelist
;
2188 scm_heap_table
[new_seg_index
].bounds
[0] = ptr
;
2189 scm_heap_table
[new_seg_index
].bounds
[1] = seg_end
;
2192 n_new_cells
= seg_end
- ptr
;
2194 freelist
->heap_size
+= n_new_cells
;
2196 /* Partition objects in this segment into clusters */
2199 SCM
*clusterp
= &clusters
;
2201 NEXT_DATA_CELL (ptr
, span
);
2202 while (ptr
< seg_end
)
2204 scm_t_cell
*nxt
= ptr
;
2205 scm_t_cell
*prv
= NULL
;
2206 scm_t_cell
*last_card
= NULL
;
2207 int n_data_cells
= (SCM_GC_CARD_N_DATA_CELLS
/ span
) * SCM_CARDS_PER_CLUSTER
- 1;
2208 NEXT_DATA_CELL(nxt
, span
);
2210 /* Allocate cluster spine
2212 *clusterp
= PTR2SCM (ptr
);
2213 SCM_SETCAR (*clusterp
, PTR2SCM (nxt
));
2214 clusterp
= SCM_CDRLOC (*clusterp
);
2217 while (n_data_cells
--)
2219 scm_t_cell
*card
= SCM_GC_CELL_CARD (ptr
);
2220 SCM scmptr
= PTR2SCM (ptr
);
2222 NEXT_DATA_CELL (nxt
, span
);
2225 if (card
!= last_card
)
2227 INIT_CARD (card
, span
);
2231 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
2232 SCM_SET_FREE_CELL_CDR (scmptr
, PTR2SCM (nxt
));
2237 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv
), SCM_EOL
);
2242 scm_t_cell
*ref
= seg_end
;
2243 NEXT_DATA_CELL (ref
, span
);
2245 /* [cmm] looks like the segment size doesn't divide cleanly by
2246 cluster size. bad cmm! */
2250 /* Patch up the last cluster pointer in the segment
2251 * to join it to the input freelist.
2253 *clusterp
= freelist
->clusters
;
2254 freelist
->clusters
= clusters
;
2258 fprintf (stderr
, "H");
2264 round_to_cluster_size (scm_t_freelist
*freelist
, size_t len
)
2266 size_t cluster_size_in_bytes
= CLUSTER_SIZE_IN_BYTES (freelist
);
2269 (len
+ cluster_size_in_bytes
- 1) / cluster_size_in_bytes
* cluster_size_in_bytes
2270 + ALIGNMENT_SLACK (freelist
);
2274 alloc_some_heap (scm_t_freelist
*freelist
, policy_on_error error_policy
)
2275 #define FUNC_NAME "alloc_some_heap"
2280 if (scm_gc_heap_lock
)
2282 /* Critical code sections (such as the garbage collector) aren't
2283 * supposed to add heap segments.
2285 fprintf (stderr
, "alloc_some_heap: Can not extend locked heap.\n");
2289 if (scm_n_heap_segs
== heap_segment_table_size
)
2291 /* We have to expand the heap segment table to have room for the new
2292 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2293 * init_heap_seg only if the allocation of the segment itself succeeds.
2295 size_t new_table_size
= scm_n_heap_segs
+ 1;
2296 size_t size
= new_table_size
* sizeof (scm_t_heap_seg_data
);
2297 scm_t_heap_seg_data
*new_heap_table
;
2299 SCM_SYSCALL (new_heap_table
= ((scm_t_heap_seg_data
*)
2300 realloc ((char *)scm_heap_table
, size
)));
2301 if (!new_heap_table
)
2303 if (error_policy
== abort_on_error
)
2305 fprintf (stderr
, "alloc_some_heap: Could not grow heap segment table.\n");
2315 scm_heap_table
= new_heap_table
;
2316 heap_segment_table_size
= new_table_size
;
2320 /* Pick a size for the new heap segment.
2321 * The rule for picking the size of a segment is explained in
2325 /* Assure that the new segment is predicted to be large enough.
2327 * New yield should at least equal GC fraction of new heap size, i.e.
2329 * y + dh > f * (h + dh)
2332 * f : min yield fraction
2334 * dh : size of new heap segment
2336 * This gives dh > (f * h - y) / (1 - f)
2338 int f
= freelist
->min_yield_fraction
;
2339 unsigned long h
= SCM_HEAP_SIZE
;
2340 size_t min_cells
= (f
* h
- 100 * (long) scm_gc_yield
) / (99 - f
);
2341 len
= SCM_EXPHEAP (freelist
->heap_size
);
2343 fprintf (stderr
, "(%ld < %ld)", (long) len
, (long) min_cells
);
2345 if (len
< min_cells
)
2346 len
= min_cells
+ freelist
->cluster_size
;
2347 len
*= sizeof (scm_t_cell
);
2348 /* force new sampling */
2349 freelist
->collected
= LONG_MAX
;
2352 if (len
> scm_max_segment_size
)
2353 len
= scm_max_segment_size
;
2358 smallest
= CLUSTER_SIZE_IN_BYTES (freelist
);
2363 /* Allocate with decaying ambition. */
2364 while ((len
>= SCM_MIN_HEAP_SEG_SIZE
)
2365 && (len
>= smallest
))
2367 size_t rounded_len
= round_to_cluster_size (freelist
, len
);
2368 SCM_SYSCALL (ptr
= (SCM_CELLPTR
) malloc (rounded_len
));
2371 init_heap_seg (ptr
, rounded_len
, freelist
);
2378 if (error_policy
== abort_on_error
)
2380 fprintf (stderr
, "alloc_some_heap: Could not grow heap.\n");
2387 /* {GC Protection Helper Functions}
2392 * If within a function you need to protect one or more scheme objects from
2393 * garbage collection, pass them as parameters to one of the
2394 * scm_remember_upto_here* functions below. These functions don't do
2395 * anything, but since the compiler does not know that they are actually
2396 * no-ops, it will generate code that calls these functions with the given
2397 * parameters. Therefore, you can be sure that the compiler will keep those
2398 * scheme values alive (on the stack or in a register) up to the point where
2399 * scm_remember_upto_here* is called. In other words, place the call to
2400 * scm_remember_upto_here* _behind_ the last code in your function, that
2401 * depends on the scheme object to exist.
2403 * Example: We want to make sure that the string object str does not get
2404 * garbage collected during the execution of 'some_function' in the code
2405 * below, because otherwise the characters belonging to str would be freed and
2406 * 'some_function' might access freed memory. To make sure that the compiler
2407 * keeps str alive on the stack or in a register such that it is visible to
2408 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2409 * call to 'some_function'. Note that this would not be necessary if str was
2410 * used anyway after the call to 'some_function'.
2411 * char *chars = SCM_STRING_CHARS (str);
2412 * some_function (chars);
2413 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2417 scm_remember_upto_here_1 (SCM obj SCM_UNUSED
)
2419 /* Empty. Protects a single object from garbage collection. */
2423 scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED
, SCM obj2 SCM_UNUSED
)
2425 /* Empty. Protects two objects from garbage collection. */
2429 scm_remember_upto_here (SCM obj SCM_UNUSED
, ...)
2431 /* Empty. Protects any number of objects from garbage collection. */
2435 These crazy functions prevent garbage collection
2436 of arguments after the first argument by
2437 ensuring they remain live throughout the
2438 function because they are used in the last
2439 line of the code block.
2440 It'd be better to have a nice compiler hint to
2441 aid the conservative stack-scanning GC. --03/09/00 gjb */
2443 scm_return_first (SCM elt
, ...)
2449 scm_return_first_int (int i
, ...)
2456 scm_permanent_object (SCM obj
)
2459 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
2465 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2466 other references are dropped, until the object is unprotected by calling
2467 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
2468 i. e. it is possible to protect the same object several times, but it is
2469 necessary to unprotect the object the same number of times to actually get
2470 the object unprotected. It is an error to unprotect an object more often
2471 than it has been protected before. The function scm_protect_object returns
2475 /* Implementation note: For every object X, there is a counter which
2476 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
2480 scm_gc_protect_object (SCM obj
)
2484 /* This critical section barrier will be replaced by a mutex. */
2487 handle
= scm_hashq_create_handle_x (scm_protects
, obj
, SCM_MAKINUM (0));
2488 SCM_SETCDR (handle
, scm_sum (SCM_CDR (handle
), SCM_MAKINUM (1)));
2496 /* Remove any protection for OBJ established by a prior call to
2497 scm_protect_object. This function returns OBJ.
2499 See scm_protect_object for more information. */
2501 scm_gc_unprotect_object (SCM obj
)
2505 /* This critical section barrier will be replaced by a mutex. */
2508 handle
= scm_hashq_get_handle (scm_protects
, obj
);
2510 if (SCM_FALSEP (handle
))
2512 fprintf (stderr
, "scm_unprotect_object called on unprotected object\n");
2517 SCM count
= scm_difference (SCM_CDR (handle
), SCM_MAKINUM (1));
2518 if (SCM_EQ_P (count
, SCM_MAKINUM (0)))
2519 scm_hashq_remove_x (scm_protects
, obj
);
2521 SCM_SETCDR (handle
, count
);
2530 scm_gc_register_root (SCM
*p
)
2533 SCM key
= scm_long2num ((long) p
);
2535 /* This critical section barrier will be replaced by a mutex. */
2538 handle
= scm_hashv_create_handle_x (scm_gc_registered_roots
, key
, SCM_MAKINUM (0));
2539 SCM_SETCDR (handle
, scm_sum (SCM_CDR (handle
), SCM_MAKINUM (1)));
2545 scm_gc_unregister_root (SCM
*p
)
2548 SCM key
= scm_long2num ((long) p
);
2550 /* This critical section barrier will be replaced by a mutex. */
2553 handle
= scm_hashv_get_handle (scm_gc_registered_roots
, key
);
2555 if (SCM_FALSEP (handle
))
2557 fprintf (stderr
, "scm_gc_unregister_root called on unregistered root\n");
2562 SCM count
= scm_difference (SCM_CDR (handle
), SCM_MAKINUM (1));
2563 if (SCM_EQ_P (count
, SCM_MAKINUM (0)))
2564 scm_hashv_remove_x (scm_gc_registered_roots
, key
);
2566 SCM_SETCDR (handle
, count
);
2573 scm_gc_register_roots (SCM
*b
, unsigned long n
)
2576 for (; p
< b
+ n
; ++p
)
2577 scm_gc_register_root (p
);
2581 scm_gc_unregister_roots (SCM
*b
, unsigned long n
)
2584 for (; p
< b
+ n
; ++p
)
2585 scm_gc_unregister_root (p
);
2588 int scm_i_terminating
;
2590 /* called on process termination. */
2596 extern int on_exit (void (*procp
) (), int arg
);
2599 cleanup (int status
, void *arg
)
2601 #error Dont know how to setup a cleanup handler on your system.
2605 scm_i_terminating
= 1;
2606 scm_flush_all_ports ();
2611 make_initial_segment (size_t init_heap_size
, scm_t_freelist
*freelist
)
2613 size_t rounded_size
= round_to_cluster_size (freelist
, init_heap_size
);
2615 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2619 rounded_size
= round_to_cluster_size (freelist
, SCM_HEAP_SEG_SIZE
);
2620 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2628 if (freelist
->min_yield_fraction
)
2629 freelist
->min_yield
= (freelist
->heap_size
* freelist
->min_yield_fraction
2631 freelist
->grow_heap_p
= (freelist
->heap_size
< freelist
->min_yield
);
2638 init_freelist (scm_t_freelist
*freelist
,
2643 freelist
->clusters
= SCM_EOL
;
2644 freelist
->cluster_size
= cluster_size
+ 1;
2645 freelist
->left_to_collect
= 0;
2646 freelist
->clusters_allocated
= 0;
2647 freelist
->min_yield
= 0;
2648 freelist
->min_yield_fraction
= min_yield
;
2649 freelist
->span
= span
;
2650 freelist
->collected
= 0;
2651 freelist
->collected_1
= 0;
2652 freelist
->heap_size
= 0;
/* Get an integer from an environment variable.  Returns DEF if VAR is
   unset or does not begin with a decimal integer; otherwise returns the
   parsed value (truncated to int).  */
static int
scm_i_getenv_int (const char *var, int def)
{
  char *end, *val = getenv (var);
  long res;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;              /* no digits parsed: fall back to default */
  return res;
}
2674 unsigned long gc_trigger_1
;
2675 unsigned long gc_trigger_2
;
2676 size_t init_heap_size_1
;
2677 size_t init_heap_size_2
;
2680 j
= SCM_NUM_PROTECTS
;
2682 scm_sys_protects
[--j
] = SCM_BOOL_F
;
2685 scm_freelist
= SCM_EOL
;
2686 scm_freelist2
= SCM_EOL
;
2687 gc_trigger_1
= scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1
);
2688 init_freelist (&scm_master_freelist
, 1, SCM_CLUSTER_SIZE_1
, gc_trigger_1
);
2689 gc_trigger_2
= scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2
);
2690 init_freelist (&scm_master_freelist2
, 2, SCM_CLUSTER_SIZE_2
, gc_trigger_2
);
2691 scm_max_segment_size
= scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size
);
2695 j
= SCM_HEAP_SEG_SIZE
;
2696 scm_mtrigger
= SCM_INIT_MALLOC_LIMIT
;
2697 scm_heap_table
= ((scm_t_heap_seg_data
*)
2698 scm_malloc (sizeof (scm_t_heap_seg_data
) * 2));
2699 heap_segment_table_size
= 2;
2701 mark_space_ptr
= &mark_space_head
;
2703 init_heap_size_1
= scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1
);
2704 init_heap_size_2
= scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2
);
2705 if (make_initial_segment (init_heap_size_1
, &scm_master_freelist
) ||
2706 make_initial_segment (init_heap_size_2
, &scm_master_freelist2
))
2709 /* scm_hplims[0] can change. do not remove scm_heap_org */
2710 scm_heap_org
= CELL_UP (scm_heap_table
[0].bounds
[0], 1);
2712 scm_c_hook_init (&scm_before_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2713 scm_c_hook_init (&scm_before_mark_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2714 scm_c_hook_init (&scm_before_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2715 scm_c_hook_init (&scm_after_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2716 scm_c_hook_init (&scm_after_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2718 /* Initialise the list of ports. */
2719 scm_port_table
= (scm_t_port
**)
2720 malloc (sizeof (scm_t_port
*) * scm_port_table_room
);
2721 if (!scm_port_table
)
2728 on_exit (cleanup
, 0);
2732 scm_stand_in_procs
= SCM_EOL
;
2733 scm_permobjs
= SCM_EOL
;
2734 scm_protects
= scm_c_make_hash_table (31);
2735 scm_gc_registered_roots
= scm_c_make_hash_table (31);
2742 SCM scm_after_gc_hook
;
2744 static SCM gc_async
;
2746 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2747 * is run after the gc, as soon as the asynchronous events are handled by the
2751 gc_async_thunk (void)
2753 scm_c_run_hook (scm_after_gc_hook
, SCM_EOL
);
2754 return SCM_UNSPECIFIED
;
2758 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2759 * the garbage collection. The only purpose of this function is to mark the
2760 * gc_async (which will eventually lead to the execution of the
2764 mark_gc_async (void * hook_data SCM_UNUSED
,
2765 void *func_data SCM_UNUSED
,
2766 void *data SCM_UNUSED
)
2768 /* If cell access debugging is enabled, the user may choose to perform
2769 * additional garbage collections after an arbitrary number of cell
2770 * accesses. We don't want the scheme level after-gc-hook to be performed
2771 * for each of these garbage collections for the following reason: The
2772 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
2773 * after-gc-hook was performed with every gc, and if the gc was performed
2774 * after a very small number of cell accesses, then the number of cell
2775 * accesses during the execution of the after-gc-hook will suffice to cause
2776 * the execution of the next gc. Then, guile would keep executing the
2777 * after-gc-hook over and over again, and would never come to do other
2780 * To overcome this problem, if cell access debugging with additional
2781 * garbage collections is enabled, the after-gc-hook is never run by the
2782 * garbage collecter. When running guile with cell access debugging and the
2783 * execution of the after-gc-hook is desired, then it is necessary to run
2784 * the hook explicitly from the user code. This has the effect, that from
2785 * the scheme level point of view it seems that garbage collection is
2786 * performed with a much lower frequency than it actually is. Obviously,
2787 * this will not work for code that depends on a fixed one to one
2788 * relationship between the execution counts of the C level garbage
2789 * collection hooks and the execution count of the scheme level
2792 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2793 if (debug_cells_gc_interval
== 0)
2794 scm_system_async_mark (gc_async
);
2796 scm_system_async_mark (gc_async
);
2802 #if SCM_ENABLE_DEPRECATED == 1
2804 /* If an allocated cell is detected during garbage collection, this
2805 * means that some code has just obtained the object but was preempted
2806 * before the initialization of the object was completed. This meanst
2807 * that some entries of the allocated cell may already contain SCM
2808 * objects. Therefore, allocated cells are scanned conservatively.
2811 scm_t_bits scm_tc16_allocated
;
2814 allocated_mark (SCM cell
)
2816 unsigned long int cell_segment
= heap_segment (cell
);
2817 unsigned int span
= scm_heap_table
[cell_segment
].span
;
2820 for (i
= 1; i
!= span
* 2; ++i
)
2822 SCM obj
= SCM_CELL_OBJECT (cell
, i
);
2823 long int obj_segment
= heap_segment (obj
);
2824 if (obj_segment
>= 0)
2831 scm_deprecated_newcell (void)
2833 scm_c_issue_deprecation_warning
2834 ("SCM_NEWCELL is deprecated. Use `scm_cell' instead.\n");
2836 return scm_cell (scm_tc16_allocated
, 0);
2840 scm_deprecated_newcell2 (void)
2842 scm_c_issue_deprecation_warning
2843 ("SCM_NEWCELL2 is deprecated. Use `scm_double_cell' instead.\n");
2845 return scm_double_cell (scm_tc16_allocated
, 0, 0, 0);
2848 #endif /* SCM_ENABLE_DEPRECATED == 1 */
2855 #if SCM_ENABLE_DEPRECATED == 1
2856 scm_tc16_allocated
= scm_make_smob_type ("allocated cell", 0);
2857 scm_set_smob_mark (scm_tc16_allocated
, allocated_mark
);
2860 scm_after_gc_hook
= scm_permanent_object (scm_make_hook (SCM_INUM0
));
2861 scm_c_define ("after-gc-hook", scm_after_gc_hook
);
2863 after_gc_thunk
= scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0
,
2865 gc_async
= scm_system_async (after_gc_thunk
); /* protected via scm_asyncs */
2867 scm_c_hook_add (&scm_after_gc_c_hook
, mark_gc_async
, NULL
, 0);
2869 #ifndef SCM_MAGIC_SNARFER
2870 #include "libguile/gc.x"
2874 #endif /*MARK_DEPENDENCIES*/