1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
45 /* #define DEBUGINFO */
47 /* SECTION: This code is compiled once.
50 #ifndef MARK_DEPENDENCIES
55 #include "libguile/_scm.h"
56 #include "libguile/eval.h"
57 #include "libguile/stime.h"
58 #include "libguile/stackchk.h"
59 #include "libguile/struct.h"
60 #include "libguile/smob.h"
61 #include "libguile/unif.h"
62 #include "libguile/async.h"
63 #include "libguile/ports.h"
64 #include "libguile/root.h"
65 #include "libguile/strings.h"
66 #include "libguile/vectors.h"
67 #include "libguile/weaks.h"
68 #include "libguile/hashtab.h"
69 #include "libguile/tags.h"
71 #include "libguile/validate.h"
72 #include "libguile/gc.h"
74 #ifdef GUILE_DEBUG_MALLOC
75 #include "libguile/debug-malloc.h"
88 #define var_start(x, y) va_start(x, y)
91 #define var_start(x, y) va_start(x)
96 unsigned int scm_gc_running_p
= 0;
100 #if (SCM_DEBUG_CELL_ACCESSES == 1)
102 unsigned int scm_debug_cell_accesses_p
= 0;
105 /* Assert that the given object is a valid reference to a valid cell. This
106 * test involves to determine whether the object is a cell pointer, whether
107 * this pointer actually points into a heap segment and whether the cell
108 * pointed to is not a free cell.
111 scm_assert_cell_valid (SCM cell
)
113 if (scm_debug_cell_accesses_p
)
115 scm_debug_cell_accesses_p
= 0; /* disable to avoid recursion */
117 if (!scm_cellp (cell
))
119 fprintf (stderr
, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell
));
122 else if (!scm_gc_running_p
)
124 /* Dirk::FIXME:: During garbage collection there occur references to
125 free cells. This is allright during conservative marking, but
126 should not happen otherwise (I think). The case of free cells
127 accessed during conservative marking is handled in function
128 scm_mark_locations. However, there still occur accesses to free
129 cells during gc. I don't understand why this happens. If it is
130 a bug and gets fixed, the following test should also work while
133 if (SCM_FREE_CELL_P (cell
))
135 fprintf (stderr
, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell
));
139 scm_debug_cell_accesses_p
= 1; /* re-enable */
144 SCM_DEFINE (scm_set_debug_cell_accesses_x
, "set-debug-cell-accesses!", 1, 0, 0,
146 "If FLAG is #f, cell access checking is disabled.\n"
147 "If FLAG is #t, cell access checking is enabled.\n"
148 "This procedure only exists because the compile-time flag\n"
149 "SCM_DEBUG_CELL_ACCESSES was set to 1.\n")
150 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
152 if (SCM_FALSEP (flag
)) {
153 scm_debug_cell_accesses_p
= 0;
154 } else if (SCM_EQ_P (flag
, SCM_BOOL_T
)) {
155 scm_debug_cell_accesses_p
= 1;
157 SCM_WRONG_TYPE_ARG (1, flag
);
159 return SCM_UNSPECIFIED
;
163 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
167 /* {heap tuning parameters}
169 * These are parameters for controlling memory allocation. The heap
170 * is the area out of which scm_cons, and object headers are allocated.
172 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
173 * 64 bit machine. The units of the _SIZE parameters are bytes.
174 * Cons pairs and object headers occupy one heap cell.
176 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
177 * allocated initially the heap will grow by half its current size
178 * each subsequent time more heap is needed.
180 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
181 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
182 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
183 * is in scm_init_storage() and alloc_some_heap() in sys.c
185 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
186 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
188 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
191 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
194 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
195 * reclaimed by a GC triggered by must_malloc. If less than this is
196 * reclaimed, the trigger threshold is raised. [I don't know what a
197 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
198 * work around an oscillation that caused almost constant GC.]
202 * Heap size 45000 and 40% min yield gives quick startup and no extra
203 * heap allocation. Having higher values on min yield may lead to
204 * large heaps, especially if code behaviour is varying its
205 * maximum consumption between different freelists.
208 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
209 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
210 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
211 int scm_default_init_heap_size_1
= (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER
- 1)
212 / SCM_CARDS_PER_CLUSTER
) * SCM_GC_CARD_SIZE
);
213 int scm_default_min_yield_1
= 40;
215 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
216 int scm_default_init_heap_size_2
= (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER
- 1)
217 / SCM_CARDS_PER_CLUSTER
) * SCM_GC_CARD_SIZE
);
218 /* The following value may seem large, but note that if we get to GC at
219 * all, this means that we have a numerically intensive application
221 int scm_default_min_yield_2
= 40;
223 int scm_default_max_segment_size
= 2097000L;/* a little less (adm) than 2 Mb */
225 #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
227 # define SCM_HEAP_SEG_SIZE 32768L
230 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
232 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
235 /* Make heap grow with factor 1.5 */
236 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
237 #define SCM_INIT_MALLOC_LIMIT 100000
238 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
240 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
241 aligned inner bounds for allocated storage */
244 /*in 386 protected mode we must only adjust the offset */
245 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
246 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
249 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
250 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
252 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
253 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
257 #define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
259 #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
260 #define CLUSTER_SIZE_IN_BYTES(freelist) \
261 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
267 typedef struct scm_freelist_t
{
268 /* collected cells */
270 /* number of cells left to collect before cluster is full */
271 unsigned int left_to_collect
;
272 /* number of clusters which have been allocated */
273 unsigned int clusters_allocated
;
274 /* a list of freelists, each of size cluster_size,
275 * except the last one which may be shorter
279 /* this is the number of objects in each cluster, including the spine cell */
281 /* indicates that we should grow heap instead of GC:ing
284 /* minimum yield on this list in order not to grow the heap
287 /* defines min_yield as percent of total heap size
289 int min_yield_fraction
;
290 /* number of cells per object on this list */
292 /* number of collected cells during last GC */
294 /* number of collected cells during penultimate GC */
296 /* total number of cells in heap segments
297 * belonging to this list.
302 SCM scm_freelist
= SCM_EOL
;
303 scm_freelist_t scm_master_freelist
= {
304 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_1
, 0, 0, 0, 1, 0, 0
306 SCM scm_freelist2
= SCM_EOL
;
307 scm_freelist_t scm_master_freelist2
= {
308 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_2
, 0, 0, 0, 2, 0, 0
312 * is the number of bytes of must_malloc allocation needed to trigger gc.
314 unsigned long scm_mtrigger
;
317 * If set, don't expand the heap. Set only during gc, during which no allocation
318 * is supposed to take place anyway.
320 int scm_gc_heap_lock
= 0;
323 * Don't pause for collection if this is set -- just
326 int scm_block_gc
= 1;
328 /* During collection, this accumulates objects holding
331 SCM scm_weak_vectors
;
333 /* During collection, this accumulates structures which are to be freed.
335 SCM scm_structs_to_free
;
337 /* GC Statistics Keeping
339 unsigned long scm_cells_allocated
= 0;
340 long scm_mallocated
= 0;
341 unsigned long scm_gc_cells_collected
;
342 unsigned long scm_gc_yield
;
343 static unsigned long scm_gc_yield_1
= 0; /* previous GC yield */
344 unsigned long scm_gc_malloc_collected
;
345 unsigned long scm_gc_ports_collected
;
346 unsigned long scm_gc_time_taken
= 0;
347 static unsigned long t_before_gc
;
348 static unsigned long t_before_sweep
;
349 unsigned long scm_gc_mark_time_taken
= 0;
350 unsigned long scm_gc_sweep_time_taken
= 0;
351 unsigned long scm_gc_times
= 0;
352 unsigned long scm_gc_cells_swept
= 0;
353 double scm_gc_cells_marked_acc
= 0.;
354 double scm_gc_cells_swept_acc
= 0.;
356 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
357 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
358 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
359 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
360 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
361 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
362 SCM_SYMBOL (sym_gc_mark_time_taken
, "gc-mark-time-taken");
363 SCM_SYMBOL (sym_gc_sweep_time_taken
, "gc-sweep-time-taken");
364 SCM_SYMBOL (sym_times
, "gc-times");
365 SCM_SYMBOL (sym_cells_marked
, "cells-marked");
366 SCM_SYMBOL (sym_cells_swept
, "cells-swept");
368 typedef struct scm_heap_seg_data_t
370 /* lower and upper bounds of the segment */
371 SCM_CELLPTR bounds
[2];
373 /* address of the head-of-freelist pointer for this segment's cells.
374 All segments usually point to the same one, scm_freelist. */
375 scm_freelist_t
*freelist
;
377 /* number of cells per object in this segment */
379 } scm_heap_seg_data_t
;
383 static scm_sizet
init_heap_seg (SCM_CELLPTR
, scm_sizet
, scm_freelist_t
*);
385 typedef enum { return_on_error
, abort_on_error
} policy_on_error
;
386 static void alloc_some_heap (scm_freelist_t
*, policy_on_error
);
389 #define SCM_HEAP_SIZE \
390 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
391 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
393 #define BVEC_GROW_SIZE 256
394 #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
395 #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))
397 /* mark space allocation */
399 typedef struct scm_mark_space_t
401 scm_c_bvec_limb_t
*bvec_space
;
402 struct scm_mark_space_t
*next
;
405 static scm_mark_space_t
*current_mark_space
;
406 static scm_mark_space_t
**mark_space_ptr
;
407 static int current_mark_space_offset
;
408 static scm_mark_space_t
*mark_space_head
;
410 static scm_c_bvec_limb_t
*
412 #define FUNC_NAME "get_bvec"
414 scm_c_bvec_limb_t
*res
;
416 if (!current_mark_space
)
418 SCM_SYSCALL (current_mark_space
= (scm_mark_space_t
*) malloc (sizeof (scm_mark_space_t
)));
419 if (!current_mark_space
)
420 SCM_MISC_ERROR ("could not grow heap", SCM_EOL
);
422 current_mark_space
->bvec_space
= NULL
;
423 current_mark_space
->next
= NULL
;
425 *mark_space_ptr
= current_mark_space
;
426 mark_space_ptr
= &(current_mark_space
->next
);
431 if (!(current_mark_space
->bvec_space
))
433 SCM_SYSCALL (current_mark_space
->bvec_space
=
434 (scm_c_bvec_limb_t
*) calloc (BVEC_GROW_SIZE_IN_BYTES
, 1));
435 if (!(current_mark_space
->bvec_space
))
436 SCM_MISC_ERROR ("could not grow heap", SCM_EOL
);
438 current_mark_space_offset
= 0;
443 if (current_mark_space_offset
== BVEC_GROW_SIZE_IN_LIMBS
)
445 current_mark_space
= NULL
;
450 res
= current_mark_space
->bvec_space
+ current_mark_space_offset
;
451 current_mark_space_offset
+= SCM_GC_CARD_BVEC_SIZE_IN_LIMBS
;
461 scm_mark_space_t
*ms
;
463 for (ms
= mark_space_head
; ms
; ms
= ms
->next
)
464 memset (ms
->bvec_space
, 0, BVEC_GROW_SIZE_IN_BYTES
);
469 /* Debugging functions. */
471 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
473 /* Return the number of the heap segment containing CELL. */
479 for (i
= 0; i
< scm_n_heap_segs
; i
++)
480 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], SCM2PTR (cell
))
481 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], SCM2PTR (cell
)))
483 fprintf (stderr
, "which_seg: can't find segment containing cell %lx\n",
490 map_free_list (scm_freelist_t
*master
, SCM freelist
)
492 int last_seg
= -1, count
= 0;
495 for (f
= freelist
; !SCM_NULLP (f
); f
= SCM_FREE_CELL_CDR (f
))
497 int this_seg
= which_seg (f
);
499 if (this_seg
!= last_seg
)
502 fprintf (stderr
, " %5d %d-cells in segment %d\n",
503 count
, master
->span
, last_seg
);
510 fprintf (stderr
, " %5d %d-cells in segment %d\n",
511 count
, master
->span
, last_seg
);
514 SCM_DEFINE (scm_map_free_list
, "map-free-list", 0, 0, 0,
516 "Print debugging information about the free-list.\n"
517 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
518 #define FUNC_NAME s_scm_map_free_list
521 fprintf (stderr
, "%d segments total (%d:%d",
523 scm_heap_table
[0].span
,
524 scm_heap_table
[0].bounds
[1] - scm_heap_table
[0].bounds
[0]);
525 for (i
= 1; i
< scm_n_heap_segs
; i
++)
526 fprintf (stderr
, ", %d:%d",
527 scm_heap_table
[i
].span
,
528 scm_heap_table
[i
].bounds
[1] - scm_heap_table
[i
].bounds
[0]);
529 fprintf (stderr
, ")\n");
530 map_free_list (&scm_master_freelist
, scm_freelist
);
531 map_free_list (&scm_master_freelist2
, scm_freelist2
);
534 return SCM_UNSPECIFIED
;
538 static int last_cluster
;
539 static int last_size
;
542 free_list_length (char *title
, int i
, SCM freelist
)
546 for (ls
= freelist
; !SCM_NULLP (ls
); ls
= SCM_FREE_CELL_CDR (ls
))
547 if (SCM_FREE_CELL_P (ls
))
551 fprintf (stderr
, "bad cell in %s at position %d\n", title
, n
);
558 if (last_cluster
== i
- 1)
559 fprintf (stderr
, "\t%d\n", last_size
);
561 fprintf (stderr
, "-%d\t%d\n", i
- 1, last_size
);
564 fprintf (stderr
, "%s %d", title
, i
);
566 fprintf (stderr
, "%s\t%d\n", title
, n
);
574 free_list_lengths (char *title
, scm_freelist_t
*master
, SCM freelist
)
577 int i
= 0, len
, n
= 0;
578 fprintf (stderr
, "%s\n\n", title
);
579 n
+= free_list_length ("free list", -1, freelist
);
580 for (clusters
= master
->clusters
;
581 SCM_NNULLP (clusters
);
582 clusters
= SCM_CDR (clusters
))
584 len
= free_list_length ("cluster", i
++, SCM_CAR (clusters
));
587 if (last_cluster
== i
- 1)
588 fprintf (stderr
, "\t%d\n", last_size
);
590 fprintf (stderr
, "-%d\t%d\n", i
- 1, last_size
);
591 fprintf (stderr
, "\ntotal %d objects\n\n", n
);
594 SCM_DEFINE (scm_free_list_length
, "free-list-length", 0, 0, 0,
596 "Print debugging information about the free-list.\n"
597 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
598 #define FUNC_NAME s_scm_free_list_length
600 free_list_lengths ("1-cells", &scm_master_freelist
, scm_freelist
);
601 free_list_lengths ("2-cells", &scm_master_freelist2
, scm_freelist2
);
602 return SCM_UNSPECIFIED
;
608 #ifdef GUILE_DEBUG_FREELIST
610 /* Number of calls to SCM_NEWCELL since startup. */
611 static unsigned long scm_newcell_count
;
612 static unsigned long scm_newcell2_count
;
614 /* Search freelist for anything that isn't marked as a free cell.
615 Abort if we find something. */
617 scm_check_freelist (SCM freelist
)
622 for (f
= freelist
; !SCM_NULLP (f
); f
= SCM_FREE_CELL_CDR (f
), i
++)
623 if (!SCM_FREE_CELL_P (f
))
625 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %d'th elt\n",
626 scm_newcell_count
, i
);
631 SCM_DEFINE (scm_gc_set_debug_check_freelist_x
, "gc-set-debug-check-freelist!", 1, 0, 0,
633 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
634 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
635 "compile-time flag was selected.\n")
636 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
638 /* [cmm] I did a double-take when I read this code the first time.
640 SCM_VALIDATE_BOOL_COPY (1, flag
, scm_debug_check_freelist
);
641 return SCM_UNSPECIFIED
;
647 scm_debug_newcell (void)
652 if (scm_debug_check_freelist
)
654 scm_check_freelist (scm_freelist
);
658 /* The rest of this is supposed to be identical to the SCM_NEWCELL
660 if (SCM_NULLP (scm_freelist
))
661 new = scm_gc_for_newcell (&scm_master_freelist
, &scm_freelist
);
665 scm_freelist
= SCM_FREE_CELL_CDR (scm_freelist
);
672 scm_debug_newcell2 (void)
676 scm_newcell2_count
++;
677 if (scm_debug_check_freelist
)
679 scm_check_freelist (scm_freelist2
);
683 /* The rest of this is supposed to be identical to the SCM_NEWCELL
685 if (SCM_NULLP (scm_freelist2
))
686 new = scm_gc_for_newcell (&scm_master_freelist2
, &scm_freelist2
);
690 scm_freelist2
= SCM_FREE_CELL_CDR (scm_freelist2
);
696 #endif /* GUILE_DEBUG_FREELIST */
701 master_cells_allocated (scm_freelist_t
*master
)
703 /* the '- 1' below is to ignore the cluster spine cells. */
704 int objects
= master
->clusters_allocated
* (master
->cluster_size
- 1);
705 if (SCM_NULLP (master
->clusters
))
706 objects
-= master
->left_to_collect
;
707 return master
->span
* objects
;
711 freelist_length (SCM freelist
)
714 for (n
= 0; !SCM_NULLP (freelist
); freelist
= SCM_FREE_CELL_CDR (freelist
))
720 compute_cells_allocated ()
722 return (scm_cells_allocated
723 + master_cells_allocated (&scm_master_freelist
)
724 + master_cells_allocated (&scm_master_freelist2
)
725 - scm_master_freelist
.span
* freelist_length (scm_freelist
)
726 - scm_master_freelist2
.span
* freelist_length (scm_freelist2
));
729 /* {Scheme Interface to GC}
732 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
734 "Returns an association list of statistics about Guile's current use of storage. ")
735 #define FUNC_NAME s_scm_gc_stats
740 long int local_scm_mtrigger
;
741 long int local_scm_mallocated
;
742 long int local_scm_heap_size
;
743 long int local_scm_cells_allocated
;
744 long int local_scm_gc_time_taken
;
745 long int local_scm_gc_times
;
746 long int local_scm_gc_mark_time_taken
;
747 long int local_scm_gc_sweep_time_taken
;
748 double local_scm_gc_cells_swept
;
749 double local_scm_gc_cells_marked
;
759 for (i
= scm_n_heap_segs
; i
--; )
760 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
761 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
763 if (scm_n_heap_segs
!= n
)
768 /* Below, we cons to produce the resulting list. We want a snapshot of
769 * the heap situation before consing.
771 local_scm_mtrigger
= scm_mtrigger
;
772 local_scm_mallocated
= scm_mallocated
;
773 local_scm_heap_size
= SCM_HEAP_SIZE
;
774 local_scm_cells_allocated
= compute_cells_allocated ();
775 local_scm_gc_time_taken
= scm_gc_time_taken
;
776 local_scm_gc_mark_time_taken
= scm_gc_mark_time_taken
;
777 local_scm_gc_sweep_time_taken
= scm_gc_sweep_time_taken
;
778 local_scm_gc_times
= scm_gc_times
;
779 local_scm_gc_cells_swept
= scm_gc_cells_swept_acc
;
780 local_scm_gc_cells_marked
= scm_gc_cells_marked_acc
;
782 answer
= scm_listify (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
783 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
784 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
785 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
786 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
787 scm_cons (sym_times
, scm_ulong2num (local_scm_gc_times
)),
788 scm_cons (sym_gc_mark_time_taken
, scm_ulong2num (local_scm_gc_mark_time_taken
)),
789 scm_cons (sym_gc_sweep_time_taken
, scm_ulong2num (local_scm_gc_sweep_time_taken
)),
790 scm_cons (sym_cells_marked
, scm_dbl2big (local_scm_gc_cells_marked
)),
791 scm_cons (sym_cells_swept
, scm_dbl2big (local_scm_gc_cells_swept
)),
792 scm_cons (sym_heap_segments
, heap_segs
),
801 gc_start_stats (const char *what
)
803 t_before_gc
= scm_c_get_internal_run_time ();
804 scm_gc_cells_swept
= 0;
805 scm_gc_cells_collected
= 0;
806 scm_gc_yield_1
= scm_gc_yield
;
807 scm_gc_yield
= (scm_cells_allocated
808 + master_cells_allocated (&scm_master_freelist
)
809 + master_cells_allocated (&scm_master_freelist2
));
810 scm_gc_malloc_collected
= 0;
811 scm_gc_ports_collected
= 0;
818 unsigned long t
= scm_c_get_internal_run_time ();
819 scm_gc_time_taken
+= (t
- t_before_gc
);
820 scm_gc_sweep_time_taken
+= (t
- t_before_sweep
);
823 scm_gc_cells_marked_acc
+= scm_gc_cells_swept
- scm_gc_cells_collected
;
824 scm_gc_cells_swept_acc
+= scm_gc_cells_swept
;
828 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
830 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
831 "returned by this function for @var{obj}")
832 #define FUNC_NAME s_scm_object_address
834 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj
));
839 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
841 "Scans all of SCM objects and reclaims for further use those that are\n"
842 "no longer accessible.")
843 #define FUNC_NAME s_scm_gc
848 return SCM_UNSPECIFIED
;
854 /* {C Interface For When GC is Triggered}
858 adjust_min_yield (scm_freelist_t
*freelist
)
860 /* min yield is adjusted upwards so that next predicted total yield
861 * (allocated cells actually freed by GC) becomes
862 * `min_yield_fraction' of total heap size. Note, however, that
863 * the absolute value of min_yield will correspond to `collected'
864 * on one master (the one which currently is triggering GC).
866 * The reason why we look at total yield instead of cells collected
867 * on one list is that we want to take other freelists into account.
868 * On this freelist, we know that (local) yield = collected cells,
869 * but that's probably not the case on the other lists.
871 * (We might consider computing a better prediction, for example
872 * by computing an average over multiple GC:s.)
874 if (freelist
->min_yield_fraction
)
876 /* Pick largest of last two yields. */
877 int delta
= ((SCM_HEAP_SIZE
* freelist
->min_yield_fraction
/ 100)
878 - (long) SCM_MAX (scm_gc_yield_1
, scm_gc_yield
));
880 fprintf (stderr
, " after GC = %d, delta = %d\n",
885 freelist
->min_yield
+= delta
;
890 /* When we get POSIX threads support, the master will be global and
891 * common while the freelist will be individual for each thread.
895 scm_gc_for_newcell (scm_freelist_t
*master
, SCM
*freelist
)
901 if (SCM_NULLP (master
->clusters
))
903 if (master
->grow_heap_p
|| scm_block_gc
)
905 /* In order to reduce gc frequency, try to allocate a new heap
906 * segment first, even if gc might find some free cells. If we
907 * can't obtain a new heap segment, we will try gc later.
909 master
->grow_heap_p
= 0;
910 alloc_some_heap (master
, return_on_error
);
912 if (SCM_NULLP (master
->clusters
))
914 /* The heap was not grown, either because it wasn't scheduled to
915 * grow, or because there was not enough memory available. In
916 * both cases we have to try gc to get some free cells.
919 fprintf (stderr
, "allocated = %d, ",
921 + master_cells_allocated (&scm_master_freelist
)
922 + master_cells_allocated (&scm_master_freelist2
));
925 adjust_min_yield (master
);
926 if (SCM_NULLP (master
->clusters
))
928 /* gc could not free any cells. Now, we _must_ allocate a
929 * new heap segment, because there is no other possibility
930 * to provide a new cell for the caller.
932 alloc_some_heap (master
, abort_on_error
);
936 cell
= SCM_CAR (master
->clusters
);
937 master
->clusters
= SCM_CDR (master
->clusters
);
938 ++master
->clusters_allocated
;
940 while (SCM_NULLP (cell
));
942 #ifdef GUILE_DEBUG_FREELIST
943 scm_check_freelist (cell
);
947 *freelist
= SCM_FREE_CELL_CDR (cell
);
953 /* This is a support routine which can be used to reserve a cluster
954 * for some special use, such as debugging. It won't be useful until
955 * free cells are preserved between garbage collections.
959 scm_alloc_cluster (scm_freelist_t
*master
)
962 cell
= scm_gc_for_newcell (master
, &freelist
);
963 SCM_SETCDR (cell
, freelist
);
969 scm_c_hook_t scm_before_gc_c_hook
;
970 scm_c_hook_t scm_before_mark_c_hook
;
971 scm_c_hook_t scm_before_sweep_c_hook
;
972 scm_c_hook_t scm_after_sweep_c_hook
;
973 scm_c_hook_t scm_after_gc_c_hook
;
977 scm_igc (const char *what
)
982 scm_c_hook_run (&scm_before_gc_c_hook
, 0);
985 SCM_NULLP (scm_freelist
)
987 : (SCM_NULLP (scm_freelist2
) ? "o" : "m"));
990 /* During the critical section, only the current thread may run. */
991 SCM_THREAD_CRITICAL_SECTION_START
;
994 /* fprintf (stderr, "gc: %s\n", what); */
996 if (!scm_stack_base
|| scm_block_gc
)
1002 gc_start_stats (what
);
1004 if (scm_mallocated
< 0)
1005 /* The byte count of allocated objects has underflowed. This is
1006 probably because you forgot to report the sizes of objects you
1007 have allocated, by calling scm_done_malloc or some such. When
1008 the GC freed them, it subtracted their size from
1009 scm_mallocated, which underflowed. */
1012 if (scm_gc_heap_lock
)
1013 /* We've invoked the collector while a GC is already in progress.
1014 That should never happen. */
1019 /* flush dead entries from the continuation stack */
1024 elts
= SCM_VELTS (scm_continuation_stack
);
1025 bound
= SCM_VECTOR_LENGTH (scm_continuation_stack
);
1026 x
= SCM_INUM (scm_continuation_stack_ptr
);
1029 elts
[x
] = SCM_BOOL_F
;
1034 scm_c_hook_run (&scm_before_mark_c_hook
, 0);
1036 clear_mark_space ();
1040 /* Mark objects on the C stack. */
1041 SCM_FLUSH_REGISTER_WINDOWS
;
1042 /* This assumes that all registers are saved into the jmp_buf */
1043 setjmp (scm_save_regs_gc_mark
);
1044 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
1045 ( (scm_sizet
) (sizeof (SCM_STACKITEM
) - 1 +
1046 sizeof scm_save_regs_gc_mark
)
1047 / sizeof (SCM_STACKITEM
)));
1050 scm_sizet stack_len
= scm_stack_size (scm_stack_base
);
1051 #ifdef SCM_STACK_GROWS_UP
1052 scm_mark_locations (scm_stack_base
, stack_len
);
1054 scm_mark_locations (scm_stack_base
- stack_len
, stack_len
);
1058 #else /* USE_THREADS */
1060 /* Mark every thread's stack and registers */
1061 scm_threads_mark_stacks ();
1063 #endif /* USE_THREADS */
1065 j
= SCM_NUM_PROTECTS
;
1067 scm_gc_mark (scm_sys_protects
[j
]);
1069 /* FIXME: we should have a means to register C functions to be run
1070 * in different phases of GC
1072 scm_mark_subr_table ();
1075 scm_gc_mark (scm_root
->handle
);
1078 t_before_sweep
= scm_c_get_internal_run_time ();
1079 scm_gc_mark_time_taken
+= (t_before_sweep
- t_before_gc
);
1081 scm_c_hook_run (&scm_before_sweep_c_hook
, 0);
1085 scm_c_hook_run (&scm_after_sweep_c_hook
, 0);
1091 SCM_THREAD_CRITICAL_SECTION_END
;
1093 scm_c_hook_run (&scm_after_gc_c_hook
, 0);
1102 #define MARK scm_gc_mark
1103 #define FNAME "scm_gc_mark"
1105 #endif /*!MARK_DEPENDENCIES*/
1107 /* Mark an object precisely.
1111 #define FUNC_NAME FNAME
1116 #ifndef MARK_DEPENDENCIES
1117 # define RECURSE scm_gc_mark
1119 /* go through the usual marking, but not for self-cycles. */
1120 # define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1124 #ifdef MARK_DEPENDENCIES
1125 goto gc_mark_loop_first_time
;
1134 #ifdef MARK_DEPENDENCIES
1135 if (SCM_EQ_P (ptr
, p
))
1141 gc_mark_loop_first_time
:
1144 if (!SCM_CELLP (ptr
))
1145 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL
);
1147 #if (defined (GUILE_DEBUG_FREELIST))
1149 if (SCM_GC_IN_CARD_HEADERP (SCM2PTR (ptr
)))
1150 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL
);
1154 #ifndef MARK_DEPENDENCIES
1156 if (SCM_GCMARKP (ptr
))
1159 SCM_SETGCMARK (ptr
);
1163 switch (SCM_TYP7 (ptr
))
1165 case scm_tcs_cons_nimcar
:
1166 if (SCM_IMP (SCM_CDR (ptr
)))
1168 ptr
= SCM_CAR (ptr
);
1171 RECURSE (SCM_CAR (ptr
));
1172 ptr
= SCM_CDR (ptr
);
1174 case scm_tcs_cons_imcar
:
1175 ptr
= SCM_CDR (ptr
);
1178 RECURSE (SCM_CELL_OBJECT_2 (ptr
));
1179 ptr
= SCM_CDR (ptr
);
1181 case scm_tcs_cons_gloc
:
1183 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
1184 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
1185 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
1186 * pointer to a struct vtable data region. The fact that these are
1187 * accessed in the same way restricts the possibilites to change the
1188 * data layout of structs or heap cells.
1190 scm_bits_t word0
= SCM_CELL_WORD_0 (ptr
) - scm_tc3_cons_gloc
;
1191 scm_bits_t
* vtable_data
= (scm_bits_t
*) word0
; /* access as struct */
1192 if (vtable_data
[scm_vtable_index_vcell
] != 0)
1195 SCM gloc_car
= SCM_PACK (word0
);
1197 ptr
= SCM_CDR (ptr
);
1202 /* ptr is a struct */
1203 SCM layout
= SCM_PACK (vtable_data
[scm_vtable_index_layout
]);
1204 int len
= SCM_SYMBOL_LENGTH (layout
);
1205 char * fields_desc
= SCM_SYMBOL_CHARS (layout
);
1206 scm_bits_t
* struct_data
= (scm_bits_t
*) SCM_STRUCT_DATA (ptr
);
1208 if (vtable_data
[scm_struct_i_flags
] & SCM_STRUCTF_ENTITY
)
1210 RECURSE (SCM_PACK (struct_data
[scm_struct_i_procedure
]));
1211 RECURSE (SCM_PACK (struct_data
[scm_struct_i_setter
]));
1217 for (x
= 0; x
< len
- 2; x
+= 2, ++struct_data
)
1218 if (fields_desc
[x
] == 'p')
1219 RECURSE (SCM_PACK (*struct_data
));
1220 if (fields_desc
[x
] == 'p')
1222 if (SCM_LAYOUT_TAILP (fields_desc
[x
+ 1]))
1223 for (x
= *struct_data
++; x
; --x
, ++struct_data
)
1224 RECURSE (SCM_PACK (*struct_data
));
1226 RECURSE (SCM_PACK (*struct_data
));
1230 ptr
= SCM_PACK (vtable_data
[scm_vtable_index_vtable
]);
1235 case scm_tcs_closures
:
1236 if (SCM_IMP (SCM_CDR (ptr
)))
1238 ptr
= SCM_CLOSCAR (ptr
);
1241 RECURSE (SCM_CLOSCAR (ptr
));
1242 ptr
= SCM_CDR (ptr
);
1244 case scm_tc7_vector
:
1245 i
= SCM_VECTOR_LENGTH (ptr
);
1249 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
1250 RECURSE (SCM_VELTS (ptr
)[i
]);
1251 ptr
= SCM_VELTS (ptr
)[0];
1256 unsigned long int i
= SCM_CCLO_LENGTH (ptr
);
1257 unsigned long int j
;
1258 for (j
= 1; j
!= i
; ++j
)
1260 SCM obj
= SCM_CCLO_REF (ptr
, j
);
1264 ptr
= SCM_CCLO_REF (ptr
, 0);
1270 case scm_tc7_byvect
:
1277 #ifdef HAVE_LONG_LONGS
1278 case scm_tc7_llvect
:
1281 case scm_tc7_string
:
1284 case scm_tc7_substring
:
1285 ptr
= SCM_CDR (ptr
);
1289 SCM_WVECT_GC_CHAIN (ptr
) = scm_weak_vectors
;
1290 scm_weak_vectors
= ptr
;
1291 if (SCM_IS_WHVEC_ANY (ptr
))
1298 len
= SCM_VECTOR_LENGTH (ptr
);
1299 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
1300 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
1302 for (x
= 0; x
< len
; ++x
)
1305 alist
= SCM_VELTS (ptr
)[x
];
1307 /* mark everything on the alist except the keys or
1308 * values, according to weak_values and weak_keys. */
1309 while ( SCM_CONSP (alist
)
1310 && !SCM_GCMARKP (alist
)
1311 && SCM_CONSP (SCM_CAR (alist
)))
1316 kvpair
= SCM_CAR (alist
);
1317 next_alist
= SCM_CDR (alist
);
1320 * SCM_SETGCMARK (alist);
1321 * SCM_SETGCMARK (kvpair);
1323 * It may be that either the key or value is protected by
1324 * an escaped reference to part of the spine of this alist.
1325 * If we mark the spine here, and only mark one or neither of the
1326 * key and value, they may never be properly marked.
1327 * This leads to a horrible situation in which an alist containing
1328 * freelist cells is exported.
1330 * So only mark the spines of these arrays last of all marking.
1331 * If somebody confuses us by constructing a weak vector
1332 * with a circular alist then we are hosed, but at least we
1333 * won't prematurely drop table entries.
1336 RECURSE (SCM_CAR (kvpair
));
1338 RECURSE (SCM_CDR (kvpair
));
1341 if (SCM_NIMP (alist
))
1347 case scm_tc7_symbol
:
1348 ptr
= SCM_PROP_SLOTS (ptr
);
1353 i
= SCM_PTOBNUM (ptr
);
1354 if (!(i
< scm_numptob
))
1356 if (SCM_PTAB_ENTRY(ptr
))
1357 RECURSE (SCM_FILENAME (ptr
));
1358 if (scm_ptobs
[i
].mark
)
1360 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
1367 switch (SCM_TYP16 (ptr
))
1368 { /* should be faster than going through scm_smobs */
1369 case scm_tc_free_cell
:
1370 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1373 case scm_tc16_complex
:
1376 i
= SCM_SMOBNUM (ptr
);
1377 if (!(i
< scm_numsmob
))
1379 if (scm_smobs
[i
].mark
)
1381 ptr
= (scm_smobs
[i
].mark
) (ptr
);
1390 SCM_MISC_ERROR ("unknown type", SCM_EOL
);
1396 #ifndef MARK_DEPENDENCIES
1401 /* And here we define `scm_gc_mark_dependencies', by including this
1402 * same file in itself.
1404 #define MARK scm_gc_mark_dependencies
1405 #define FNAME "scm_gc_mark_dependencies"
1406 #define MARK_DEPENDENCIES
1408 #undef MARK_DEPENDENCIES
1413 /* Mark a Region Conservatively
1417 scm_mark_locations (SCM_STACKITEM x
[], scm_sizet n
)
1421 for (m
= 0; m
< n
; ++m
)
1423 SCM obj
= * (SCM
*) &x
[m
];
1424 if (SCM_CELLP (obj
))
1426 SCM_CELLPTR ptr
= SCM2PTR (obj
);
1428 int j
= scm_n_heap_segs
- 1;
1429 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1430 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1437 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
1439 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1447 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
1451 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
1456 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1460 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1467 if (SCM_GC_IN_CARD_HEADERP (ptr
))
1470 if (scm_heap_table
[seg_id
].span
== 1
1471 || DOUBLECELL_ALIGNED_P (obj
))
1482 /* The function scm_cellp determines whether an SCM value can be regarded as a
1483 * pointer to a cell on the heap. Binary search is used in order to determine
1484 * the heap segment that contains the cell.
1487 scm_cellp (SCM value
)
1489 if (SCM_CELLP (value
)) {
1490 scm_cell
* ptr
= SCM2PTR (value
);
1492 unsigned int j
= scm_n_heap_segs
- 1;
1495 int k
= (i
+ j
) / 2;
1496 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
)) {
1498 } else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
)) {
1503 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1504 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
)
1505 && (scm_heap_table
[i
].span
== 1 || DOUBLECELL_ALIGNED_P (value
))
1506 && !SCM_GC_IN_CARD_HEADERP (ptr
)
1517 gc_sweep_freelist_start (scm_freelist_t
*freelist
)
1519 freelist
->cells
= SCM_EOL
;
1520 freelist
->left_to_collect
= freelist
->cluster_size
;
1521 freelist
->clusters_allocated
= 0;
1522 freelist
->clusters
= SCM_EOL
;
1523 freelist
->clustertail
= &freelist
->clusters
;
1524 freelist
->collected_1
= freelist
->collected
;
1525 freelist
->collected
= 0;
1529 gc_sweep_freelist_finish (scm_freelist_t
*freelist
)
1532 *freelist
->clustertail
= freelist
->cells
;
1533 if (!SCM_NULLP (freelist
->cells
))
1535 SCM c
= freelist
->cells
;
1536 SCM_SETCAR (c
, SCM_CDR (c
));
1537 SCM_SETCDR (c
, SCM_EOL
);
1538 freelist
->collected
+=
1539 freelist
->span
* (freelist
->cluster_size
- freelist
->left_to_collect
);
1541 scm_gc_cells_collected
+= freelist
->collected
;
1543 /* Although freelist->min_yield is used to test freelist->collected
1544 * (which is the local GC yield for freelist), it is adjusted so
1545 * that *total* yield is freelist->min_yield_fraction of total heap
1546 * size. This means that a too low yield is compensated by more
1547 * heap on the list which is currently doing most work, which is
1548 * just what we want.
1550 collected
= SCM_MAX (freelist
->collected_1
, freelist
->collected
);
1551 freelist
->grow_heap_p
= (collected
< freelist
->min_yield
);
1554 #define NEXT_DATA_CELL(ptr, span) \
1556 scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1557 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1558 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1564 #define FUNC_NAME "scm_gc_sweep"
1566 register SCM_CELLPTR ptr
;
1567 register SCM nfreelist
;
1568 register scm_freelist_t
*freelist
;
1576 gc_sweep_freelist_start (&scm_master_freelist
);
1577 gc_sweep_freelist_start (&scm_master_freelist2
);
1579 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1581 register unsigned int left_to_collect
;
1582 register scm_sizet j
;
1584 /* Unmarked cells go onto the front of the freelist this heap
1585 segment points to. Rather than updating the real freelist
1586 pointer as we go along, we accumulate the new head in
1587 nfreelist. Then, if it turns out that the entire segment is
1588 free, we free (i.e., malloc's free) the whole segment, and
1589 simply don't assign nfreelist back into the real freelist. */
1590 freelist
= scm_heap_table
[i
].freelist
;
1591 nfreelist
= freelist
->cells
;
1592 left_to_collect
= freelist
->left_to_collect
;
1593 span
= scm_heap_table
[i
].span
;
1595 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0], span
);
1596 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1], span
) - ptr
;
1598 /* use only data cells in seg_size */
1599 seg_size
= (seg_size
/ SCM_GC_CARD_N_CELLS
) * (SCM_GC_CARD_N_DATA_CELLS
/ span
) * span
;
1601 scm_gc_cells_swept
+= seg_size
;
1603 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
1607 if (SCM_GC_IN_CARD_HEADERP (ptr
))
1613 NEXT_DATA_CELL (nxt
, span
);
1620 scmptr
= PTR2SCM (ptr
);
1622 if (SCM_GCMARKP (scmptr
))
1625 switch SCM_TYP7 (scmptr
)
1627 case scm_tcs_cons_gloc
:
1629 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1630 * struct or a gloc. See the corresponding comment in
1633 scm_bits_t word0
= (SCM_CELL_WORD_0 (scmptr
)
1634 - scm_tc3_cons_gloc
);
1635 /* access as struct */
1636 scm_bits_t
* vtable_data
= (scm_bits_t
*) word0
;
1637 if (vtable_data
[scm_vtable_index_vcell
] == 0)
1639 /* Structs need to be freed in a special order.
1640 * This is handled by GC C hooks in struct.c.
1642 SCM_SET_STRUCT_GC_CHAIN (scmptr
, scm_structs_to_free
);
1643 scm_structs_to_free
= scmptr
;
1646 /* fall through so that scmptr gets collected */
1649 case scm_tcs_cons_imcar
:
1650 case scm_tcs_cons_nimcar
:
1651 case scm_tcs_closures
:
1655 m
+= (2 + SCM_VECTOR_LENGTH (scmptr
)) * sizeof (SCM
);
1656 scm_must_free (SCM_VECTOR_BASE (scmptr
) - 2);
1658 case scm_tc7_vector
:
1660 unsigned long int length
= SCM_VECTOR_LENGTH (scmptr
);
1663 m
+= length
* sizeof (scm_bits_t
);
1664 scm_must_free (SCM_VECTOR_BASE (scmptr
));
1670 m
+= (SCM_CCLO_LENGTH (scmptr
) * sizeof (SCM
));
1671 scm_must_free (SCM_CCLO_BASE (scmptr
));
1677 unsigned long int length
= SCM_BITVECTOR_LENGTH (scmptr
);
1680 m
+= sizeof (long) * ((length
+ SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
1681 scm_must_free (SCM_BITVECTOR_BASE (scmptr
));
1685 case scm_tc7_byvect
:
1689 #ifdef HAVE_LONG_LONGS
1690 case scm_tc7_llvect
:
1695 m
+= SCM_UVECTOR_LENGTH (scmptr
) * scm_uniform_element_size (scmptr
);
1696 scm_must_free (SCM_UVECTOR_BASE (scmptr
));
1699 case scm_tc7_substring
:
1701 case scm_tc7_string
:
1702 m
+= SCM_STRING_LENGTH (scmptr
) + 1;
1703 scm_must_free (SCM_STRING_CHARS (scmptr
));
1705 case scm_tc7_symbol
:
1706 m
+= SCM_SYMBOL_LENGTH (scmptr
) + 1;
1707 scm_must_free (SCM_SYMBOL_CHARS (scmptr
));
1710 /* the various "subrs" (primitives) are never freed */
1713 if SCM_OPENP (scmptr
)
1715 int k
= SCM_PTOBNUM (scmptr
);
1716 if (!(k
< scm_numptob
))
1718 /* Keep "revealed" ports alive. */
1719 if (scm_revealed_count (scmptr
) > 0)
1721 /* Yes, I really do mean scm_ptobs[k].free */
1722 /* rather than ftobs[k].close. .close */
1723 /* is for explicit CLOSE-PORT by user */
1724 m
+= (scm_ptobs
[k
].free
) (scmptr
);
1725 SCM_SETSTREAM (scmptr
, 0);
1726 scm_remove_from_port_table (scmptr
);
1727 scm_gc_ports_collected
++;
1728 SCM_SETAND_CAR (scmptr
, ~SCM_OPN
);
1732 switch SCM_TYP16 (scmptr
)
1734 case scm_tc_free_cell
:
1739 m
+= (SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
/ SCM_CHAR_BIT
);
1740 scm_must_free (SCM_BDIGITS (scmptr
));
1742 #endif /* def SCM_BIGDIG */
1743 case scm_tc16_complex
:
1744 m
+= sizeof (scm_complex_t
);
1745 scm_must_free (SCM_COMPLEX_MEM (scmptr
));
1750 k
= SCM_SMOBNUM (scmptr
);
1751 if (!(k
< scm_numsmob
))
1753 m
+= (scm_smobs
[k
].free
) (scmptr
);
1760 SCM_MISC_ERROR ("unknown type", SCM_EOL
);
1763 if (!--left_to_collect
)
1765 SCM_SETCAR (scmptr
, nfreelist
);
1766 *freelist
->clustertail
= scmptr
;
1767 freelist
->clustertail
= SCM_CDRLOC (scmptr
);
1769 nfreelist
= SCM_EOL
;
1770 freelist
->collected
+= span
* freelist
->cluster_size
;
1771 left_to_collect
= freelist
->cluster_size
;
1775 /* Stick the new cell on the front of nfreelist. It's
1776 critical that we mark this cell as freed; otherwise, the
1777 conservative collector might trace it as some other type
1779 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
1780 SCM_SET_FREE_CELL_CDR (scmptr
, nfreelist
);
1785 #ifdef GC_FREE_SEGMENTS
1790 freelist
->heap_size
-= seg_size
;
1791 free ((char *) scm_heap_table
[i
].bounds
[0]);
1792 scm_heap_table
[i
].bounds
[0] = 0;
1793 for (j
= i
+ 1; j
< scm_n_heap_segs
; j
++)
1794 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1795 scm_n_heap_segs
-= 1;
1796 i
--; /* We need to scan the segment just moved. */
1799 #endif /* ifdef GC_FREE_SEGMENTS */
1801 /* Update the real freelist pointer to point to the head of
1802 the list of free cells we've built for this segment. */
1803 freelist
->cells
= nfreelist
;
1804 freelist
->left_to_collect
= left_to_collect
;
1807 #ifdef GUILE_DEBUG_FREELIST
1808 scm_map_free_list ();
1812 gc_sweep_freelist_finish (&scm_master_freelist
);
1813 gc_sweep_freelist_finish (&scm_master_freelist2
);
1815 /* When we move to POSIX threads private freelists should probably
1816 be GC-protected instead. */
1817 scm_freelist
= SCM_EOL
;
1818 scm_freelist2
= SCM_EOL
;
1820 scm_cells_allocated
= (SCM_HEAP_SIZE
- scm_gc_cells_collected
);
1821 scm_gc_yield
-= scm_cells_allocated
;
1822 scm_mallocated
-= m
;
1823 scm_gc_malloc_collected
= m
;
1829 /* {Front end to malloc}
1831 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1834 * These functions provide services comparable to malloc, realloc, and
1835 * free. They are for allocating malloced parts of scheme objects.
1836 * The primary purpose of the front end is to impose calls to gc. */
1840 * Return newly malloced storage or throw an error.
1842 * The parameter WHAT is a string for error reporting.
1843 * If the threshold scm_mtrigger will be passed by this
1844 * allocation, or if the first call to malloc fails,
1845 * garbage collect -- on the presumption that some objects
1846 * using malloced storage may be collected.
1848 * The limit scm_mtrigger may be raised by this allocation.
1851 scm_must_malloc (scm_sizet size
, const char *what
)
1854 unsigned long nm
= scm_mallocated
+ size
;
1856 if (nm
<= scm_mtrigger
)
1858 SCM_SYSCALL (ptr
= malloc (size
));
1861 scm_mallocated
= nm
;
1862 #ifdef GUILE_DEBUG_MALLOC
1863 scm_malloc_register (ptr
, what
);
1871 nm
= scm_mallocated
+ size
;
1872 SCM_SYSCALL (ptr
= malloc (size
));
1875 scm_mallocated
= nm
;
1876 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1877 if (nm
> scm_mtrigger
)
1878 scm_mtrigger
= nm
+ nm
/ 2;
1880 scm_mtrigger
+= scm_mtrigger
/ 2;
1882 #ifdef GUILE_DEBUG_MALLOC
1883 scm_malloc_register (ptr
, what
);
1889 scm_memory_error (what
);
1894 * is similar to scm_must_malloc.
1897 scm_must_realloc (void *where
,
1903 scm_sizet nm
= scm_mallocated
+ size
- old_size
;
1905 if (nm
<= scm_mtrigger
)
1907 SCM_SYSCALL (ptr
= realloc (where
, size
));
1910 scm_mallocated
= nm
;
1911 #ifdef GUILE_DEBUG_MALLOC
1912 scm_malloc_reregister (where
, ptr
, what
);
1920 nm
= scm_mallocated
+ size
- old_size
;
1921 SCM_SYSCALL (ptr
= realloc (where
, size
));
1924 scm_mallocated
= nm
;
1925 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1926 if (nm
> scm_mtrigger
)
1927 scm_mtrigger
= nm
+ nm
/ 2;
1929 scm_mtrigger
+= scm_mtrigger
/ 2;
1931 #ifdef GUILE_DEBUG_MALLOC
1932 scm_malloc_reregister (where
, ptr
, what
);
1937 scm_memory_error (what
);
1942 scm_must_free (void *obj
)
1943 #define FUNC_NAME "scm_must_free"
1945 #ifdef GUILE_DEBUG_MALLOC
1946 scm_malloc_unregister (obj
);
1951 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL
);
1956 /* Announce that there has been some malloc done that will be freed
1957 * during gc. A typical use is for a smob that uses some malloced
1958 * memory but can not get it from scm_must_malloc (for whatever
1959 * reason). When a new object of this smob is created you call
1960 * scm_done_malloc with the size of the object. When your smob free
1961 * function is called, be sure to include this size in the return
1964 * If you can't actually free the memory in the smob free function,
1965 * for whatever reason (like reference counting), you still can (and
1966 * should) report the amount of memory freed when you actually free it.
1967 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
1968 * eh? Or even better, call scm_done_free. */
1971 scm_done_malloc (long size
)
1973 scm_mallocated
+= size
;
1975 if (scm_mallocated
> scm_mtrigger
)
1977 scm_igc ("foreign mallocs");
1978 if (scm_mallocated
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
)
1980 if (scm_mallocated
> scm_mtrigger
)
1981 scm_mtrigger
= scm_mallocated
+ scm_mallocated
/ 2;
1983 scm_mtrigger
+= scm_mtrigger
/ 2;
1989 scm_done_free (long size
)
1991 scm_mallocated
-= size
;
1998 * Each heap segment is an array of objects of a particular size.
1999 * Every segment has an associated (possibly shared) freelist.
2000 * A table of segment records is kept that records the upper and
2001 * lower extents of the segment; this is used during the conservative
2002 * phase of gc to identify probably gc roots (because they point
2003 * into valid segments at reasonable offsets). */
2006 * is true if the first segment was smaller than INIT_HEAP_SEG.
2007 * If scm_expmem is set to one, subsequent segment allocations will
2008 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2012 scm_sizet scm_max_segment_size
;
2015 * is the lowest base address of any heap segment.
2017 SCM_CELLPTR scm_heap_org
;
2019 scm_heap_seg_data_t
* scm_heap_table
= 0;
2020 static unsigned int heap_segment_table_size
= 0;
2021 int scm_n_heap_segs
= 0;
2024 * initializes a new heap segment and returns the number of objects it contains.
2026 * The segment origin and segment size in bytes are input parameters.
2027 * The freelist is both input and output.
2029 * This function presumes that the scm_heap_table has already been expanded
2030 * to accommodate a new segment record and that the markbit space was reserved
2031 * for all the cards in this segment.
2034 #define INIT_CARD(card, span) \
2036 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2038 SCM_GC_SET_CARD_DOUBLECELL (card); \
2042 init_heap_seg (SCM_CELLPTR seg_org
, scm_sizet size
, scm_freelist_t
*freelist
)
2044 register SCM_CELLPTR ptr
;
2045 SCM_CELLPTR seg_end
;
2048 int span
= freelist
->span
;
2050 if (seg_org
== NULL
)
2053 /* Align the begin ptr up.
2055 ptr
= SCM_GC_CARD_UP (seg_org
);
2057 /* Compute the ceiling on valid object pointers w/in this segment.
2059 seg_end
= SCM_GC_CARD_DOWN ((char *)seg_org
+ size
);
2061 /* Find the right place and insert the segment record.
2064 for (new_seg_index
= 0;
2065 ( (new_seg_index
< scm_n_heap_segs
)
2066 && SCM_PTR_LE (scm_heap_table
[new_seg_index
].bounds
[0], seg_org
));
2072 for (i
= scm_n_heap_segs
; i
> new_seg_index
; --i
)
2073 scm_heap_table
[i
] = scm_heap_table
[i
- 1];
2078 scm_heap_table
[new_seg_index
].span
= span
;
2079 scm_heap_table
[new_seg_index
].freelist
= freelist
;
2080 scm_heap_table
[new_seg_index
].bounds
[0] = ptr
;
2081 scm_heap_table
[new_seg_index
].bounds
[1] = seg_end
;
2084 n_new_cells
= seg_end
- ptr
;
2086 freelist
->heap_size
+= n_new_cells
;
2088 /* Partition objects in this segment into clusters */
2091 SCM
*clusterp
= &clusters
;
2093 NEXT_DATA_CELL (ptr
, span
);
2094 while (ptr
< seg_end
)
2096 scm_cell
*nxt
= ptr
;
2097 scm_cell
*prv
= NULL
;
2098 scm_cell
*last_card
= NULL
;
2099 int n_data_cells
= (SCM_GC_CARD_N_DATA_CELLS
/ span
) * SCM_CARDS_PER_CLUSTER
- 1;
2100 NEXT_DATA_CELL(nxt
, span
);
2102 /* Allocate cluster spine
2104 *clusterp
= PTR2SCM (ptr
);
2105 SCM_SETCAR (*clusterp
, PTR2SCM (nxt
));
2106 clusterp
= SCM_CDRLOC (*clusterp
);
2109 while (n_data_cells
--)
2111 scm_cell
*card
= SCM_GC_CELL_CARD (ptr
);
2112 SCM scmptr
= PTR2SCM (ptr
);
2114 NEXT_DATA_CELL (nxt
, span
);
2117 if (card
!= last_card
)
2119 INIT_CARD (card
, span
);
2123 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
2124 SCM_SETCDR (scmptr
, PTR2SCM (nxt
));
2129 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv
), SCM_EOL
);
2134 scm_cell
*ref
= seg_end
;
2135 NEXT_DATA_CELL (ref
, span
);
2137 /* [cmm] looks like the segment size doesn't divide cleanly by
2138 cluster size. bad cmm! */
2142 /* Patch up the last cluster pointer in the segment
2143 * to join it to the input freelist.
2145 *clusterp
= freelist
->clusters
;
2146 freelist
->clusters
= clusters
;
2150 fprintf (stderr
, "H");
2156 round_to_cluster_size (scm_freelist_t
*freelist
, scm_sizet len
)
2158 scm_sizet cluster_size_in_bytes
= CLUSTER_SIZE_IN_BYTES (freelist
);
2161 (len
+ cluster_size_in_bytes
- 1) / cluster_size_in_bytes
* cluster_size_in_bytes
2162 + ALIGNMENT_SLACK (freelist
);
2166 alloc_some_heap (scm_freelist_t
*freelist
, policy_on_error error_policy
)
2167 #define FUNC_NAME "alloc_some_heap"
2172 if (scm_gc_heap_lock
)
2174 /* Critical code sections (such as the garbage collector) aren't
2175 * supposed to add heap segments.
2177 fprintf (stderr
, "alloc_some_heap: Can not extend locked heap.\n");
2181 if (scm_n_heap_segs
== heap_segment_table_size
)
2183 /* We have to expand the heap segment table to have room for the new
2184 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2185 * init_heap_seg only if the allocation of the segment itself succeeds.
2187 unsigned int new_table_size
= scm_n_heap_segs
+ 1;
2188 size_t size
= new_table_size
* sizeof (scm_heap_seg_data_t
);
2189 scm_heap_seg_data_t
* new_heap_table
;
2191 SCM_SYSCALL (new_heap_table
= ((scm_heap_seg_data_t
*)
2192 realloc ((char *)scm_heap_table
, size
)));
2193 if (!new_heap_table
)
2195 if (error_policy
== abort_on_error
)
2197 fprintf (stderr
, "alloc_some_heap: Could not grow heap segment table.\n");
2207 scm_heap_table
= new_heap_table
;
2208 heap_segment_table_size
= new_table_size
;
2212 /* Pick a size for the new heap segment.
2213 * The rule for picking the size of a segment is explained in
2217 /* Assure that the new segment is predicted to be large enough.
2219 * New yield should at least equal GC fraction of new heap size, i.e.
2221 * y + dh > f * (h + dh)
2224 * f : min yield fraction
2226 * dh : size of new heap segment
2228 * This gives dh > (f * h - y) / (1 - f)
2230 int f
= freelist
->min_yield_fraction
;
2231 long h
= SCM_HEAP_SIZE
;
2232 long min_cells
= (f
* h
- 100 * (long) scm_gc_yield
) / (99 - f
);
2233 len
= SCM_EXPHEAP (freelist
->heap_size
);
2235 fprintf (stderr
, "(%d < %d)", len
, min_cells
);
2237 if (len
< min_cells
)
2238 len
= min_cells
+ freelist
->cluster_size
;
2239 len
*= sizeof (scm_cell
);
2240 /* force new sampling */
2241 freelist
->collected
= LONG_MAX
;
2244 if (len
> scm_max_segment_size
)
2245 len
= scm_max_segment_size
;
2250 smallest
= CLUSTER_SIZE_IN_BYTES (freelist
);
2255 /* Allocate with decaying ambition. */
2256 while ((len
>= SCM_MIN_HEAP_SEG_SIZE
)
2257 && (len
>= smallest
))
2259 scm_sizet rounded_len
= round_to_cluster_size (freelist
, len
);
2260 SCM_SYSCALL (ptr
= (SCM_CELLPTR
) malloc (rounded_len
));
2263 init_heap_seg (ptr
, rounded_len
, freelist
);
2270 if (error_policy
== abort_on_error
)
2272 fprintf (stderr
, "alloc_some_heap: Could not grow heap.\n");
2279 SCM_DEFINE (scm_unhash_name
, "unhash-name", 1, 0, 0,
2281 "Flushes the glocs for @var{name}, or all glocs if @var{name}\n"
2283 #define FUNC_NAME s_scm_unhash_name
2287 SCM_VALIDATE_SYMBOL (1,name
);
2289 bound
= scm_n_heap_segs
;
2290 for (x
= 0; x
< bound
; ++x
)
2294 p
= scm_heap_table
[x
].bounds
[0];
2295 pbound
= scm_heap_table
[x
].bounds
[1];
2298 SCM cell
= PTR2SCM (p
);
2299 if (SCM_TYP3 (cell
) == scm_tc3_cons_gloc
)
2301 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2302 * struct cell. See the corresponding comment in scm_gc_mark.
2304 scm_bits_t word0
= SCM_CELL_WORD_0 (cell
) - scm_tc3_cons_gloc
;
2305 SCM gloc_car
= SCM_PACK (word0
); /* access as gloc */
2306 SCM vcell
= SCM_CELL_OBJECT_1 (gloc_car
);
2307 if ((SCM_EQ_P (name
, SCM_BOOL_T
) || SCM_EQ_P (SCM_CAR (gloc_car
), name
))
2308 && (SCM_UNPACK (vcell
) != 0) && (SCM_UNPACK (vcell
) != 1))
2310 SCM_SET_CELL_OBJECT_0 (cell
, name
);
2323 /* {GC Protection Helper Functions}
2328 * If within a function you need to protect one or more scheme objects from
2329 * garbage collection, pass them as parameters to one of the
2330 * scm_remember_upto_here* functions below. These functions don't do
2331 * anything, but since the compiler does not know that they are actually
2332 * no-ops, it will generate code that calls these functions with the given
2333 * parameters. Therefore, you can be sure that the compiler will keep those
2334 * scheme values alive (on the stack or in a register) up to the point where
2335 * scm_remember_upto_here* is called. In other words, place the call to
2336 * scm_remember_upto_here* _behind_ the last code in your function that
2337 * depends on the scheme object to exist.
2339 * Example: We want to make sure, that the string object str does not get
2340 * garbage collected during the execution of 'some_function', because
2341 * otherwise the characters belonging to str would be freed and
2342 * 'some_function' might access freed memory. To make sure that the compiler
2343 * keeps str alive on the stack or in a register such that it is visible to
2344 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2345 * call to 'some_function'. Note that this would not be necessary if str was
2346 * used anyway after the call to 'some_function'.
2347 * char *chars = SCM_STRING_CHARS (str);
2348 * some_function (chars);
2349 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2353 scm_remember_upto_here_1 (SCM obj
)
2355 /* Empty. Protects a single object from garbage collection. */
2359 scm_remember_upto_here_2 (SCM obj1
, SCM obj2
)
2361 /* Empty. Protects two objects from garbage collection. */
2365 scm_remember_upto_here (SCM obj
, ...)
2367 /* Empty. Protects any number of objects from garbage collection. */
2371 #if (SCM_DEBUG_DEPRECATED == 0)
2374 scm_remember (SCM
*ptr
)
2379 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2382 These crazy functions prevent garbage collection
2383 of arguments after the first argument by
2384 ensuring they remain live throughout the
2385 function because they are used in the last
2386 line of the code block.
2387 It'd be better to have a nice compiler hint to
2388 aid the conservative stack-scanning GC. --03/09/00 gjb */
2390 scm_return_first (SCM elt
, ...)
2396 scm_return_first_int (int i
, ...)
2403 scm_permanent_object (SCM obj
)
2406 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
2412 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2413 other references are dropped, until the object is unprotected by calling
2414 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
2415 i. e. it is possible to protect the same object several times, but it is
2416 necessary to unprotect the object the same number of times to actually get
2417 the object unprotected. It is an error to unprotect an object more often
2418 than it has been protected before. The function scm_protect_object returns
2422 /* Implementation note: For every object X, there is a counter which
2423 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2427 scm_protect_object (SCM obj
)
2431 /* This critical section barrier will be replaced by a mutex. */
2434 handle
= scm_hashq_create_handle_x (scm_protects
, obj
, SCM_MAKINUM (0));
2435 SCM_SETCDR (handle
, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle
)) + 1));
2443 /* Remove any protection for OBJ established by a prior call to
2444 scm_protect_object. This function returns OBJ.
2446 See scm_protect_object for more information. */
2448 scm_unprotect_object (SCM obj
)
2452 /* This critical section barrier will be replaced by a mutex. */
2455 handle
= scm_hashq_get_handle (scm_protects
, obj
);
2457 if (SCM_IMP (handle
))
2459 fprintf (stderr
, "scm_unprotect_object called on unprotected object\n");
2464 unsigned long int count
= SCM_INUM (SCM_CDR (handle
)) - 1;
2466 scm_hashq_remove_x (scm_protects
, obj
);
2468 SCM_SETCDR (handle
, SCM_MAKINUM (count
));
2478 /* called on process termination. */
2484 extern int on_exit (void (*procp
) (), int arg
);
2487 cleanup (int status
, void *arg
)
2489 #error Dont know how to setup a cleanup handler on your system.
2494 scm_flush_all_ports ();
2499 make_initial_segment (scm_sizet init_heap_size
, scm_freelist_t
*freelist
)
2501 scm_sizet rounded_size
= round_to_cluster_size (freelist
, init_heap_size
);
2503 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2507 rounded_size
= round_to_cluster_size (freelist
, SCM_HEAP_SEG_SIZE
);
2508 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2516 if (freelist
->min_yield_fraction
)
2517 freelist
->min_yield
= (freelist
->heap_size
* freelist
->min_yield_fraction
2519 freelist
->grow_heap_p
= (freelist
->heap_size
< freelist
->min_yield
);
2526 init_freelist (scm_freelist_t
*freelist
,
2531 freelist
->clusters
= SCM_EOL
;
2532 freelist
->cluster_size
= cluster_size
+ 1;
2533 freelist
->left_to_collect
= 0;
2534 freelist
->clusters_allocated
= 0;
2535 freelist
->min_yield
= 0;
2536 freelist
->min_yield_fraction
= min_yield
;
2537 freelist
->span
= span
;
2538 freelist
->collected
= 0;
2539 freelist
->collected_1
= 0;
2540 freelist
->heap_size
= 0;
/* Get an integer from an environment variable.  Returns DEF when VAR
 * is unset or when its value does not begin with a decimal integer.
 * NOTE(review): the NULL check and returns were missing in the
 * corrupted extraction and have been restored from the stated contract. */
static int
scm_i_getenv_int (const char *var, int def)
{
  char *end, *val = getenv (var);
  long res;

  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)      /* no digits consumed: not a number */
    return def;
  return res;
}
2562 scm_sizet gc_trigger_1
;
2563 scm_sizet gc_trigger_2
;
2564 scm_sizet init_heap_size_1
;
2565 scm_sizet init_heap_size_2
;
2568 j
= SCM_NUM_PROTECTS
;
2570 scm_sys_protects
[--j
] = SCM_BOOL_F
;
2573 scm_freelist
= SCM_EOL
;
2574 scm_freelist2
= SCM_EOL
;
2575 gc_trigger_1
= scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1
);
2576 init_freelist (&scm_master_freelist
, 1, SCM_CLUSTER_SIZE_1
, gc_trigger_1
);
2577 gc_trigger_2
= scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2
);
2578 init_freelist (&scm_master_freelist2
, 2, SCM_CLUSTER_SIZE_2
, gc_trigger_2
);
2579 scm_max_segment_size
= scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size
);
2583 j
= SCM_HEAP_SEG_SIZE
;
2584 scm_mtrigger
= SCM_INIT_MALLOC_LIMIT
;
2585 scm_heap_table
= ((scm_heap_seg_data_t
*)
2586 scm_must_malloc (sizeof (scm_heap_seg_data_t
) * 2, "hplims"));
2587 heap_segment_table_size
= 2;
2589 mark_space_ptr
= &mark_space_head
;
2591 init_heap_size_1
= scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1
);
2592 init_heap_size_2
= scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2
);
2593 if (make_initial_segment (init_heap_size_1
, &scm_master_freelist
) ||
2594 make_initial_segment (init_heap_size_2
, &scm_master_freelist2
))
2597 /* scm_hplims[0] can change. do not remove scm_heap_org */
2598 scm_heap_org
= CELL_UP (scm_heap_table
[0].bounds
[0], 1);
2600 scm_c_hook_init (&scm_before_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2601 scm_c_hook_init (&scm_before_mark_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2602 scm_c_hook_init (&scm_before_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2603 scm_c_hook_init (&scm_after_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2604 scm_c_hook_init (&scm_after_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2606 /* Initialise the list of ports. */
2607 scm_port_table
= (scm_port
**)
2608 malloc (sizeof (scm_port
*) * scm_port_table_room
);
2609 if (!scm_port_table
)
2616 on_exit (cleanup
, 0);
2620 scm_undefineds
= scm_cons (SCM_UNDEFINED
, SCM_EOL
);
2621 SCM_SETCDR (scm_undefineds
, scm_undefineds
);
2623 scm_listofnull
= scm_cons (SCM_EOL
, SCM_EOL
);
2624 scm_nullstr
= scm_makstr (0L, 0);
2625 scm_nullvect
= scm_c_make_vector (0, SCM_UNDEFINED
);
2627 #define DEFAULT_SYMHASH_SIZE 277
2628 scm_symhash
= scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE
);
2629 scm_symhash_vars
= scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE
);
2631 scm_stand_in_procs
= SCM_EOL
;
2632 scm_permobjs
= SCM_EOL
;
2633 scm_protects
= scm_c_make_hash_table (31);
2640 SCM scm_after_gc_hook
;
2642 #if (SCM_DEBUG_DEPRECATED == 0)
2643 static SCM scm_gc_vcell
; /* the vcell for gc-thunk. */
2644 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2645 static SCM gc_async
;
2648 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2649 * is run after the gc, as soon as the asynchronous events are handled by the
2653 gc_async_thunk (void)
2655 scm_c_run_hook (scm_after_gc_hook
, SCM_EOL
);
2657 #if (SCM_DEBUG_DEPRECATED == 0)
2659 /* The following code will be removed in Guile 1.5. */
2660 if (SCM_NFALSEP (scm_gc_vcell
))
2662 SCM proc
= SCM_CDR (scm_gc_vcell
);
2664 if (SCM_NFALSEP (proc
) && !SCM_UNBNDP (proc
))
2665 scm_apply (proc
, SCM_EOL
, SCM_EOL
);
2668 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2670 return SCM_UNSPECIFIED
;
2674 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2675 * the garbage collection. The only purpose of this function is to mark the
2676 * gc_async (which will eventually lead to the execution of the
2680 mark_gc_async (void * hook_data
, void *func_data
, void *data
)
2682 scm_system_async_mark (gc_async
);
2692 scm_after_gc_hook
= scm_create_hook ("after-gc-hook", 0);
2694 #if (SCM_DEBUG_DEPRECATED == 0)
2695 scm_gc_vcell
= scm_sysintern ("gc-thunk", SCM_BOOL_F
);
2696 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2697 after_gc_thunk
= scm_make_subr_opt ("%gc-thunk", scm_tc7_subr_0
, gc_async_thunk
, 0);
2698 gc_async
= scm_system_async (after_gc_thunk
); /* protected via scm_asyncs */
2700 scm_c_hook_add (&scm_after_gc_c_hook
, mark_gc_async
, NULL
, 0);
2702 #ifndef SCM_MAGIC_SNARFER
2703 #include "libguile/gc.x"
2707 #endif /*MARK_DEPENDENCIES*/