1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
45 /* #define DEBUGINFO */
47 /* SECTION: This code is compiled once.
50 #ifndef MARK_DEPENDENCIES
54 #include "libguile/_scm.h"
55 #include "libguile/eval.h"
56 #include "libguile/stime.h"
57 #include "libguile/stackchk.h"
58 #include "libguile/struct.h"
59 #include "libguile/smob.h"
60 #include "libguile/unif.h"
61 #include "libguile/async.h"
62 #include "libguile/ports.h"
63 #include "libguile/root.h"
64 #include "libguile/strings.h"
65 #include "libguile/vectors.h"
66 #include "libguile/weaks.h"
67 #include "libguile/hashtab.h"
69 #include "libguile/validate.h"
70 #include "libguile/gc.h"
72 #ifdef GUILE_DEBUG_MALLOC
73 #include "libguile/debug-malloc.h"
86 #define var_start(x, y) va_start(x, y)
89 #define var_start(x, y) va_start(x)
/* Nonzero while a garbage collection is in progress, zero otherwise.  */
unsigned int scm_gc_running_p = 0;
98 #if (SCM_DEBUG_CELL_ACCESSES == 1)
100 unsigned int scm_debug_cell_accesses_p
= 0;
103 /* Assert that the given object is a valid reference to a valid cell. This
104 * test involves to determine whether the object is a cell pointer, whether
105 * this pointer actually points into a heap segment and whether the cell
106 * pointed to is not a free cell.
109 scm_assert_cell_valid (SCM cell
)
111 if (scm_debug_cell_accesses_p
)
113 scm_debug_cell_accesses_p
= 0; /* disable to avoid recursion */
115 if (!scm_cellp (cell
))
117 fprintf (stderr
, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell
));
120 else if (!scm_gc_running_p
)
122 /* Dirk::FIXME:: During garbage collection there occur references to
123 free cells. This is allright during conservative marking, but
124 should not happen otherwise (I think). The case of free cells
125 accessed during conservative marking is handled in function
126 scm_mark_locations. However, there still occur accesses to free
127 cells during gc. I don't understand why this happens. If it is
128 a bug and gets fixed, the following test should also work while
131 if (SCM_FREE_CELL_P (cell
))
133 fprintf (stderr
, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell
));
137 scm_debug_cell_accesses_p
= 1; /* re-enable */
142 SCM_DEFINE (scm_set_debug_cell_accesses_x
, "set-debug-cell-accesses!", 1, 0, 0,
144 "If FLAG is #f, cell access checking is disabled.\n"
145 "If FLAG is #t, cell access checking is enabled.\n"
146 "This procedure only exists because the compile-time flag\n"
147 "SCM_DEBUG_CELL_ACCESSES was set to 1.\n")
148 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
150 if (SCM_FALSEP (flag
)) {
151 scm_debug_cell_accesses_p
= 0;
152 } else if (SCM_EQ_P (flag
, SCM_BOOL_T
)) {
153 scm_debug_cell_accesses_p
= 1;
155 SCM_WRONG_TYPE_ARG (1, flag
);
157 return SCM_UNSPECIFIED
;
161 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
165 /* {heap tuning parameters}
167 * These are parameters for controlling memory allocation. The heap
168 * is the area out of which scm_cons, and object headers are allocated.
170 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
171 * 64 bit machine. The units of the _SIZE parameters are bytes.
172 * Cons pairs and object headers occupy one heap cell.
174 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
175 * allocated initially the heap will grow by half its current size
176 * each subsequent time more heap is needed.
178 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
179 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
180 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
181 * is in scm_init_storage() and alloc_some_heap() in sys.c
183 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
184 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
186 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
189 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
192 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
193 * reclaimed by a GC triggered by must_malloc. If less than this is
194 * reclaimed, the trigger threshold is raised. [I don't know what a
195 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
196 * work around a oscillation that caused almost constant GC.]
200 * Heap size 45000 and 40% min yield gives quick startup and no extra
201 * heap allocation. Having higher values on min yield may lead to
202 * large heaps, especially if code behaviour is varying its
203 * maximum consumption between different freelists.
206 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
207 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
208 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
209 int scm_default_init_heap_size_1
= (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER
- 1)
210 / SCM_CARDS_PER_CLUSTER
) * SCM_GC_CARD_SIZE
);
211 int scm_default_min_yield_1
= 40;
213 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
214 int scm_default_init_heap_size_2
= (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER
- 1)
215 / SCM_CARDS_PER_CLUSTER
) * SCM_GC_CARD_SIZE
);
216 /* The following value may seem large, but note that if we get to GC at
217 * all, this means that we have a numerically intensive application
219 int scm_default_min_yield_2
= 40;
221 int scm_default_max_segment_size
= 2097000L;/* a little less (adm) than 2 Mb */
#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
/* NOTE(review): the platform conditionals around SCM_HEAP_SEG_SIZE were lost
   in extraction; restored per upstream layout -- confirm against repository.  */
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
#  define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
#  define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif

/* Make heap grow with factor 1.5 */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
   aligned inner bounds for allocated storage */

/* NOTE(review): the PROT386/_UNICOS conditionals were lost in extraction;
   restored per upstream layout -- confirm against repository.  */
#ifdef PROT386
/*in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif /* _UNICOS */
#endif /* PROT386 */

#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
#define CLUSTER_SIZE_IN_BYTES(freelist) \
  (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
263 typedef struct scm_freelist_t
{
264 /* collected cells */
266 /* number of cells left to collect before cluster is full */
267 unsigned int left_to_collect
;
268 /* number of clusters which have been allocated */
269 unsigned int clusters_allocated
;
270 /* a list of freelists, each of size cluster_size,
271 * except the last one which may be shorter
275 /* this is the number of objects in each cluster, including the spine cell */
277 /* indicates that we should grow heap instead of GC:ing
280 /* minimum yield on this list in order not to grow the heap
283 /* defines min_yield as percent of total heap size
285 int min_yield_fraction
;
286 /* number of cells per object on this list */
288 /* number of collected cells during last GC */
290 /* number of collected cells during penultimate GC */
292 /* total number of cells in heap segments
293 * belonging to this list.
298 SCM scm_freelist
= SCM_EOL
;
299 scm_freelist_t scm_master_freelist
= {
300 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_1
, 0, 0, 0, 1, 0, 0
302 SCM scm_freelist2
= SCM_EOL
;
303 scm_freelist_t scm_master_freelist2
= {
304 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_2
, 0, 0, 0, 2, 0, 0
308 * is the number of bytes of must_malloc allocation needed to trigger gc.
310 unsigned long scm_mtrigger
;
313 * If set, don't expand the heap. Set only during gc, during which no allocation
314 * is supposed to take place anyway.
316 int scm_gc_heap_lock
= 0;
319 * Don't pause for collection if this is set -- just
322 int scm_block_gc
= 1;
324 /* During collection, this accumulates objects holding
327 SCM scm_weak_vectors
;
329 /* During collection, this accumulates structures which are to be freed.
331 SCM scm_structs_to_free
;
333 /* GC Statistics Keeping
335 unsigned long scm_cells_allocated
= 0;
336 long scm_mallocated
= 0;
337 unsigned long scm_gc_cells_collected
;
338 unsigned long scm_gc_yield
;
339 static unsigned long scm_gc_yield_1
= 0; /* previous GC yield */
340 unsigned long scm_gc_malloc_collected
;
341 unsigned long scm_gc_ports_collected
;
342 unsigned long scm_gc_time_taken
= 0;
343 static unsigned long t_before_gc
;
344 static unsigned long t_before_sweep
;
345 unsigned long scm_gc_mark_time_taken
= 0;
346 unsigned long scm_gc_sweep_time_taken
= 0;
347 unsigned long scm_gc_times
= 0;
348 unsigned long scm_gc_cells_swept
= 0;
349 double scm_gc_cells_marked_acc
= 0.;
350 double scm_gc_cells_swept_acc
= 0.;
352 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
353 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
354 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
355 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
356 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
357 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
358 SCM_SYMBOL (sym_gc_mark_time_taken
, "gc-mark-time-taken");
359 SCM_SYMBOL (sym_gc_sweep_time_taken
, "gc-sweep-time-taken");
360 SCM_SYMBOL (sym_times
, "gc-times");
361 SCM_SYMBOL (sym_cells_marked
, "cells-marked");
362 SCM_SYMBOL (sym_cells_swept
, "cells-swept");
364 typedef struct scm_heap_seg_data_t
366 /* lower and upper bounds of the segment */
367 SCM_CELLPTR bounds
[2];
369 /* address of the head-of-freelist pointer for this segment's cells.
370 All segments usually point to the same one, scm_freelist. */
371 scm_freelist_t
*freelist
;
373 /* number of cells per object in this segment */
375 } scm_heap_seg_data_t
;
379 static scm_sizet
init_heap_seg (SCM_CELLPTR
, scm_sizet
, scm_freelist_t
*);
381 typedef enum { return_on_error
, abort_on_error
} policy_on_error
;
382 static void alloc_some_heap (scm_freelist_t
*, policy_on_error
);
385 #define SCM_HEAP_SIZE \
386 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
387 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
389 #define BVEC_GROW_SIZE 256
390 #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
391 #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))
393 /* mark space allocation */
395 typedef struct scm_mark_space_t
397 scm_c_bvec_limb_t
*bvec_space
;
398 struct scm_mark_space_t
*next
;
401 static scm_mark_space_t
*current_mark_space
;
402 static scm_mark_space_t
**mark_space_ptr
;
403 static int current_mark_space_offset
;
404 static scm_mark_space_t
*mark_space_head
;
406 static scm_c_bvec_limb_t
*
409 scm_c_bvec_limb_t
*res
;
411 if (!current_mark_space
)
413 SCM_SYSCALL (current_mark_space
= (scm_mark_space_t
*) malloc (sizeof (scm_mark_space_t
)));
414 if (!current_mark_space
)
415 scm_wta (SCM_UNDEFINED
, "could not grow", "heap");
417 current_mark_space
->bvec_space
= NULL
;
418 current_mark_space
->next
= NULL
;
420 *mark_space_ptr
= current_mark_space
;
421 mark_space_ptr
= &(current_mark_space
->next
);
426 if (!(current_mark_space
->bvec_space
))
428 SCM_SYSCALL (current_mark_space
->bvec_space
=
429 (scm_c_bvec_limb_t
*) calloc (BVEC_GROW_SIZE_IN_BYTES
, 1));
430 if (!(current_mark_space
->bvec_space
))
431 scm_wta (SCM_UNDEFINED
, "could not grow", "heap");
433 current_mark_space_offset
= 0;
438 if (current_mark_space_offset
== BVEC_GROW_SIZE_IN_LIMBS
)
440 current_mark_space
= NULL
;
445 res
= current_mark_space
->bvec_space
+ current_mark_space_offset
;
446 current_mark_space_offset
+= SCM_GC_CARD_BVEC_SIZE_IN_LIMBS
;
454 scm_mark_space_t
*ms
;
456 for (ms
= mark_space_head
; ms
; ms
= ms
->next
)
457 memset (ms
->bvec_space
, 0, BVEC_GROW_SIZE_IN_BYTES
);
462 /* Debugging functions. */
464 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
466 /* Return the number of the heap segment containing CELL. */
472 for (i
= 0; i
< scm_n_heap_segs
; i
++)
473 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], SCM2PTR (cell
))
474 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], SCM2PTR (cell
)))
476 fprintf (stderr
, "which_seg: can't find segment containing cell %lx\n",
483 map_free_list (scm_freelist_t
*master
, SCM freelist
)
485 int last_seg
= -1, count
= 0;
488 for (f
= freelist
; !SCM_NULLP (f
); f
= SCM_FREE_CELL_CDR (f
))
490 int this_seg
= which_seg (f
);
492 if (this_seg
!= last_seg
)
495 fprintf (stderr
, " %5d %d-cells in segment %d\n",
496 count
, master
->span
, last_seg
);
503 fprintf (stderr
, " %5d %d-cells in segment %d\n",
504 count
, master
->span
, last_seg
);
507 SCM_DEFINE (scm_map_free_list
, "map-free-list", 0, 0, 0,
509 "Print debugging information about the free-list.\n"
510 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
511 #define FUNC_NAME s_scm_map_free_list
514 fprintf (stderr
, "%d segments total (%d:%d",
516 scm_heap_table
[0].span
,
517 scm_heap_table
[0].bounds
[1] - scm_heap_table
[0].bounds
[0]);
518 for (i
= 1; i
< scm_n_heap_segs
; i
++)
519 fprintf (stderr
, ", %d:%d",
520 scm_heap_table
[i
].span
,
521 scm_heap_table
[i
].bounds
[1] - scm_heap_table
[i
].bounds
[0]);
522 fprintf (stderr
, ")\n");
523 map_free_list (&scm_master_freelist
, scm_freelist
);
524 map_free_list (&scm_master_freelist2
, scm_freelist2
);
527 return SCM_UNSPECIFIED
;
531 static int last_cluster
;
532 static int last_size
;
535 free_list_length (char *title
, int i
, SCM freelist
)
539 for (ls
= freelist
; !SCM_NULLP (ls
); ls
= SCM_FREE_CELL_CDR (ls
))
540 if (SCM_FREE_CELL_P (ls
))
544 fprintf (stderr
, "bad cell in %s at position %d\n", title
, n
);
551 if (last_cluster
== i
- 1)
552 fprintf (stderr
, "\t%d\n", last_size
);
554 fprintf (stderr
, "-%d\t%d\n", i
- 1, last_size
);
557 fprintf (stderr
, "%s %d", title
, i
);
559 fprintf (stderr
, "%s\t%d\n", title
, n
);
567 free_list_lengths (char *title
, scm_freelist_t
*master
, SCM freelist
)
570 int i
= 0, len
, n
= 0;
571 fprintf (stderr
, "%s\n\n", title
);
572 n
+= free_list_length ("free list", -1, freelist
);
573 for (clusters
= master
->clusters
;
574 SCM_NNULLP (clusters
);
575 clusters
= SCM_CDR (clusters
))
577 len
= free_list_length ("cluster", i
++, SCM_CAR (clusters
));
580 if (last_cluster
== i
- 1)
581 fprintf (stderr
, "\t%d\n", last_size
);
583 fprintf (stderr
, "-%d\t%d\n", i
- 1, last_size
);
584 fprintf (stderr
, "\ntotal %d objects\n\n", n
);
587 SCM_DEFINE (scm_free_list_length
, "free-list-length", 0, 0, 0,
589 "Print debugging information about the free-list.\n"
590 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
591 #define FUNC_NAME s_scm_free_list_length
593 free_list_lengths ("1-cells", &scm_master_freelist
, scm_freelist
);
594 free_list_lengths ("2-cells", &scm_master_freelist2
, scm_freelist2
);
595 return SCM_UNSPECIFIED
;
601 #ifdef GUILE_DEBUG_FREELIST
603 /* Number of calls to SCM_NEWCELL since startup. */
604 static unsigned long scm_newcell_count
;
605 static unsigned long scm_newcell2_count
;
607 /* Search freelist for anything that isn't marked as a free cell.
608 Abort if we find something. */
610 scm_check_freelist (SCM freelist
)
615 for (f
= freelist
; !SCM_NULLP (f
); f
= SCM_FREE_CELL_CDR (f
), i
++)
616 if (!SCM_FREE_CELL_P (f
))
618 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %d'th elt\n",
619 scm_newcell_count
, i
);
624 SCM_DEFINE (scm_gc_set_debug_check_freelist_x
, "gc-set-debug-check-freelist!", 1, 0, 0,
626 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
627 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
628 "compile-time flag was selected.\n")
629 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
631 /* [cmm] I did a double-take when I read this code the first time.
633 SCM_VALIDATE_BOOL_COPY (1, flag
, scm_debug_check_freelist
);
634 return SCM_UNSPECIFIED
;
640 scm_debug_newcell (void)
645 if (scm_debug_check_freelist
)
647 scm_check_freelist (scm_freelist
);
651 /* The rest of this is supposed to be identical to the SCM_NEWCELL
653 if (SCM_NULLP (scm_freelist
))
654 new = scm_gc_for_newcell (&scm_master_freelist
, &scm_freelist
);
658 scm_freelist
= SCM_FREE_CELL_CDR (scm_freelist
);
665 scm_debug_newcell2 (void)
669 scm_newcell2_count
++;
670 if (scm_debug_check_freelist
)
672 scm_check_freelist (scm_freelist2
);
676 /* The rest of this is supposed to be identical to the SCM_NEWCELL
678 if (SCM_NULLP (scm_freelist2
))
679 new = scm_gc_for_newcell (&scm_master_freelist2
, &scm_freelist2
);
683 scm_freelist2
= SCM_FREE_CELL_CDR (scm_freelist2
);
689 #endif /* GUILE_DEBUG_FREELIST */
694 master_cells_allocated (scm_freelist_t
*master
)
696 /* the '- 1' below is to ignore the cluster spine cells. */
697 int objects
= master
->clusters_allocated
* (master
->cluster_size
- 1);
698 if (SCM_NULLP (master
->clusters
))
699 objects
-= master
->left_to_collect
;
700 return master
->span
* objects
;
704 freelist_length (SCM freelist
)
707 for (n
= 0; !SCM_NULLP (freelist
); freelist
= SCM_FREE_CELL_CDR (freelist
))
713 compute_cells_allocated ()
715 return (scm_cells_allocated
716 + master_cells_allocated (&scm_master_freelist
)
717 + master_cells_allocated (&scm_master_freelist2
)
718 - scm_master_freelist
.span
* freelist_length (scm_freelist
)
719 - scm_master_freelist2
.span
* freelist_length (scm_freelist2
));
722 /* {Scheme Interface to GC}
725 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
727 "Returns an association list of statistics about Guile's current use of storage. ")
728 #define FUNC_NAME s_scm_gc_stats
733 long int local_scm_mtrigger
;
734 long int local_scm_mallocated
;
735 long int local_scm_heap_size
;
736 long int local_scm_cells_allocated
;
737 long int local_scm_gc_time_taken
;
738 long int local_scm_gc_times
;
739 long int local_scm_gc_mark_time_taken
;
740 long int local_scm_gc_sweep_time_taken
;
741 double local_scm_gc_cells_swept
;
742 double local_scm_gc_cells_marked
;
752 for (i
= scm_n_heap_segs
; i
--; )
753 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
754 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
756 if (scm_n_heap_segs
!= n
)
761 /* Below, we cons to produce the resulting list. We want a snapshot of
762 * the heap situation before consing.
764 local_scm_mtrigger
= scm_mtrigger
;
765 local_scm_mallocated
= scm_mallocated
;
766 local_scm_heap_size
= SCM_HEAP_SIZE
;
767 local_scm_cells_allocated
= compute_cells_allocated ();
768 local_scm_gc_time_taken
= scm_gc_time_taken
;
769 local_scm_gc_mark_time_taken
= scm_gc_mark_time_taken
;
770 local_scm_gc_sweep_time_taken
= scm_gc_sweep_time_taken
;
771 local_scm_gc_times
= scm_gc_times
;
772 local_scm_gc_cells_swept
= scm_gc_cells_swept_acc
;
773 local_scm_gc_cells_marked
= scm_gc_cells_marked_acc
;
775 answer
= scm_listify (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
776 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
777 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
778 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
779 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
780 scm_cons (sym_times
, scm_ulong2num (local_scm_gc_times
)),
781 scm_cons (sym_gc_mark_time_taken
, scm_ulong2num (local_scm_gc_mark_time_taken
)),
782 scm_cons (sym_gc_sweep_time_taken
, scm_ulong2num (local_scm_gc_sweep_time_taken
)),
783 scm_cons (sym_cells_marked
, scm_dbl2big (local_scm_gc_cells_marked
)),
784 scm_cons (sym_cells_swept
, scm_dbl2big (local_scm_gc_cells_swept
)),
785 scm_cons (sym_heap_segments
, heap_segs
),
794 gc_start_stats (const char *what
)
796 t_before_gc
= scm_c_get_internal_run_time ();
797 scm_gc_cells_swept
= 0;
798 scm_gc_cells_collected
= 0;
799 scm_gc_yield_1
= scm_gc_yield
;
800 scm_gc_yield
= (scm_cells_allocated
801 + master_cells_allocated (&scm_master_freelist
)
802 + master_cells_allocated (&scm_master_freelist2
));
803 scm_gc_malloc_collected
= 0;
804 scm_gc_ports_collected
= 0;
811 unsigned long t
= scm_c_get_internal_run_time ();
812 scm_gc_time_taken
+= (t
- t_before_gc
);
813 scm_gc_sweep_time_taken
+= (t
- t_before_sweep
);
816 scm_gc_cells_marked_acc
+= scm_gc_cells_swept
- scm_gc_cells_collected
;
817 scm_gc_cells_swept_acc
+= scm_gc_cells_swept
;
821 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
823 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
824 "returned by this function for @var{obj}")
825 #define FUNC_NAME s_scm_object_address
827 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj
));
832 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
834 "Scans all of SCM objects and reclaims for further use those that are\n"
835 "no longer accessible.")
836 #define FUNC_NAME s_scm_gc
841 return SCM_UNSPECIFIED
;
847 /* {C Interface For When GC is Triggered}
851 adjust_min_yield (scm_freelist_t
*freelist
)
853 /* min yield is adjusted upwards so that next predicted total yield
854 * (allocated cells actually freed by GC) becomes
855 * `min_yield_fraction' of total heap size. Note, however, that
856 * the absolute value of min_yield will correspond to `collected'
857 * on one master (the one which currently is triggering GC).
859 * The reason why we look at total yield instead of cells collected
860 * on one list is that we want to take other freelists into account.
861 * On this freelist, we know that (local) yield = collected cells,
862 * but that's probably not the case on the other lists.
864 * (We might consider computing a better prediction, for example
865 * by computing an average over multiple GC:s.)
867 if (freelist
->min_yield_fraction
)
869 /* Pick largest of last two yields. */
870 int delta
= ((SCM_HEAP_SIZE
* freelist
->min_yield_fraction
/ 100)
871 - (long) SCM_MAX (scm_gc_yield_1
, scm_gc_yield
));
873 fprintf (stderr
, " after GC = %d, delta = %d\n",
878 freelist
->min_yield
+= delta
;
883 /* When we get POSIX threads support, the master will be global and
884 * common while the freelist will be individual for each thread.
888 scm_gc_for_newcell (scm_freelist_t
*master
, SCM
*freelist
)
894 if (SCM_NULLP (master
->clusters
))
896 if (master
->grow_heap_p
|| scm_block_gc
)
898 /* In order to reduce gc frequency, try to allocate a new heap
899 * segment first, even if gc might find some free cells. If we
900 * can't obtain a new heap segment, we will try gc later.
902 master
->grow_heap_p
= 0;
903 alloc_some_heap (master
, return_on_error
);
905 if (SCM_NULLP (master
->clusters
))
907 /* The heap was not grown, either because it wasn't scheduled to
908 * grow, or because there was not enough memory available. In
909 * both cases we have to try gc to get some free cells.
912 fprintf (stderr
, "allocated = %d, ",
914 + master_cells_allocated (&scm_master_freelist
)
915 + master_cells_allocated (&scm_master_freelist2
));
918 adjust_min_yield (master
);
919 if (SCM_NULLP (master
->clusters
))
921 /* gc could not free any cells. Now, we _must_ allocate a
922 * new heap segment, because there is no other possibility
923 * to provide a new cell for the caller.
925 alloc_some_heap (master
, abort_on_error
);
929 cell
= SCM_CAR (master
->clusters
);
930 master
->clusters
= SCM_CDR (master
->clusters
);
931 ++master
->clusters_allocated
;
933 while (SCM_NULLP (cell
));
935 #ifdef GUILE_DEBUG_FREELIST
936 scm_check_freelist (cell
);
940 *freelist
= SCM_FREE_CELL_CDR (cell
);
946 /* This is a support routine which can be used to reserve a cluster
947 * for some special use, such as debugging. It won't be useful until
948 * free cells are preserved between garbage collections.
952 scm_alloc_cluster (scm_freelist_t
*master
)
955 cell
= scm_gc_for_newcell (master
, &freelist
);
956 SCM_SETCDR (cell
, freelist
);
962 scm_c_hook_t scm_before_gc_c_hook
;
963 scm_c_hook_t scm_before_mark_c_hook
;
964 scm_c_hook_t scm_before_sweep_c_hook
;
965 scm_c_hook_t scm_after_sweep_c_hook
;
966 scm_c_hook_t scm_after_gc_c_hook
;
970 scm_igc (const char *what
)
975 scm_c_hook_run (&scm_before_gc_c_hook
, 0);
978 SCM_NULLP (scm_freelist
)
980 : (SCM_NULLP (scm_freelist2
) ? "o" : "m"));
983 /* During the critical section, only the current thread may run. */
984 SCM_THREAD_CRITICAL_SECTION_START
;
987 /* fprintf (stderr, "gc: %s\n", what); */
989 if (!scm_stack_base
|| scm_block_gc
)
995 gc_start_stats (what
);
997 if (scm_mallocated
< 0)
998 /* The byte count of allocated objects has underflowed. This is
999 probably because you forgot to report the sizes of objects you
1000 have allocated, by calling scm_done_malloc or some such. When
1001 the GC freed them, it subtracted their size from
1002 scm_mallocated, which underflowed. */
1005 if (scm_gc_heap_lock
)
1006 /* We've invoked the collector while a GC is already in progress.
1007 That should never happen. */
1012 /* flush dead entries from the continuation stack */
1017 elts
= SCM_VELTS (scm_continuation_stack
);
1018 bound
= SCM_VECTOR_LENGTH (scm_continuation_stack
);
1019 x
= SCM_INUM (scm_continuation_stack_ptr
);
1022 elts
[x
] = SCM_BOOL_F
;
1027 scm_c_hook_run (&scm_before_mark_c_hook
, 0);
1029 clear_mark_space ();
1033 /* Mark objects on the C stack. */
1034 SCM_FLUSH_REGISTER_WINDOWS
;
1035 /* This assumes that all registers are saved into the jmp_buf */
1036 setjmp (scm_save_regs_gc_mark
);
1037 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
1038 ( (scm_sizet
) (sizeof (SCM_STACKITEM
) - 1 +
1039 sizeof scm_save_regs_gc_mark
)
1040 / sizeof (SCM_STACKITEM
)));
1043 scm_sizet stack_len
= scm_stack_size (scm_stack_base
);
1044 #ifdef SCM_STACK_GROWS_UP
1045 scm_mark_locations (scm_stack_base
, stack_len
);
1047 scm_mark_locations (scm_stack_base
- stack_len
, stack_len
);
1051 #else /* USE_THREADS */
1053 /* Mark every thread's stack and registers */
1054 scm_threads_mark_stacks ();
1056 #endif /* USE_THREADS */
1058 j
= SCM_NUM_PROTECTS
;
1060 scm_gc_mark (scm_sys_protects
[j
]);
1062 /* FIXME: we should have a means to register C functions to be run
1063 * in different phases of GC
1065 scm_mark_subr_table ();
1068 scm_gc_mark (scm_root
->handle
);
1071 t_before_sweep
= scm_c_get_internal_run_time ();
1072 scm_gc_mark_time_taken
+= (t_before_sweep
- t_before_gc
);
1074 scm_c_hook_run (&scm_before_sweep_c_hook
, 0);
1078 scm_c_hook_run (&scm_after_sweep_c_hook
, 0);
1084 SCM_THREAD_CRITICAL_SECTION_END
;
1086 scm_c_hook_run (&scm_after_gc_c_hook
, 0);
1095 #define MARK scm_gc_mark
1096 #define FNAME "scm_gc_mark"
1098 #endif /*!MARK_DEPENDENCIES*/
1100 /* Mark an object precisely.
1104 #define FUNC_NAME FNAME
1109 #ifndef MARK_DEPENDENCIES
1110 # define RECURSE scm_gc_mark
1112 /* go through the usual marking, but not for self-cycles. */
1113 # define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1117 #ifdef MARK_DEPENDENCIES
1118 goto gc_mark_loop_first_time
;
1127 #ifdef MARK_DEPENDENCIES
1128 if (SCM_EQ_P (ptr
, p
))
1134 gc_mark_loop_first_time
:
1137 if (!SCM_CELLP (ptr
))
1138 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL
);
1140 #if (defined (GUILE_DEBUG_FREELIST))
1142 if (SCM_GC_IN_CARD_HEADERP (SCM2PTR (ptr
)))
1143 scm_wta (ptr
, "rogue pointer in heap", NULL
);
1147 #ifndef MARK_DEPENDENCIES
1149 if (SCM_GCMARKP (ptr
))
1152 SCM_SETGCMARK (ptr
);
1156 switch (SCM_TYP7 (ptr
))
1158 case scm_tcs_cons_nimcar
:
1159 if (SCM_IMP (SCM_CDR (ptr
)))
1161 ptr
= SCM_CAR (ptr
);
1164 RECURSE (SCM_CAR (ptr
));
1165 ptr
= SCM_CDR (ptr
);
1167 case scm_tcs_cons_imcar
:
1168 ptr
= SCM_CDR (ptr
);
1171 RECURSE (SCM_CELL_OBJECT_2 (ptr
));
1172 ptr
= SCM_CDR (ptr
);
1174 case scm_tcs_cons_gloc
:
1176 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
1177 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
1178 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
1179 * pointer to a struct vtable data region. The fact that these are
1180 * accessed in the same way restricts the possibilites to change the
1181 * data layout of structs or heap cells.
1183 scm_bits_t word0
= SCM_CELL_WORD_0 (ptr
) - scm_tc3_cons_gloc
;
1184 scm_bits_t
* vtable_data
= (scm_bits_t
*) word0
; /* access as struct */
1185 if (vtable_data
[scm_vtable_index_vcell
] != 0)
1188 SCM gloc_car
= SCM_PACK (word0
);
1190 ptr
= SCM_CDR (ptr
);
1195 /* ptr is a struct */
1196 SCM layout
= SCM_PACK (vtable_data
[scm_vtable_index_layout
]);
1197 int len
= SCM_SYMBOL_LENGTH (layout
);
1198 char * fields_desc
= SCM_SYMBOL_CHARS (layout
);
1199 scm_bits_t
* struct_data
= (scm_bits_t
*) SCM_STRUCT_DATA (ptr
);
1201 if (vtable_data
[scm_struct_i_flags
] & SCM_STRUCTF_ENTITY
)
1203 RECURSE (SCM_PACK (struct_data
[scm_struct_i_procedure
]));
1204 RECURSE (SCM_PACK (struct_data
[scm_struct_i_setter
]));
1210 for (x
= 0; x
< len
- 2; x
+= 2, ++struct_data
)
1211 if (fields_desc
[x
] == 'p')
1212 RECURSE (SCM_PACK (*struct_data
));
1213 if (fields_desc
[x
] == 'p')
1215 if (SCM_LAYOUT_TAILP (fields_desc
[x
+ 1]))
1216 for (x
= *struct_data
++; x
; --x
, ++struct_data
)
1217 RECURSE (SCM_PACK (*struct_data
));
1219 RECURSE (SCM_PACK (*struct_data
));
1223 ptr
= SCM_PACK (vtable_data
[scm_vtable_index_vtable
]);
1228 case scm_tcs_closures
:
1229 if (SCM_IMP (SCM_CDR (ptr
)))
1231 ptr
= SCM_CLOSCAR (ptr
);
1234 RECURSE (SCM_CLOSCAR (ptr
));
1235 ptr
= SCM_CDR (ptr
);
1237 case scm_tc7_vector
:
1238 i
= SCM_VECTOR_LENGTH (ptr
);
1242 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
1243 RECURSE (SCM_VELTS (ptr
)[i
]);
1244 ptr
= SCM_VELTS (ptr
)[0];
1249 unsigned long int i
= SCM_CCLO_LENGTH (ptr
);
1250 unsigned long int j
;
1251 for (j
= 1; j
!= i
; ++j
)
1253 SCM obj
= SCM_CCLO_REF (ptr
, j
);
1257 ptr
= SCM_CCLO_REF (ptr
, 0);
1263 case scm_tc7_byvect
:
1270 #ifdef HAVE_LONG_LONGS
1271 case scm_tc7_llvect
:
1274 case scm_tc7_string
:
1277 case scm_tc7_substring
:
1278 ptr
= SCM_CDR (ptr
);
1282 SCM_WVECT_GC_CHAIN (ptr
) = scm_weak_vectors
;
1283 scm_weak_vectors
= ptr
;
1284 if (SCM_IS_WHVEC_ANY (ptr
))
1291 len
= SCM_VECTOR_LENGTH (ptr
);
1292 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
1293 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
1295 for (x
= 0; x
< len
; ++x
)
1298 alist
= SCM_VELTS (ptr
)[x
];
1300 /* mark everything on the alist except the keys or
1301 * values, according to weak_values and weak_keys. */
1302 while ( SCM_CONSP (alist
)
1303 && !SCM_GCMARKP (alist
)
1304 && SCM_CONSP (SCM_CAR (alist
)))
1309 kvpair
= SCM_CAR (alist
);
1310 next_alist
= SCM_CDR (alist
);
1313 * SCM_SETGCMARK (alist);
1314 * SCM_SETGCMARK (kvpair);
1316 * It may be that either the key or value is protected by
1317 * an escaped reference to part of the spine of this alist.
1318 * If we mark the spine here, and only mark one or neither of the
1319 * key and value, they may never be properly marked.
1320 * This leads to a horrible situation in which an alist containing
1321 * freelist cells is exported.
1323 * So only mark the spines of these arrays last of all marking.
1324 * If somebody confuses us by constructing a weak vector
1325 * with a circular alist then we are hosed, but at least we
1326 * won't prematurely drop table entries.
1329 RECURSE (SCM_CAR (kvpair
));
1331 RECURSE (SCM_CDR (kvpair
));
1334 if (SCM_NIMP (alist
))
1340 case scm_tc7_symbol
:
1341 ptr
= SCM_PROP_SLOTS (ptr
);
1346 i
= SCM_PTOBNUM (ptr
);
1347 if (!(i
< scm_numptob
))
1349 if (SCM_PTAB_ENTRY(ptr
))
1350 RECURSE (SCM_FILENAME (ptr
));
1351 if (scm_ptobs
[i
].mark
)
1353 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
1360 switch (SCM_TYP16 (ptr
))
1361 { /* should be faster than going through scm_smobs */
1362 case scm_tc_free_cell
:
1363 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1366 case scm_tc16_complex
:
1369 i
= SCM_SMOBNUM (ptr
);
1370 if (!(i
< scm_numsmob
))
1372 if (scm_smobs
[i
].mark
)
1374 ptr
= (scm_smobs
[i
].mark
) (ptr
);
1383 SCM_MISC_ERROR ("unknown type", SCM_EOL
);
1389 #ifndef MARK_DEPENDENCIES
1394 /* And here we define `scm_gc_mark_dependencies', by including this
1395 * same file in itself.
1397 #define MARK scm_gc_mark_dependencies
1398 #define FNAME "scm_gc_mark_dependencies"
1399 #define MARK_DEPENDENCIES
1401 #undef MARK_DEPENDENCIES
1406 /* Mark a Region Conservatively
/* NOTE(review): this chunk is a garbled extraction -- the function
 * header, braces and several interior lines are missing, so only
 * comments are added here; no tokens are changed.
 *
 * Scans the N stack items at X and, for each value that structurally
 * looks like a heap cell pointer (SCM_CELLP), binary-searches
 * scm_heap_table for the containing segment before marking.
 */
1410 scm_mark_locations (SCM_STACKITEM x
[], scm_sizet n
)
/* Walk every word of the region.  */
1414 for (m
= 0; m
< n
; ++m
)
1416 SCM obj
= * (SCM
*) &x
[m
];
/* Only values that syntactically look like cell pointers are candidates.  */
1417 if (SCM_CELLP (obj
))
1419 SCM_CELLPTR ptr
= SCM2PTR (obj
);
1421 int j
= scm_n_heap_segs
- 1;
/* Quick reject: the pointer must fall inside the overall heap bounds.  */
1422 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1423 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1430 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
1432 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
/* Binary-search probe: k is the midpoint segment index (its
 * assignment is elided in this extract).  */
1440 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
1444 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
1449 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1453 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
/* Never treat a pointer into a card header as a live object.  */
1460 if (SCM_GC_IN_CARD_HEADERP (ptr
))
/* Mark only if the segment is single-span, or the value is a properly
 * aligned double cell.  */
1463 if (scm_heap_table
[seg_id
].span
== 1
1464 || SCM_DOUBLE_CELLP (obj
))
1475 /* The function scm_cellp determines whether an SCM value can be regarded as a
1476 * pointer to a cell on the heap. Binary search is used in order to determine
1477 * the heap segment that contains the cell.
/* NOTE(review): garbled extraction -- the header, braces, loop lines
 * and return statements are missing from this view; comments only
 * are added, no tokens changed.  */
1480 scm_cellp (SCM value
)
/* Cheap structural test first: does the bit pattern look like a cell?  */
1482 if (SCM_CELLP (value
)) {
1483 scm_cell
* ptr
= SCM2PTR (value
);
1485 unsigned int j
= scm_n_heap_segs
- 1;
/* Binary search for the heap segment that would contain ptr.  */
1488 int k
= (i
+ j
) / 2;
1489 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
)) {
1491 } else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
)) {
/* Final check: ptr must lie inside segment i, be correctly spanned
 * (span 1, or a properly aligned double cell) and must not point
 * into a card header.  */
1496 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1497 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
)
1498 && (scm_heap_table
[i
].span
== 1 || SCM_DOUBLE_CELLP (value
))
1499 && !SCM_GC_IN_CARD_HEADERP (ptr
)
1510 gc_sweep_freelist_start (scm_freelist_t
*freelist
)
1512 freelist
->cells
= SCM_EOL
;
1513 freelist
->left_to_collect
= freelist
->cluster_size
;
1514 freelist
->clusters_allocated
= 0;
1515 freelist
->clusters
= SCM_EOL
;
1516 freelist
->clustertail
= &freelist
->clusters
;
1517 freelist
->collected_1
= freelist
->collected
;
1518 freelist
->collected
= 0;
1522 gc_sweep_freelist_finish (scm_freelist_t
*freelist
)
1525 *freelist
->clustertail
= freelist
->cells
;
1526 if (!SCM_NULLP (freelist
->cells
))
1528 SCM c
= freelist
->cells
;
1529 SCM_SETCAR (c
, SCM_CDR (c
));
1530 SCM_SETCDR (c
, SCM_EOL
);
1531 freelist
->collected
+=
1532 freelist
->span
* (freelist
->cluster_size
- freelist
->left_to_collect
);
1534 scm_gc_cells_collected
+= freelist
->collected
;
1536 /* Although freelist->min_yield is used to test freelist->collected
1537 * (which is the local GC yield for freelist), it is adjusted so
1538 * that *total* yield is freelist->min_yield_fraction of total heap
1539 * size. This means that a too low yield is compensated by more
1540 * heap on the list which is currently doing most work, which is
1541 * just what we want.
1543 collected
= SCM_MAX (freelist
->collected_1
, freelist
->collected
);
1544 freelist
->grow_heap_p
= (collected
< freelist
->min_yield
);
/* Advance PTR to the next data cell of width SPAN, skipping over GC
 * card header cells.
 * NOTE(review): reconstructed from a garbled extract -- the do/while
 * wrapper and the else-branch (plain advance when no card header is
 * hit) were missing and should be checked against upstream.
 */
#define NEXT_DATA_CELL(ptr, span) \
  do { \
    scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
    (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
	     CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
	     : nxt__); \
  } while (0)
1557 #define FUNC_NAME "scm_gc_sweep"
1559 register SCM_CELLPTR ptr
;
1560 register SCM nfreelist
;
1561 register scm_freelist_t
*freelist
;
1569 gc_sweep_freelist_start (&scm_master_freelist
);
1570 gc_sweep_freelist_start (&scm_master_freelist2
);
1572 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1574 register unsigned int left_to_collect
;
1575 register scm_sizet j
;
1577 /* Unmarked cells go onto the front of the freelist this heap
1578 segment points to. Rather than updating the real freelist
1579 pointer as we go along, we accumulate the new head in
1580 nfreelist. Then, if it turns out that the entire segment is
1581 free, we free (i.e., malloc's free) the whole segment, and
1582 simply don't assign nfreelist back into the real freelist. */
1583 freelist
= scm_heap_table
[i
].freelist
;
1584 nfreelist
= freelist
->cells
;
1585 left_to_collect
= freelist
->left_to_collect
;
1586 span
= scm_heap_table
[i
].span
;
1588 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0], span
);
1589 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1], span
) - ptr
;
1591 /* use only data cells in seg_size */
1592 seg_size
= (seg_size
/ SCM_GC_CARD_N_CELLS
) * (SCM_GC_CARD_N_DATA_CELLS
/ span
) * span
;
1594 scm_gc_cells_swept
+= seg_size
;
1596 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
1600 if (SCM_GC_IN_CARD_HEADERP (ptr
))
1606 NEXT_DATA_CELL (nxt
, span
);
1613 scmptr
= PTR2SCM (ptr
);
1615 if (SCM_GCMARKP (scmptr
))
1618 switch SCM_TYP7 (scmptr
)
1620 case scm_tcs_cons_gloc
:
1622 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1623 * struct or a gloc. See the corresponding comment in
1626 scm_bits_t word0
= (SCM_CELL_WORD_0 (scmptr
)
1627 - scm_tc3_cons_gloc
);
1628 /* access as struct */
1629 scm_bits_t
* vtable_data
= (scm_bits_t
*) word0
;
1630 if (vtable_data
[scm_vtable_index_vcell
] == 0)
1632 /* Structs need to be freed in a special order.
1633 * This is handled by GC C hooks in struct.c.
1635 SCM_SET_STRUCT_GC_CHAIN (scmptr
, scm_structs_to_free
);
1636 scm_structs_to_free
= scmptr
;
1639 /* fall through so that scmptr gets collected */
1642 case scm_tcs_cons_imcar
:
1643 case scm_tcs_cons_nimcar
:
1644 case scm_tcs_closures
:
1648 m
+= (2 + SCM_VECTOR_LENGTH (scmptr
)) * sizeof (SCM
);
1649 scm_must_free (SCM_VECTOR_BASE (scmptr
) - 2);
1651 case scm_tc7_vector
:
1653 unsigned long int length
= SCM_VECTOR_LENGTH (scmptr
);
1656 m
+= length
* sizeof (scm_bits_t
);
1657 scm_must_free (SCM_VECTOR_BASE (scmptr
));
1663 m
+= (SCM_CCLO_LENGTH (scmptr
) * sizeof (SCM
));
1664 scm_must_free (SCM_CCLO_BASE (scmptr
));
1670 unsigned long int length
= SCM_BITVECTOR_LENGTH (scmptr
);
1673 m
+= sizeof (long) * ((length
+ SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
1674 scm_must_free (SCM_BITVECTOR_BASE (scmptr
));
1678 case scm_tc7_byvect
:
1682 #ifdef HAVE_LONG_LONGS
1683 case scm_tc7_llvect
:
1688 m
+= SCM_UVECTOR_LENGTH (scmptr
) * scm_uniform_element_size (scmptr
);
1689 scm_must_free (SCM_UVECTOR_BASE (scmptr
));
1692 case scm_tc7_substring
:
1694 case scm_tc7_string
:
1695 m
+= SCM_STRING_LENGTH (scmptr
) + 1;
1696 scm_must_free (SCM_STRING_CHARS (scmptr
));
1698 case scm_tc7_symbol
:
1699 m
+= SCM_SYMBOL_LENGTH (scmptr
) + 1;
1700 scm_must_free (SCM_SYMBOL_CHARS (scmptr
));
1703 /* the various "subrs" (primitives) are never freed */
1706 if SCM_OPENP (scmptr
)
1708 int k
= SCM_PTOBNUM (scmptr
);
1709 if (!(k
< scm_numptob
))
1711 /* Keep "revealed" ports alive. */
1712 if (scm_revealed_count (scmptr
) > 0)
1714 /* Yes, I really do mean scm_ptobs[k].free */
1715 /* rather than ftobs[k].close. .close */
1716 /* is for explicit CLOSE-PORT by user */
1717 m
+= (scm_ptobs
[k
].free
) (scmptr
);
1718 SCM_SETSTREAM (scmptr
, 0);
1719 scm_remove_from_port_table (scmptr
);
1720 scm_gc_ports_collected
++;
1721 SCM_SETAND_CAR (scmptr
, ~SCM_OPN
);
1725 switch SCM_TYP16 (scmptr
)
1727 case scm_tc_free_cell
:
1732 m
+= (SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
/ SCM_CHAR_BIT
);
1733 scm_must_free (SCM_BDIGITS (scmptr
));
1735 #endif /* def SCM_BIGDIG */
1736 case scm_tc16_complex
:
1737 m
+= sizeof (scm_complex_t
);
1738 scm_must_free (SCM_COMPLEX_MEM (scmptr
));
1743 k
= SCM_SMOBNUM (scmptr
);
1744 if (!(k
< scm_numsmob
))
1746 m
+= (scm_smobs
[k
].free
) (scmptr
);
1753 SCM_MISC_ERROR ("unknown type", SCM_EOL
);
1756 if (!--left_to_collect
)
1758 SCM_SETCAR (scmptr
, nfreelist
);
1759 *freelist
->clustertail
= scmptr
;
1760 freelist
->clustertail
= SCM_CDRLOC (scmptr
);
1762 nfreelist
= SCM_EOL
;
1763 freelist
->collected
+= span
* freelist
->cluster_size
;
1764 left_to_collect
= freelist
->cluster_size
;
1768 /* Stick the new cell on the front of nfreelist. It's
1769 critical that we mark this cell as freed; otherwise, the
1770 conservative collector might trace it as some other type
1772 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
1773 SCM_SET_FREE_CELL_CDR (scmptr
, nfreelist
);
1778 #ifdef GC_FREE_SEGMENTS
1783 freelist
->heap_size
-= seg_size
;
1784 free ((char *) scm_heap_table
[i
].bounds
[0]);
1785 scm_heap_table
[i
].bounds
[0] = 0;
1786 for (j
= i
+ 1; j
< scm_n_heap_segs
; j
++)
1787 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1788 scm_n_heap_segs
-= 1;
1789 i
--; /* We need to scan the segment just moved. */
1792 #endif /* ifdef GC_FREE_SEGMENTS */
1794 /* Update the real freelist pointer to point to the head of
1795 the list of free cells we've built for this segment. */
1796 freelist
->cells
= nfreelist
;
1797 freelist
->left_to_collect
= left_to_collect
;
1800 #ifdef GUILE_DEBUG_FREELIST
1801 scm_map_free_list ();
1805 gc_sweep_freelist_finish (&scm_master_freelist
);
1806 gc_sweep_freelist_finish (&scm_master_freelist2
);
1808 /* When we move to POSIX threads private freelists should probably
1809 be GC-protected instead. */
1810 scm_freelist
= SCM_EOL
;
1811 scm_freelist2
= SCM_EOL
;
1813 scm_cells_allocated
= (SCM_HEAP_SIZE
- scm_gc_cells_collected
);
1814 scm_gc_yield
-= scm_cells_allocated
;
1815 scm_mallocated
-= m
;
1816 scm_gc_malloc_collected
= m
;
1822 /* {Front end to malloc}
1824 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1827 * These functions provide services comparable to malloc, realloc, and
1828 * free. They are for allocating malloced parts of scheme objects.
1829 * The primary purpose of the front end is to impose calls to gc. */
1833 * Return newly malloced storage or throw an error.
1835 * The parameter WHAT is a string for error reporting.
1836 * If the threshold scm_mtrigger will be passed by this
1837 * allocation, or if the first call to malloc fails,
1838 * garbage collect -- on the presumption that some objects
1839 * using malloced storage may be collected.
1841 * The limit scm_mtrigger may be raised by this allocation.
1844 scm_must_malloc (scm_sizet size
, const char *what
)
1847 unsigned long nm
= scm_mallocated
+ size
;
1849 if (nm
<= scm_mtrigger
)
1851 SCM_SYSCALL (ptr
= malloc (size
));
1854 scm_mallocated
= nm
;
1855 #ifdef GUILE_DEBUG_MALLOC
1856 scm_malloc_register (ptr
, what
);
1864 nm
= scm_mallocated
+ size
;
1865 SCM_SYSCALL (ptr
= malloc (size
));
1868 scm_mallocated
= nm
;
1869 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1870 if (nm
> scm_mtrigger
)
1871 scm_mtrigger
= nm
+ nm
/ 2;
1873 scm_mtrigger
+= scm_mtrigger
/ 2;
1875 #ifdef GUILE_DEBUG_MALLOC
1876 scm_malloc_register (ptr
, what
);
1882 scm_memory_error (what
);
1887 * is similar to scm_must_malloc.
1890 scm_must_realloc (void *where
,
1896 scm_sizet nm
= scm_mallocated
+ size
- old_size
;
1898 if (nm
<= scm_mtrigger
)
1900 SCM_SYSCALL (ptr
= realloc (where
, size
));
1903 scm_mallocated
= nm
;
1904 #ifdef GUILE_DEBUG_MALLOC
1905 scm_malloc_reregister (where
, ptr
, what
);
1913 nm
= scm_mallocated
+ size
- old_size
;
1914 SCM_SYSCALL (ptr
= realloc (where
, size
));
1917 scm_mallocated
= nm
;
1918 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1919 if (nm
> scm_mtrigger
)
1920 scm_mtrigger
= nm
+ nm
/ 2;
1922 scm_mtrigger
+= scm_mtrigger
/ 2;
1924 #ifdef GUILE_DEBUG_MALLOC
1925 scm_malloc_reregister (where
, ptr
, what
);
1930 scm_memory_error (what
);
1935 scm_must_free (void *obj
)
1936 #define FUNC_NAME "scm_must_free"
1938 #ifdef GUILE_DEBUG_MALLOC
1939 scm_malloc_unregister (obj
);
1944 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL
);
1949 /* Announce that there has been some malloc done that will be freed
1950 * during gc. A typical use is for a smob that uses some malloced
1951 * memory but can not get it from scm_must_malloc (for whatever
1952 * reason). When a new object of this smob is created you call
1953 * scm_done_malloc with the size of the object. When your smob free
1954 * function is called, be sure to include this size in the return
1957 * If you can't actually free the memory in the smob free function,
1958 * for whatever reason (like reference counting), you still can (and
1959 * should) report the amount of memory freed when you actually free it.
1960 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
1961 * eh? Or even better, call scm_done_free. */
1964 scm_done_malloc (long size
)
1966 scm_mallocated
+= size
;
1968 if (scm_mallocated
> scm_mtrigger
)
1970 scm_igc ("foreign mallocs");
1971 if (scm_mallocated
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
)
1973 if (scm_mallocated
> scm_mtrigger
)
1974 scm_mtrigger
= scm_mallocated
+ scm_mallocated
/ 2;
1976 scm_mtrigger
+= scm_mtrigger
/ 2;
1982 scm_done_free (long size
)
1984 scm_mallocated
-= size
;
1991 * Each heap segment is an array of objects of a particular size.
1992 * Every segment has an associated (possibly shared) freelist.
1993 * A table of segment records is kept that records the upper and
1994 * lower extents of the segment; this is used during the conservative
1995 * phase of gc to identify probable gc roots (because they point
1996 * into valid segments at reasonable offsets). */
1999 * is true if the first segment was smaller than INIT_HEAP_SEG.
2000 * If scm_expmem is set to one, subsequent segment allocations will
2001 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2005 scm_sizet scm_max_segment_size
;
2008 * is the lowest base address of any heap segment.
2010 SCM_CELLPTR scm_heap_org
;
2012 scm_heap_seg_data_t
* scm_heap_table
= 0;
2013 static unsigned int heap_segment_table_size
= 0;
2014 int scm_n_heap_segs
= 0;
2017 * initializes a new heap segment and returns the number of objects it contains.
2019 * The segment origin and segment size in bytes are input parameters.
2020 * The freelist is both input and output.
2022 * This function presumes that the scm_heap_table has already been expanded
2023 * to accommodate a new segment record and that the markbit space was reserved
2024 * for all the cards in this segment.
2027 #define INIT_CARD(card, span) \
2029 SCM_GC_CARD_BVEC (card) = get_bvec (); \
2031 SCM_GC_SET_CARD_DOUBLECELL (card); \
2035 init_heap_seg (SCM_CELLPTR seg_org
, scm_sizet size
, scm_freelist_t
*freelist
)
2037 register SCM_CELLPTR ptr
;
2038 SCM_CELLPTR seg_end
;
2041 int span
= freelist
->span
;
2043 if (seg_org
== NULL
)
2046 /* Align the begin ptr up.
2048 ptr
= SCM_GC_CARD_UP (seg_org
);
2050 /* Compute the ceiling on valid object pointers w/in this segment.
2052 seg_end
= SCM_GC_CARD_DOWN ((char *)seg_org
+ size
);
2054 /* Find the right place and insert the segment record.
2057 for (new_seg_index
= 0;
2058 ( (new_seg_index
< scm_n_heap_segs
)
2059 && SCM_PTR_LE (scm_heap_table
[new_seg_index
].bounds
[0], seg_org
));
2065 for (i
= scm_n_heap_segs
; i
> new_seg_index
; --i
)
2066 scm_heap_table
[i
] = scm_heap_table
[i
- 1];
2071 scm_heap_table
[new_seg_index
].span
= span
;
2072 scm_heap_table
[new_seg_index
].freelist
= freelist
;
2073 scm_heap_table
[new_seg_index
].bounds
[0] = ptr
;
2074 scm_heap_table
[new_seg_index
].bounds
[1] = seg_end
;
2077 n_new_cells
= seg_end
- ptr
;
2079 freelist
->heap_size
+= n_new_cells
;
2081 /* Partition objects in this segment into clusters */
2084 SCM
*clusterp
= &clusters
;
2086 NEXT_DATA_CELL (ptr
, span
);
2087 while (ptr
< seg_end
)
2089 scm_cell
*nxt
= ptr
;
2090 scm_cell
*prv
= NULL
;
2091 scm_cell
*last_card
= NULL
;
2092 int n_data_cells
= (SCM_GC_CARD_N_DATA_CELLS
/ span
) * SCM_CARDS_PER_CLUSTER
- 1;
2093 NEXT_DATA_CELL(nxt
, span
);
2095 /* Allocate cluster spine
2097 *clusterp
= PTR2SCM (ptr
);
2098 SCM_SETCAR (*clusterp
, PTR2SCM (nxt
));
2099 clusterp
= SCM_CDRLOC (*clusterp
);
2102 while (n_data_cells
--)
2104 scm_cell
*card
= SCM_GC_CELL_CARD (ptr
);
2105 SCM scmptr
= PTR2SCM (ptr
);
2107 NEXT_DATA_CELL (nxt
, span
);
2110 if (card
!= last_card
)
2112 INIT_CARD (card
, span
);
2116 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
2117 SCM_SETCDR (scmptr
, PTR2SCM (nxt
));
2122 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv
), SCM_EOL
);
2127 scm_cell
*ref
= seg_end
;
2128 NEXT_DATA_CELL (ref
, span
);
2130 /* [cmm] looks like the segment size doesn't divide cleanly by
2131 cluster size. bad cmm! */
2135 /* Patch up the last cluster pointer in the segment
2136 * to join it to the input freelist.
2138 *clusterp
= freelist
->clusters
;
2139 freelist
->clusters
= clusters
;
2143 fprintf (stderr
, "H");
2149 round_to_cluster_size (scm_freelist_t
*freelist
, scm_sizet len
)
2151 scm_sizet cluster_size_in_bytes
= CLUSTER_SIZE_IN_BYTES (freelist
);
2154 (len
+ cluster_size_in_bytes
- 1) / cluster_size_in_bytes
* cluster_size_in_bytes
2155 + ALIGNMENT_SLACK (freelist
);
2159 alloc_some_heap (scm_freelist_t
*freelist
, policy_on_error error_policy
)
2160 #define FUNC_NAME "alloc_some_heap"
2165 if (scm_gc_heap_lock
)
2167 /* Critical code sections (such as the garbage collector) aren't
2168 * supposed to add heap segments.
2170 fprintf (stderr
, "alloc_some_heap: Can not extend locked heap.\n");
2174 if (scm_n_heap_segs
== heap_segment_table_size
)
2176 /* We have to expand the heap segment table to have room for the new
2177 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2178 * init_heap_seg only if the allocation of the segment itself succeeds.
2180 unsigned int new_table_size
= scm_n_heap_segs
+ 1;
2181 size_t size
= new_table_size
* sizeof (scm_heap_seg_data_t
);
2182 scm_heap_seg_data_t
* new_heap_table
;
2184 SCM_SYSCALL (new_heap_table
= ((scm_heap_seg_data_t
*)
2185 realloc ((char *)scm_heap_table
, size
)));
2186 if (!new_heap_table
)
2188 if (error_policy
== abort_on_error
)
2190 fprintf (stderr
, "alloc_some_heap: Could not grow heap segment table.\n");
2200 scm_heap_table
= new_heap_table
;
2201 heap_segment_table_size
= new_table_size
;
2205 /* Pick a size for the new heap segment.
2206 * The rule for picking the size of a segment is explained in
2210 /* Assure that the new segment is predicted to be large enough.
2212 * New yield should at least equal GC fraction of new heap size, i.e.
2214 * y + dh > f * (h + dh)
2217 * f : min yield fraction
2219 * dh : size of new heap segment
2221 * This gives dh > (f * h - y) / (1 - f)
2223 int f
= freelist
->min_yield_fraction
;
2224 long h
= SCM_HEAP_SIZE
;
2225 long min_cells
= (f
* h
- 100 * (long) scm_gc_yield
) / (99 - f
);
2226 len
= SCM_EXPHEAP (freelist
->heap_size
);
2228 fprintf (stderr
, "(%d < %d)", len
, min_cells
);
2230 if (len
< min_cells
)
2231 len
= min_cells
+ freelist
->cluster_size
;
2232 len
*= sizeof (scm_cell
);
2233 /* force new sampling */
2234 freelist
->collected
= LONG_MAX
;
2237 if (len
> scm_max_segment_size
)
2238 len
= scm_max_segment_size
;
2243 smallest
= CLUSTER_SIZE_IN_BYTES (freelist
);
2248 /* Allocate with decaying ambition. */
2249 while ((len
>= SCM_MIN_HEAP_SEG_SIZE
)
2250 && (len
>= smallest
))
2252 scm_sizet rounded_len
= round_to_cluster_size (freelist
, len
);
2253 SCM_SYSCALL (ptr
= (SCM_CELLPTR
) malloc (rounded_len
));
2256 init_heap_seg (ptr
, rounded_len
, freelist
);
2263 if (error_policy
== abort_on_error
)
2265 fprintf (stderr
, "alloc_some_heap: Could not grow heap.\n");
2272 SCM_DEFINE (scm_unhash_name
, "unhash-name", 1, 0, 0,
2275 #define FUNC_NAME s_scm_unhash_name
2279 SCM_VALIDATE_SYMBOL (1,name
);
2281 bound
= scm_n_heap_segs
;
2282 for (x
= 0; x
< bound
; ++x
)
2286 p
= scm_heap_table
[x
].bounds
[0];
2287 pbound
= scm_heap_table
[x
].bounds
[1];
2290 SCM cell
= PTR2SCM (p
);
2291 if (SCM_TYP3 (cell
) == scm_tc3_cons_gloc
)
2293 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2294 * struct cell. See the corresponding comment in scm_gc_mark.
2296 scm_bits_t word0
= SCM_CELL_WORD_0 (cell
) - scm_tc3_cons_gloc
;
2297 SCM gloc_car
= SCM_PACK (word0
); /* access as gloc */
2298 SCM vcell
= SCM_CELL_OBJECT_1 (gloc_car
);
2299 if ((SCM_EQ_P (name
, SCM_BOOL_T
) || SCM_EQ_P (SCM_CAR (gloc_car
), name
))
2300 && (SCM_UNPACK (vcell
) != 0) && (SCM_UNPACK (vcell
) != 1))
2302 SCM_SET_CELL_OBJECT_0 (cell
, name
);
2315 /* {GC Protection Helper Functions}
2320 * If within a function you need to protect one or more scheme objects from
2321 * garbage collection, pass them as parameters to one of the
2322 * scm_remember_upto_here* functions below. These functions don't do
2323 * anything, but since the compiler does not know that they are actually
2324 * no-ops, it will generate code that calls these functions with the given
2325 * parameters. Therefore, you can be sure that the compiler will keep those
2326 * scheme values alive (on the stack or in a register) up to the point where
2327 * scm_remember_upto_here* is called. In other words, place the call to
2328 * scm_remember_upto_here* _behind_ the last code in your function, that
2329 * depends on the scheme object to exist.
2331 * Example: We want to make sure, that the string object str does not get
2332 * garbage collected during the execution of 'some_function', because
2333 * otherwise the characters belonging to str would be freed and
2334 * 'some_function' might access freed memory. To make sure that the compiler
2335 * keeps str alive on the stack or in a register such that it is visible to
2336 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2337 * call to 'some_function'. Note that this would not be necessary if str was
2338 * used anyway after the call to 'some_function'.
2339 * char *chars = SCM_STRING_CHARS (str);
2340 * some_function (chars);
2341 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2345 scm_remember_upto_here_1 (SCM obj
)
2347 /* Empty. Protects a single object from garbage collection. */
2351 scm_remember_upto_here_2 (SCM obj1
, SCM obj2
)
2353 /* Empty. Protects two objects from garbage collection. */
2357 scm_remember_upto_here (SCM obj
, ...)
2359 /* Empty. Protects any number of objects from garbage collection. */
2363 #if (SCM_DEBUG_DEPRECATED == 0)
2366 scm_remember (SCM
*ptr
)
2371 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2374 These crazy functions prevent garbage collection
2375 of arguments after the first argument by
2376 ensuring they remain live throughout the
2377 function because they are used in the last
2378 line of the code block.
2379 It'd be better to have a nice compiler hint to
2380 aid the conservative stack-scanning GC. --03/09/00 gjb */
2382 scm_return_first (SCM elt
, ...)
/* Integer variant of scm_return_first: returns its first argument and
 * keeps the remaining (variadic) arguments live for the conservative
 * stack-scanning GC up to this call.
 */
int
scm_return_first_int (int i, ...)
{
  return i;
}
2395 scm_permanent_object (SCM obj
)
2398 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
2404 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2405 other references are dropped, until the object is unprotected by calling
2406 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
2407 i. e. it is possible to protect the same object several times, but it is
2408 necessary to unprotect the object the same number of times to actually get
2409 the object unprotected. It is an error to unprotect an object more often
2410 than it has been protected before. The function scm_protect_object returns
2414 /* Implementation note: For every object X, there is a counter which
2415 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2419 scm_protect_object (SCM obj
)
2423 /* This critical section barrier will be replaced by a mutex. */
2426 handle
= scm_hashq_create_handle_x (scm_protects
, obj
, SCM_MAKINUM (0));
2427 SCM_SETCDR (handle
, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle
)) + 1));
2435 /* Remove any protection for OBJ established by a prior call to
2436 scm_protect_object. This function returns OBJ.
2438 See scm_protect_object for more information. */
2440 scm_unprotect_object (SCM obj
)
2444 /* This critical section barrier will be replaced by a mutex. */
2447 handle
= scm_hashq_get_handle (scm_protects
, obj
);
2449 if (SCM_IMP (handle
))
2451 fprintf (stderr
, "scm_unprotect_object called on unprotected object\n");
2456 unsigned long int count
= SCM_INUM (SCM_CDR (handle
)) - 1;
2458 scm_hashq_remove_x (scm_protects
, obj
);
2460 SCM_SETCDR (handle
, SCM_MAKINUM (count
));
2470 /* called on process termination. */
2476 extern int on_exit (void (*procp
) (), int arg
);
2479 cleanup (int status
, void *arg
)
2481 #error Dont know how to setup a cleanup handler on your system.
2486 scm_flush_all_ports ();
2491 make_initial_segment (scm_sizet init_heap_size
, scm_freelist_t
*freelist
)
2493 scm_sizet rounded_size
= round_to_cluster_size (freelist
, init_heap_size
);
2495 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2499 rounded_size
= round_to_cluster_size (freelist
, SCM_HEAP_SEG_SIZE
);
2500 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2508 if (freelist
->min_yield_fraction
)
2509 freelist
->min_yield
= (freelist
->heap_size
* freelist
->min_yield_fraction
2511 freelist
->grow_heap_p
= (freelist
->heap_size
< freelist
->min_yield
);
2518 init_freelist (scm_freelist_t
*freelist
,
2523 freelist
->clusters
= SCM_EOL
;
2524 freelist
->cluster_size
= cluster_size
+ 1;
2525 freelist
->left_to_collect
= 0;
2526 freelist
->clusters_allocated
= 0;
2527 freelist
->min_yield
= 0;
2528 freelist
->min_yield_fraction
= min_yield
;
2529 freelist
->span
= span
;
2530 freelist
->collected
= 0;
2531 freelist
->collected_1
= 0;
2532 freelist
->heap_size
= 0;
/* Get an integer from an environment variable.  Returns DEF when VAR
 * is unset or when its value does not begin with a decimal integer.
 * (Reconstructed from a garbled extract: the two guard returns were
 * missing -- confirm against upstream.)
 */
static int
scm_i_getenv_int (const char *var, int def)
{
  char *end, *val = getenv (var);
  long res;

  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  return res;
}
2554 scm_sizet gc_trigger_1
;
2555 scm_sizet gc_trigger_2
;
2556 scm_sizet init_heap_size_1
;
2557 scm_sizet init_heap_size_2
;
2560 j
= SCM_NUM_PROTECTS
;
2562 scm_sys_protects
[--j
] = SCM_BOOL_F
;
2565 scm_freelist
= SCM_EOL
;
2566 scm_freelist2
= SCM_EOL
;
2567 gc_trigger_1
= scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1
);
2568 init_freelist (&scm_master_freelist
, 1, SCM_CLUSTER_SIZE_1
, gc_trigger_1
);
2569 gc_trigger_2
= scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2
);
2570 init_freelist (&scm_master_freelist2
, 2, SCM_CLUSTER_SIZE_2
, gc_trigger_2
);
2571 scm_max_segment_size
= scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size
);
2575 j
= SCM_HEAP_SEG_SIZE
;
2576 scm_mtrigger
= SCM_INIT_MALLOC_LIMIT
;
2577 scm_heap_table
= ((scm_heap_seg_data_t
*)
2578 scm_must_malloc (sizeof (scm_heap_seg_data_t
) * 2, "hplims"));
2579 heap_segment_table_size
= 2;
2581 mark_space_ptr
= &mark_space_head
;
2583 init_heap_size_1
= scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1
);
2584 init_heap_size_2
= scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2
);
2585 if (make_initial_segment (init_heap_size_1
, &scm_master_freelist
) ||
2586 make_initial_segment (init_heap_size_2
, &scm_master_freelist2
))
2589 /* scm_hplims[0] can change. do not remove scm_heap_org */
2590 scm_heap_org
= CELL_UP (scm_heap_table
[0].bounds
[0], 1);
2592 scm_c_hook_init (&scm_before_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2593 scm_c_hook_init (&scm_before_mark_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2594 scm_c_hook_init (&scm_before_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2595 scm_c_hook_init (&scm_after_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2596 scm_c_hook_init (&scm_after_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2598 /* Initialise the list of ports. */
2599 scm_port_table
= (scm_port
**)
2600 malloc (sizeof (scm_port
*) * scm_port_table_room
);
2601 if (!scm_port_table
)
2608 on_exit (cleanup
, 0);
2612 scm_undefineds
= scm_cons (SCM_UNDEFINED
, SCM_EOL
);
2613 SCM_SETCDR (scm_undefineds
, scm_undefineds
);
2615 scm_listofnull
= scm_cons (SCM_EOL
, SCM_EOL
);
2616 scm_nullstr
= scm_makstr (0L, 0);
2617 scm_nullvect
= scm_make_vector (SCM_INUM0
, SCM_UNDEFINED
);
2619 #define DEFAULT_SYMHASH_SIZE 277
2620 scm_symhash
= scm_make_vector (SCM_MAKINUM (DEFAULT_SYMHASH_SIZE
), SCM_EOL
);
2621 scm_symhash_vars
= scm_make_vector (SCM_MAKINUM (DEFAULT_SYMHASH_SIZE
), SCM_EOL
);
2623 scm_stand_in_procs
= SCM_EOL
;
2624 scm_permobjs
= SCM_EOL
;
2625 scm_protects
= scm_make_vector (SCM_MAKINUM (31), SCM_EOL
);
2632 SCM scm_after_gc_hook
;
2634 #if (SCM_DEBUG_DEPRECATED == 0)
2635 static SCM scm_gc_vcell
; /* the vcell for gc-thunk. */
2636 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2637 static SCM gc_async
;
2640 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2641 * is run after the gc, as soon as the asynchronous events are handled by the
2645 gc_async_thunk (void)
2647 scm_c_run_hook (scm_after_gc_hook
, SCM_EOL
);
2649 #if (SCM_DEBUG_DEPRECATED == 0)
2651 /* The following code will be removed in Guile 1.5. */
2652 if (SCM_NFALSEP (scm_gc_vcell
))
2654 SCM proc
= SCM_CDR (scm_gc_vcell
);
2656 if (SCM_NFALSEP (proc
) && !SCM_UNBNDP (proc
))
2657 scm_apply (proc
, SCM_EOL
, SCM_EOL
);
2660 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2662 return SCM_UNSPECIFIED
;
2666 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2667 * the garbage collection. The only purpose of this function is to mark the
2668 * gc_async (which will eventually lead to the execution of the
2672 mark_gc_async (void * hook_data
, void *func_data
, void *data
)
2674 scm_system_async_mark (gc_async
);
2684 scm_after_gc_hook
= scm_create_hook ("after-gc-hook", 0);
2686 #if (SCM_DEBUG_DEPRECATED == 0)
2687 scm_gc_vcell
= scm_sysintern ("gc-thunk", SCM_BOOL_F
);
2688 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2689 after_gc_thunk
= scm_make_subr_opt ("%gc-thunk", scm_tc7_subr_0
, gc_async_thunk
, 0);
2690 gc_async
= scm_system_async (after_gc_thunk
); /* protected via scm_asyncs */
2692 scm_c_hook_add (&scm_after_gc_c_hook
, mark_gc_async
, NULL
, 0);
2694 #ifndef SCM_MAGIC_SNARFER
2695 #include "libguile/gc.x"
2699 #endif /*MARK_DEPENDENCIES*/