Commit | Line | Data |
---|---|---|
22a52da1 | 1 | /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc. |
a00c95d9 | 2 | * |
0f2d19dd JB |
3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License as published by | |
5 | * the Free Software Foundation; either version 2, or (at your option) | |
6 | * any later version. | |
a00c95d9 | 7 | * |
0f2d19dd JB |
8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
11 | * GNU General Public License for more details. | |
a00c95d9 | 12 | * |
0f2d19dd JB |
13 | * You should have received a copy of the GNU General Public License |
14 | * along with this software; see the file COPYING. If not, write to | |
82892bed JB |
15 | * the Free Software Foundation, Inc., 59 Temple Place, Suite 330, |
16 | * Boston, MA 02111-1307 USA | |
0f2d19dd JB |
17 | * |
18 | * As a special exception, the Free Software Foundation gives permission | |
19 | * for additional uses of the text contained in its release of GUILE. | |
20 | * | |
21 | * The exception is that, if you link the GUILE library with other files | |
22 | * to produce an executable, this does not by itself cause the | |
23 | * resulting executable to be covered by the GNU General Public License. | |
24 | * Your use of that executable is in no way restricted on account of | |
25 | * linking the GUILE library code into it. | |
26 | * | |
27 | * This exception does not however invalidate any other reasons why | |
28 | * the executable file might be covered by the GNU General Public License. | |
29 | * | |
30 | * This exception applies only to the code released by the | |
31 | * Free Software Foundation under the name GUILE. If you copy | |
32 | * code from other Free Software Foundation releases into a copy of | |
33 | * GUILE, as the General Public License permits, the exception does | |
34 | * not apply to the code that you add in this way. To avoid misleading | |
35 | * anyone as to the status of such modified files, you must delete | |
36 | * this exception notice from them. | |
37 | * | |
38 | * If you write modifications of your own for GUILE, it is your choice | |
39 | * whether to permit this exception to apply to your modifications. | |
82892bed | 40 | * If you do not wish that, delete this exception notice. */ |
1bbd0b84 GB |
41 | |
42 | /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999, | |
43 | gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */ | |
44 | ||
37ddcaf6 MD |
45 | /* #define DEBUGINFO */ |
46 | ||
56495472 ML |
47 | /* SECTION: This code is compiled once. |
48 | */ | |
49 | ||
50 | #ifndef MARK_DEPENDENCIES | |
51 | ||
0f2d19dd JB |
52 | \f |
53 | #include <stdio.h> | |
e6e2e95a | 54 | #include <errno.h> |
783e7774 | 55 | #include <string.h> |
e6e2e95a | 56 | |
a0599745 | 57 | #include "libguile/_scm.h" |
0a7a7445 | 58 | #include "libguile/eval.h" |
a0599745 MD |
59 | #include "libguile/stime.h" |
60 | #include "libguile/stackchk.h" | |
61 | #include "libguile/struct.h" | |
a0599745 MD |
62 | #include "libguile/smob.h" |
63 | #include "libguile/unif.h" | |
64 | #include "libguile/async.h" | |
65 | #include "libguile/ports.h" | |
66 | #include "libguile/root.h" | |
67 | #include "libguile/strings.h" | |
68 | #include "libguile/vectors.h" | |
801cb5e7 | 69 | #include "libguile/weaks.h" |
686765af | 70 | #include "libguile/hashtab.h" |
ecf470a2 | 71 | #include "libguile/tags.h" |
a0599745 MD |
72 | |
73 | #include "libguile/validate.h" | |
74 | #include "libguile/gc.h" | |
fce59c93 | 75 | |
bc9d9bb2 | 76 | #ifdef GUILE_DEBUG_MALLOC |
a0599745 | 77 | #include "libguile/debug-malloc.h" |
bc9d9bb2 MD |
78 | #endif |
79 | ||
0f2d19dd | 80 | #ifdef HAVE_MALLOC_H |
95b88819 | 81 | #include <malloc.h> |
0f2d19dd JB |
82 | #endif |
83 | ||
84 | #ifdef HAVE_UNISTD_H | |
95b88819 | 85 | #include <unistd.h> |
0f2d19dd JB |
86 | #endif |
87 | ||
1cc91f1b JB |
88 | #ifdef __STDC__ |
89 | #include <stdarg.h> | |
90 | #define var_start(x, y) va_start(x, y) | |
91 | #else | |
92 | #include <varargs.h> | |
93 | #define var_start(x, y) va_start(x) | |
94 | #endif | |
95 | ||
0f2d19dd | 96 | \f |
406c7d90 DH |
97 | |
98 | unsigned int scm_gc_running_p = 0; | |
99 | ||
100 | \f | |
101 | ||
102 | #if (SCM_DEBUG_CELL_ACCESSES == 1) | |
103 | ||
61045190 DH |
104 | scm_bits_t scm_tc16_allocated; |
105 | ||
106 | /* Set this to != 0 if every cell that is accessed shall be checked: | |
107 | */ | |
108 | unsigned int scm_debug_cell_accesses_p = 1; | |
406c7d90 DH |
109 | |
110 | ||
111 | /* Assert that the given object is a valid reference to a valid cell. This | |
112 | * test involves to determine whether the object is a cell pointer, whether | |
113 | * this pointer actually points into a heap segment and whether the cell | |
114 | * pointed to is not a free cell. | |
115 | */ | |
116 | void | |
117 | scm_assert_cell_valid (SCM cell) | |
118 | { | |
61045190 DH |
119 | static unsigned int already_running = 0; |
120 | ||
121 | if (scm_debug_cell_accesses_p && !already_running) | |
406c7d90 | 122 | { |
61045190 | 123 | already_running = 1; /* set to avoid recursion */ |
406c7d90 | 124 | |
9d47a1e6 | 125 | if (!scm_cellp (cell)) |
406c7d90 DH |
126 | { |
127 | fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell)); | |
128 | abort (); | |
129 | } | |
130 | else if (!scm_gc_running_p) | |
131 | { | |
132 | /* Dirk::FIXME:: During garbage collection there occur references to | |
133 | free cells. This is allright during conservative marking, but | |
134 | should not happen otherwise (I think). The case of free cells | |
135 | accessed during conservative marking is handled in function | |
136 | scm_mark_locations. However, there still occur accesses to free | |
137 | cells during gc. I don't understand why this happens. If it is | |
138 | a bug and gets fixed, the following test should also work while | |
139 | gc is running. | |
140 | */ | |
141 | if (SCM_FREE_CELL_P (cell)) | |
142 | { | |
143 | fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell)); | |
144 | abort (); | |
145 | } | |
146 | } | |
61045190 | 147 | already_running = 0; /* re-enable */ |
406c7d90 DH |
148 | } |
149 | } | |
150 | ||
151 | ||
152 | SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0, | |
153 | (SCM flag), | |
1e6808ea MG |
154 | "If @var{flag} is @code{#f}, cell access checking is disabled.\n" |
155 | "If @var{flag} is @code{#t}, cell access checking is enabled.\n" | |
156 | "This procedure only exists when the compile-time flag\n" | |
157 | "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.") | |
406c7d90 DH |
158 | #define FUNC_NAME s_scm_set_debug_cell_accesses_x |
159 | { | |
160 | if (SCM_FALSEP (flag)) { | |
161 | scm_debug_cell_accesses_p = 0; | |
162 | } else if (SCM_EQ_P (flag, SCM_BOOL_T)) { | |
163 | scm_debug_cell_accesses_p = 1; | |
164 | } else { | |
165 | SCM_WRONG_TYPE_ARG (1, flag); | |
166 | } | |
167 | return SCM_UNSPECIFIED; | |
168 | } | |
169 | #undef FUNC_NAME | |
170 | ||
171 | #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */ | |
172 | ||
173 | \f | |
174 | ||
0f2d19dd | 175 | /* {heap tuning parameters} |
a00c95d9 | 176 | * |
0f2d19dd JB |
177 | * These are parameters for controlling memory allocation. The heap |
178 | * is the area out of which scm_cons, and object headers are allocated. | |
179 | * | |
180 | * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a | |
181 | * 64 bit machine. The units of the _SIZE parameters are bytes. | |
182 | * Cons pairs and object headers occupy one heap cell. | |
183 | * | |
184 | * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is | |
185 | * allocated initially the heap will grow by half its current size | |
186 | * each subsequent time more heap is needed. | |
187 | * | |
188 | * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE | |
189 | * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more | |
190 | * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code | |
191 | * is in scm_init_storage() and alloc_some_heap() in sys.c | |
a00c95d9 | 192 | * |
0f2d19dd JB |
193 | * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by |
194 | * SCM_EXPHEAP(scm_heap_size) when more heap is needed. | |
195 | * | |
196 | * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap | |
197 | * is needed. | |
198 | * | |
199 | * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will | |
a00c95d9 | 200 | * trigger a GC. |
6064dcc6 MV |
201 | * |
202 | * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be | |
203 | * reclaimed by a GC triggered by must_malloc. If less than this is | |
204 | * reclaimed, the trigger threshold is raised. [I don't know what a | |
205 | * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to | |
a00c95d9 | 206 | * work around a oscillation that caused almost constant GC.] |
0f2d19dd JB |
207 | */ |
208 | ||
8fef55a8 MD |
209 | /* |
210 | * Heap size 45000 and 40% min yield gives quick startup and no extra | |
211 | * heap allocation. Having higher values on min yield may lead to | |
212 | * large heaps, especially if code behaviour is varying its | |
213 | * maximum consumption between different freelists. | |
214 | */ | |
d6884e63 ML |
215 | |
216 | #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS) | |
217 | #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L) | |
218 | #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS) | |
219 | int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1) | |
220 | / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE); | |
aeacfc8f | 221 | int scm_default_min_yield_1 = 40; |
4c48ba06 | 222 | |
d6884e63 ML |
223 | #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2)) |
224 | int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1) | |
225 | / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE); | |
4c48ba06 MD |
226 | /* The following value may seem large, but note that if we get to GC at |
227 | * all, this means that we have a numerically intensive application | |
228 | */ | |
aeacfc8f | 229 | int scm_default_min_yield_2 = 40; |
4c48ba06 | 230 | |
aeacfc8f | 231 | int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */ |
4c48ba06 | 232 | |
d6884e63 | 233 | #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE) |
0f2d19dd JB |
234 | #ifdef _QC |
235 | # define SCM_HEAP_SEG_SIZE 32768L | |
236 | #else | |
237 | # ifdef sequent | |
4c48ba06 | 238 | # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell)) |
0f2d19dd | 239 | # else |
4c48ba06 | 240 | # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell)) |
0f2d19dd JB |
241 | # endif |
242 | #endif | |
4c48ba06 | 243 | /* Make heap grow with factor 1.5 */ |
4a4c9785 | 244 | #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2) |
0f2d19dd | 245 | #define SCM_INIT_MALLOC_LIMIT 100000 |
6064dcc6 | 246 | #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10) |
0f2d19dd | 247 | |
d6884e63 ML |
248 | /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span) |
249 | aligned inner bounds for allocated storage */ | |
0f2d19dd JB |
250 | |
251 | #ifdef PROT386 | |
252 | /*in 386 protected mode we must only adjust the offset */ | |
a00c95d9 ML |
253 | # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1)) |
254 | # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p)) | |
0f2d19dd JB |
255 | #else |
256 | # ifdef _UNICOS | |
a00c95d9 ML |
257 | # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span))) |
258 | # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p)) | |
0f2d19dd | 259 | # else |
a00c95d9 ML |
260 | # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L)) |
261 | # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p)) | |
0f2d19dd JB |
262 | # endif /* UNICOS */ |
263 | #endif /* PROT386 */ | |
264 | ||
ecf470a2 ML |
265 | #define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0) |
266 | ||
d6884e63 ML |
267 | #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1) |
268 | #define CLUSTER_SIZE_IN_BYTES(freelist) \ | |
269 | (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE) | |
0f2d19dd JB |
270 | |
271 | \f | |
945fec60 | 272 | /* scm_freelists |
0f2d19dd | 273 | */ |
945fec60 | 274 | |
a00c95d9 ML |
275 | typedef struct scm_freelist_t { |
276 | /* collected cells */ | |
277 | SCM cells; | |
a00c95d9 ML |
278 | /* number of cells left to collect before cluster is full */ |
279 | unsigned int left_to_collect; | |
b37fe1c5 MD |
280 | /* number of clusters which have been allocated */ |
281 | unsigned int clusters_allocated; | |
8fef55a8 MD |
282 | /* a list of freelists, each of size cluster_size, |
283 | * except the last one which may be shorter | |
284 | */ | |
a00c95d9 ML |
285 | SCM clusters; |
286 | SCM *clustertail; | |
b37fe1c5 | 287 | /* this is the number of objects in each cluster, including the spine cell */ |
a00c95d9 | 288 | int cluster_size; |
8fef55a8 | 289 | /* indicates that we should grow heap instead of GC:ing |
a00c95d9 ML |
290 | */ |
291 | int grow_heap_p; | |
8fef55a8 | 292 | /* minimum yield on this list in order not to grow the heap |
a00c95d9 | 293 | */ |
8fef55a8 MD |
294 | long min_yield; |
295 | /* defines min_yield as percent of total heap size | |
a00c95d9 | 296 | */ |
8fef55a8 | 297 | int min_yield_fraction; |
a00c95d9 ML |
298 | /* number of cells per object on this list */ |
299 | int span; | |
300 | /* number of collected cells during last GC */ | |
1811ebce MD |
301 | long collected; |
302 | /* number of collected cells during penultimate GC */ | |
303 | long collected_1; | |
a00c95d9 ML |
304 | /* total number of cells in heap segments |
305 | * belonging to this list. | |
306 | */ | |
1811ebce | 307 | long heap_size; |
a00c95d9 ML |
308 | } scm_freelist_t; |
309 | ||
4a4c9785 MD |
310 | SCM scm_freelist = SCM_EOL; |
311 | scm_freelist_t scm_master_freelist = { | |
b37fe1c5 | 312 | SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0 |
4a4c9785 MD |
313 | }; |
314 | SCM scm_freelist2 = SCM_EOL; | |
315 | scm_freelist_t scm_master_freelist2 = { | |
b37fe1c5 | 316 | SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0 |
4a4c9785 | 317 | }; |
0f2d19dd JB |
318 | |
319 | /* scm_mtrigger | |
320 | * is the number of bytes of must_malloc allocation needed to trigger gc. | |
321 | */ | |
15e9d186 | 322 | unsigned long scm_mtrigger; |
0f2d19dd | 323 | |
0f2d19dd JB |
324 | /* scm_gc_heap_lock |
325 | * If set, don't expand the heap. Set only during gc, during which no allocation | |
326 | * is supposed to take place anyway. | |
327 | */ | |
328 | int scm_gc_heap_lock = 0; | |
329 | ||
330 | /* GC Blocking | |
331 | * Don't pause for collection if this is set -- just | |
332 | * expand the heap. | |
333 | */ | |
0f2d19dd JB |
334 | int scm_block_gc = 1; |
335 | ||
0f2d19dd JB |
336 | /* During collection, this accumulates objects holding |
337 | * weak references. | |
338 | */ | |
ab4bef85 | 339 | SCM scm_weak_vectors; |
0f2d19dd | 340 | |
7445e0e8 MD |
341 | /* During collection, this accumulates structures which are to be freed. |
342 | */ | |
343 | SCM scm_structs_to_free; | |
344 | ||
0f2d19dd JB |
345 | /* GC Statistics Keeping |
346 | */ | |
347 | unsigned long scm_cells_allocated = 0; | |
a5c314c8 | 348 | long scm_mallocated = 0; |
b37fe1c5 | 349 | unsigned long scm_gc_cells_collected; |
8b0d194f | 350 | unsigned long scm_gc_yield; |
37ddcaf6 | 351 | static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */ |
0f2d19dd JB |
352 | unsigned long scm_gc_malloc_collected; |
353 | unsigned long scm_gc_ports_collected; | |
0f2d19dd | 354 | unsigned long scm_gc_time_taken = 0; |
c9b0d4b0 ML |
355 | static unsigned long t_before_gc; |
356 | static unsigned long t_before_sweep; | |
357 | unsigned long scm_gc_mark_time_taken = 0; | |
358 | unsigned long scm_gc_sweep_time_taken = 0; | |
359 | unsigned long scm_gc_times = 0; | |
360 | unsigned long scm_gc_cells_swept = 0; | |
361 | double scm_gc_cells_marked_acc = 0.; | |
362 | double scm_gc_cells_swept_acc = 0.; | |
0f2d19dd JB |
363 | |
364 | SCM_SYMBOL (sym_cells_allocated, "cells-allocated"); | |
365 | SCM_SYMBOL (sym_heap_size, "cell-heap-size"); | |
366 | SCM_SYMBOL (sym_mallocated, "bytes-malloced"); | |
367 | SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold"); | |
368 | SCM_SYMBOL (sym_heap_segments, "cell-heap-segments"); | |
369 | SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken"); | |
c9b0d4b0 ML |
370 | SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken"); |
371 | SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken"); | |
372 | SCM_SYMBOL (sym_times, "gc-times"); | |
373 | SCM_SYMBOL (sym_cells_marked, "cells-marked"); | |
374 | SCM_SYMBOL (sym_cells_swept, "cells-swept"); | |
0f2d19dd | 375 | |
a00c95d9 | 376 | typedef struct scm_heap_seg_data_t |
0f2d19dd | 377 | { |
cf2d30f6 JB |
378 | /* lower and upper bounds of the segment */ |
379 | SCM_CELLPTR bounds[2]; | |
380 | ||
381 | /* address of the head-of-freelist pointer for this segment's cells. | |
382 | All segments usually point to the same one, scm_freelist. */ | |
4c48ba06 | 383 | scm_freelist_t *freelist; |
cf2d30f6 | 384 | |
fe517a7d | 385 | /* number of cells per object in this segment */ |
945fec60 | 386 | int span; |
a00c95d9 | 387 | } scm_heap_seg_data_t; |
0f2d19dd JB |
388 | |
389 | ||
390 | ||
945fec60 | 391 | static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *); |
b6efc951 DH |
392 | |
393 | typedef enum { return_on_error, abort_on_error } policy_on_error; | |
394 | static void alloc_some_heap (scm_freelist_t *, policy_on_error); | |
0f2d19dd JB |
395 | |
396 | ||
d6884e63 ML |
397 | #define SCM_HEAP_SIZE \ |
398 | (scm_master_freelist.heap_size + scm_master_freelist2.heap_size) | |
399 | #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B)) | |
400 | ||
401 | #define BVEC_GROW_SIZE 256 | |
402 | #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE) | |
403 | #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t)) | |
404 | ||
405 | /* mark space allocation */ | |
406 | ||
407 | typedef struct scm_mark_space_t | |
408 | { | |
409 | scm_c_bvec_limb_t *bvec_space; | |
410 | struct scm_mark_space_t *next; | |
411 | } scm_mark_space_t; | |
412 | ||
413 | static scm_mark_space_t *current_mark_space; | |
414 | static scm_mark_space_t **mark_space_ptr; | |
415 | static int current_mark_space_offset; | |
416 | static scm_mark_space_t *mark_space_head; | |
417 | ||
418 | static scm_c_bvec_limb_t * | |
419 | get_bvec () | |
db4b4ca6 | 420 | #define FUNC_NAME "get_bvec" |
d6884e63 ML |
421 | { |
422 | scm_c_bvec_limb_t *res; | |
423 | ||
424 | if (!current_mark_space) | |
425 | { | |
426 | SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t))); | |
427 | if (!current_mark_space) | |
db4b4ca6 | 428 | SCM_MISC_ERROR ("could not grow heap", SCM_EOL); |
d6884e63 ML |
429 | |
430 | current_mark_space->bvec_space = NULL; | |
431 | current_mark_space->next = NULL; | |
432 | ||
433 | *mark_space_ptr = current_mark_space; | |
434 | mark_space_ptr = &(current_mark_space->next); | |
435 | ||
436 | return get_bvec (); | |
437 | } | |
438 | ||
439 | if (!(current_mark_space->bvec_space)) | |
440 | { | |
441 | SCM_SYSCALL (current_mark_space->bvec_space = | |
442 | (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1)); | |
443 | if (!(current_mark_space->bvec_space)) | |
db4b4ca6 | 444 | SCM_MISC_ERROR ("could not grow heap", SCM_EOL); |
d6884e63 ML |
445 | |
446 | current_mark_space_offset = 0; | |
447 | ||
448 | return get_bvec (); | |
449 | } | |
450 | ||
451 | if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS) | |
452 | { | |
453 | current_mark_space = NULL; | |
454 | ||
455 | return get_bvec (); | |
456 | } | |
457 | ||
458 | res = current_mark_space->bvec_space + current_mark_space_offset; | |
459 | current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS; | |
460 | ||
461 | return res; | |
462 | } | |
db4b4ca6 DH |
463 | #undef FUNC_NAME |
464 | ||
d6884e63 ML |
465 | |
466 | static void | |
467 | clear_mark_space () | |
468 | { | |
469 | scm_mark_space_t *ms; | |
470 | ||
471 | for (ms = mark_space_head; ms; ms = ms->next) | |
472 | memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES); | |
473 | } | |
474 | ||
475 | ||
0f2d19dd | 476 | \f |
cf2d30f6 JB |
477 | /* Debugging functions. */ |
478 | ||
bb2c57fa | 479 | #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST) |
cf2d30f6 JB |
480 | |
481 | /* Return the number of the heap segment containing CELL. */ | |
482 | static int | |
483 | which_seg (SCM cell) | |
484 | { | |
485 | int i; | |
486 | ||
487 | for (i = 0; i < scm_n_heap_segs; i++) | |
195e6201 DH |
488 | if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell)) |
489 | && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell))) | |
cf2d30f6 JB |
490 | return i; |
491 | fprintf (stderr, "which_seg: can't find segment containing cell %lx\n", | |
945fec60 | 492 | SCM_UNPACK (cell)); |
cf2d30f6 JB |
493 | abort (); |
494 | } | |
495 | ||
496 | ||
8ded62a3 MD |
497 | static void |
498 | map_free_list (scm_freelist_t *master, SCM freelist) | |
499 | { | |
500 | int last_seg = -1, count = 0; | |
501 | SCM f; | |
a00c95d9 | 502 | |
3f5d82cd | 503 | for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f)) |
8ded62a3 MD |
504 | { |
505 | int this_seg = which_seg (f); | |
506 | ||
507 | if (this_seg != last_seg) | |
508 | { | |
509 | if (last_seg != -1) | |
510 | fprintf (stderr, " %5d %d-cells in segment %d\n", | |
511 | count, master->span, last_seg); | |
512 | last_seg = this_seg; | |
513 | count = 0; | |
514 | } | |
515 | count++; | |
516 | } | |
517 | if (last_seg != -1) | |
518 | fprintf (stderr, " %5d %d-cells in segment %d\n", | |
519 | count, master->span, last_seg); | |
520 | } | |
cf2d30f6 | 521 | |
a00c95d9 | 522 | SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0, |
acb0a19c | 523 | (), |
5352393c MG |
524 | "Print debugging information about the free-list.\n" |
525 | "@code{map-free-list} is only included in\n" | |
526 | "@code{--enable-guile-debug} builds of Guile.") | |
acb0a19c MD |
527 | #define FUNC_NAME s_scm_map_free_list |
528 | { | |
4c48ba06 MD |
529 | int i; |
530 | fprintf (stderr, "%d segments total (%d:%d", | |
531 | scm_n_heap_segs, | |
532 | scm_heap_table[0].span, | |
533 | scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]); | |
534 | for (i = 1; i < scm_n_heap_segs; i++) | |
535 | fprintf (stderr, ", %d:%d", | |
536 | scm_heap_table[i].span, | |
537 | scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]); | |
538 | fprintf (stderr, ")\n"); | |
8ded62a3 MD |
539 | map_free_list (&scm_master_freelist, scm_freelist); |
540 | map_free_list (&scm_master_freelist2, scm_freelist2); | |
cf2d30f6 JB |
541 | fflush (stderr); |
542 | ||
543 | return SCM_UNSPECIFIED; | |
544 | } | |
1bbd0b84 | 545 | #undef FUNC_NAME |
cf2d30f6 | 546 | |
4c48ba06 MD |
547 | static int last_cluster; |
548 | static int last_size; | |
549 | ||
5384bc5b MD |
550 | static int |
551 | free_list_length (char *title, int i, SCM freelist) | |
552 | { | |
553 | SCM ls; | |
554 | int n = 0; | |
3f5d82cd DH |
555 | for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls)) |
556 | if (SCM_FREE_CELL_P (ls)) | |
5384bc5b MD |
557 | ++n; |
558 | else | |
559 | { | |
560 | fprintf (stderr, "bad cell in %s at position %d\n", title, n); | |
561 | abort (); | |
562 | } | |
4c48ba06 MD |
563 | if (n != last_size) |
564 | { | |
565 | if (i > 0) | |
566 | { | |
567 | if (last_cluster == i - 1) | |
568 | fprintf (stderr, "\t%d\n", last_size); | |
569 | else | |
570 | fprintf (stderr, "-%d\t%d\n", i - 1, last_size); | |
571 | } | |
572 | if (i >= 0) | |
573 | fprintf (stderr, "%s %d", title, i); | |
574 | else | |
575 | fprintf (stderr, "%s\t%d\n", title, n); | |
576 | last_cluster = i; | |
577 | last_size = n; | |
578 | } | |
5384bc5b MD |
579 | return n; |
580 | } | |
581 | ||
582 | static void | |
583 | free_list_lengths (char *title, scm_freelist_t *master, SCM freelist) | |
584 | { | |
585 | SCM clusters; | |
4c48ba06 | 586 | int i = 0, len, n = 0; |
5384bc5b MD |
587 | fprintf (stderr, "%s\n\n", title); |
588 | n += free_list_length ("free list", -1, freelist); | |
589 | for (clusters = master->clusters; | |
590 | SCM_NNULLP (clusters); | |
591 | clusters = SCM_CDR (clusters)) | |
4c48ba06 MD |
592 | { |
593 | len = free_list_length ("cluster", i++, SCM_CAR (clusters)); | |
594 | n += len; | |
595 | } | |
596 | if (last_cluster == i - 1) | |
597 | fprintf (stderr, "\t%d\n", last_size); | |
598 | else | |
599 | fprintf (stderr, "-%d\t%d\n", i - 1, last_size); | |
600 | fprintf (stderr, "\ntotal %d objects\n\n", n); | |
5384bc5b MD |
601 | } |
602 | ||
a00c95d9 | 603 | SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0, |
5384bc5b | 604 | (), |
5352393c MG |
605 | "Print debugging information about the free-list.\n" |
606 | "@code{free-list-length} is only included in\n" | |
607 | "@code{--enable-guile-debug} builds of Guile.") | |
5384bc5b MD |
608 | #define FUNC_NAME s_scm_free_list_length |
609 | { | |
b37fe1c5 MD |
610 | free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist); |
611 | free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2); | |
12e5fb3b | 612 | return SCM_UNSPECIFIED; |
5384bc5b MD |
613 | } |
614 | #undef FUNC_NAME | |
615 | ||
bb2c57fa MD |
616 | #endif |
617 | ||
618 | #ifdef GUILE_DEBUG_FREELIST | |
cf2d30f6 | 619 | |
d3dd80ab MG |
620 | /* Non-zero if freelist debugging is in effect. Set this via |
621 | `gc-set-debug-check-freelist!'. */ | |
622 | static int scm_debug_check_freelist = 0; | |
623 | ||
cf2d30f6 JB |
624 | /* Number of calls to SCM_NEWCELL since startup. */ |
625 | static unsigned long scm_newcell_count; | |
acb0a19c | 626 | static unsigned long scm_newcell2_count; |
cf2d30f6 JB |
627 | |
628 | /* Search freelist for anything that isn't marked as a free cell. | |
629 | Abort if we find something. */ | |
8ded62a3 MD |
630 | static void |
631 | scm_check_freelist (SCM freelist) | |
632 | { | |
633 | SCM f; | |
634 | int i = 0; | |
635 | ||
3f5d82cd DH |
636 | for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++) |
637 | if (!SCM_FREE_CELL_P (f)) | |
8ded62a3 MD |
638 | { |
639 | fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n", | |
640 | scm_newcell_count, i); | |
8ded62a3 MD |
641 | abort (); |
642 | } | |
643 | } | |
cf2d30f6 | 644 | |
a00c95d9 | 645 | SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0, |
1bbd0b84 | 646 | (SCM flag), |
1e6808ea MG |
647 | "If @var{flag} is @code{#t}, check the freelist for consistency\n" |
648 | "on each cell allocation. This procedure only exists when the\n" | |
649 | "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.") | |
1bbd0b84 | 650 | #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x |
25748c78 | 651 | { |
d6884e63 ML |
652 | /* [cmm] I did a double-take when I read this code the first time. |
653 | well, FWIW. */ | |
945fec60 | 654 | SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist); |
25748c78 GB |
655 | return SCM_UNSPECIFIED; |
656 | } | |
1bbd0b84 | 657 | #undef FUNC_NAME |
25748c78 GB |
658 | |
659 | ||
4a4c9785 MD |
660 | SCM |
661 | scm_debug_newcell (void) | |
662 | { | |
663 | SCM new; | |
664 | ||
665 | scm_newcell_count++; | |
666 | if (scm_debug_check_freelist) | |
667 | { | |
8ded62a3 | 668 | scm_check_freelist (scm_freelist); |
4a4c9785 MD |
669 | scm_gc(); |
670 | } | |
671 | ||
672 | /* The rest of this is supposed to be identical to the SCM_NEWCELL | |
673 | macro. */ | |
3f5d82cd | 674 | if (SCM_NULLP (scm_freelist)) |
7c33806a DH |
675 | { |
676 | new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist); | |
677 | SCM_GC_SET_ALLOCATED (new); | |
678 | } | |
4a4c9785 MD |
679 | else |
680 | { | |
681 | new = scm_freelist; | |
3f5d82cd | 682 | scm_freelist = SCM_FREE_CELL_CDR (scm_freelist); |
7c33806a | 683 | SCM_GC_SET_ALLOCATED (new); |
4a4c9785 MD |
684 | } |
685 | ||
686 | return new; | |
687 | } | |
688 | ||
689 | SCM | |
690 | scm_debug_newcell2 (void) | |
691 | { | |
692 | SCM new; | |
693 | ||
694 | scm_newcell2_count++; | |
695 | if (scm_debug_check_freelist) | |
696 | { | |
8ded62a3 | 697 | scm_check_freelist (scm_freelist2); |
4a4c9785 MD |
698 | scm_gc (); |
699 | } | |
700 | ||
701 | /* The rest of this is supposed to be identical to the SCM_NEWCELL | |
702 | macro. */ | |
3f5d82cd | 703 | if (SCM_NULLP (scm_freelist2)) |
7c33806a DH |
704 | { |
705 | new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2); | |
706 | SCM_GC_SET_ALLOCATED (new); | |
707 | } | |
4a4c9785 MD |
708 | else |
709 | { | |
710 | new = scm_freelist2; | |
3f5d82cd | 711 | scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2); |
7c33806a | 712 | SCM_GC_SET_ALLOCATED (new); |
4a4c9785 MD |
713 | } |
714 | ||
715 | return new; | |
716 | } | |
717 | ||
fca7547b | 718 | #endif /* GUILE_DEBUG_FREELIST */ |
cf2d30f6 JB |
719 | |
720 | \f | |
0f2d19dd | 721 | |
b37fe1c5 MD |
722 | static unsigned long |
723 | master_cells_allocated (scm_freelist_t *master) | |
724 | { | |
d6884e63 | 725 | /* the '- 1' below is to ignore the cluster spine cells. */ |
b37fe1c5 MD |
726 | int objects = master->clusters_allocated * (master->cluster_size - 1); |
727 | if (SCM_NULLP (master->clusters)) | |
728 | objects -= master->left_to_collect; | |
729 | return master->span * objects; | |
730 | } | |
731 | ||
732 | static unsigned long | |
733 | freelist_length (SCM freelist) | |
734 | { | |
735 | int n; | |
3f5d82cd | 736 | for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist)) |
b37fe1c5 MD |
737 | ++n; |
738 | return n; | |
739 | } | |
740 | ||
741 | static unsigned long | |
742 | compute_cells_allocated () | |
743 | { | |
744 | return (scm_cells_allocated | |
745 | + master_cells_allocated (&scm_master_freelist) | |
746 | + master_cells_allocated (&scm_master_freelist2) | |
747 | - scm_master_freelist.span * freelist_length (scm_freelist) | |
748 | - scm_master_freelist2.span * freelist_length (scm_freelist2)); | |
749 | } | |
b37fe1c5 | 750 | |
0f2d19dd JB |
751 | /* {Scheme Interface to GC} |
752 | */ | |
753 | ||
/* Scheme-visible `gc-stats': build an association list snapshotting the
 * collector's counters.  Interrupts are deferred and GC is blocked around
 * the heap-segment walk so no collection can rearrange the segment table
 * while we read it; the `retry' label re-runs the walk if the number of
 * segments changed anyway (another thread grew the heap).
 */
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;                               /* segment count observed before the walk */
  SCM heap_segs;                       /* list of (upper-bound . lower-bound) pairs */
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  long int local_scm_gc_times;
  long int local_scm_gc_mark_time_taken;
  long int local_scm_gc_sweep_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;

  SCM_DEFER_INTS;

  ++scm_block_gc;

 retry:
  /* Walk the heap segment table backwards so consing builds the list in
   * forward order.  Each entry is (bounds[1] . bounds[0]).  */
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
                          heap_segs);
  /* The consing above can itself grow the heap; redo the walk if the
   * segment table changed under us.  */
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                        scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                        scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                        scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                        scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                        scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
                        scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
                        scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
                        scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
                        scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
                        scm_cons (sym_heap_segments, heap_segs),
                        SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
0f2d19dd JB |
821 | |
822 | ||
/* Reset the per-collection statistics counters at the start of a GC.
 *
 * WHAT names the trigger of this collection (e.g. "cells", "call"); it is
 * not used in this function's visible body — presumably kept for symmetry
 * with scm_igc's argument and for debugging hooks.
 */
static void
gc_start_stats (const char *what)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  /* Remember the previous yield BEFORE overwriting it: adjust_min_yield
   * later picks the larger of the last two yields.  */
  scm_gc_yield_1 = scm_gc_yield;
  /* Total cells considered "allocated" going into this collection.  */
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}
836 | ||
939794ce | 837 | |
c9b0d4b0 ML |
838 | static void |
839 | gc_end_stats () | |
0f2d19dd | 840 | { |
c9b0d4b0 ML |
841 | unsigned long t = scm_c_get_internal_run_time (); |
842 | scm_gc_time_taken += (t - t_before_gc); | |
843 | scm_gc_sweep_time_taken += (t - t_before_sweep); | |
844 | ++scm_gc_times; | |
845 | ||
846 | scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected; | |
847 | scm_gc_cells_swept_acc += scm_gc_cells_swept; | |
0f2d19dd JB |
848 | } |
849 | ||
850 | ||
a00c95d9 | 851 | SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0, |
1bbd0b84 | 852 | (SCM obj), |
b380b885 MD |
853 | "Return an integer that for the lifetime of @var{obj} is uniquely\n" |
854 | "returned by this function for @var{obj}") | |
1bbd0b84 | 855 | #define FUNC_NAME s_scm_object_address |
0f2d19dd | 856 | { |
54778cd3 | 857 | return scm_ulong2num ((unsigned long) SCM_UNPACK (obj)); |
0f2d19dd | 858 | } |
1bbd0b84 | 859 | #undef FUNC_NAME |
0f2d19dd JB |
860 | |
861 | ||
/* Scheme-visible `gc': force an immediate full collection.  Interrupts
 * are deferred around scm_igc so the collector is not re-entered from a
 * signal handler.
 */
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all of SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");   /* "call" tags this GC as explicitly requested */
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
0f2d19dd JB |
874 | |
875 | ||
876 | \f | |
877 | /* {C Interface For When GC is Triggered} | |
878 | */ | |
879 | ||
/* Raise FREELIST's min_yield after a GC whose yield fell short, so the
 * next collection is predicted to free min_yield_fraction of the heap.
 * Only ever adjusts upwards (delta > 0); a fraction of 0 disables the
 * mechanism for this freelist.
 */
static void
adjust_min_yield (scm_freelist_t *freelist)
{
  /* min yield is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   *  by computing an average over multiple GC:s.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick largest of last two yields. */
      int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
                   - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      /* NOTE(review): "%d" assumes scm_cells_allocated is int-sized;
       * verify its declared type before enabling DEBUGINFO.  */
      fprintf (stderr, " after GC = %d, delta = %d\n",
               scm_cells_allocated,
               delta);
#endif
      if (delta > 0)
        freelist->min_yield += delta;
    }
}
911 | ||
b6efc951 | 912 | |
4a4c9785 | 913 | /* When we get POSIX threads support, the master will be global and |
4c48ba06 MD |
914 | * common while the freelist will be individual for each thread. |
915 | */ | |
4a4c9785 MD |
916 | |
/* Refill the slave freelist *FREELIST from MASTER and return one fresh
 * cell.  Called when the slave freelist is empty.
 *
 * Strategy, repeated until a non-empty cluster is obtained:
 *   1. if the master has no clusters and heap growth is allowed (or GC is
 *      blocked), try to grow the heap without collecting;
 *   2. failing that, run a GC and re-tune the master's min_yield;
 *   3. if even GC freed nothing, grow the heap or abort.
 *
 * On success the head cell of the cluster is returned and the rest of the
 * cluster becomes the new slave freelist.  Interrupts are disabled for
 * the duration (scm_ints_disabled).
 */
SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p || scm_block_gc)
            {
              /* In order to reduce gc frequency, try to allocate a new heap
               * segment first, even if gc might find some free cells.  If we
               * can't obtain a new heap segment, we will try gc later.
               */
              master->grow_heap_p = 0;
              alloc_some_heap (master, return_on_error);
            }
          if (SCM_NULLP (master->clusters))
            {
              /* The heap was not grown, either because it wasn't scheduled to
               * grow, or because there was not enough memory available.  In
               * both cases we have to try gc to get some free cells.
               */
#ifdef DEBUGINFO
              /* NOTE(review): "%d" assumes the cell totals fit an int;
               * verify the counters' types before enabling DEBUGINFO.  */
              fprintf (stderr, "allocated = %d, ",
                       scm_cells_allocated
                       + master_cells_allocated (&scm_master_freelist)
                       + master_cells_allocated (&scm_master_freelist2));
#endif
              scm_igc ("cells");
              adjust_min_yield (master);
              if (SCM_NULLP (master->clusters))
                {
                  /* gc could not free any cells.  Now, we _must_ allocate a
                   * new heap segment, because there is no other possibility
                   * to provide a new cell for the caller.
                   */
                  alloc_some_heap (master, abort_on_error);
                }
            }
        }
      /* Detach the first cluster from the master.  Its head may still be
       * SCM_EOL (empty cluster), in which case we loop and try again.  */
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));

#ifdef GUILE_DEBUG_FREELIST
  scm_check_freelist (cell);
#endif

  --scm_ints_disabled;
  /* Hand the remainder of the cluster to the caller's slave freelist.  */
  *freelist = SCM_FREE_CELL_CDR (cell);
  return cell;
}
973 | ||
b6efc951 | 974 | |
#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 *
 * Fix (while still disabled): the routine was declared `void' yet ended
 * with `return cell;', so it could never have compiled if this #if 0
 * block were enabled.  It clearly means to hand the reserved cluster's
 * head cell back to the caller, so the return type is SCM.
 */

SCM
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  /* Pull one cluster off MASTER; `freelist' receives the rest of it.  */
  cell = scm_gc_for_newcell (master, &freelist);
  /* Re-chain the remainder behind the head cell so the whole cluster
   * stays reachable from the returned cell.  */
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif
990 | ||
801cb5e7 MD |
991 | |
/* C-level hooks run at fixed points of a collection by scm_igc, in this
 * order: before-gc, before-mark, before-sweep, after-sweep, after-gc.  */
scm_c_hook_t scm_before_gc_c_hook;
scm_c_hook_t scm_before_mark_c_hook;
scm_c_hook_t scm_before_sweep_c_hook;
scm_c_hook_t scm_after_sweep_c_hook;
scm_c_hook_t scm_after_gc_c_hook;
997 | ||
b6efc951 | 998 | |
/* Run one full mark/sweep collection.  WHAT is a short tag naming the
 * trigger ("cells", "call", ...) used for debugging output.
 *
 * Sequence: run before-gc hook; enter the critical section; bail out if
 * GC is blocked or the stack base is unknown; mark roots (continuation
 * stack, C stacks/registers, protects, subr table); sweep; run the
 * remaining hooks.  scm_gc_heap_lock guards against re-entry.
 */
void
scm_igc (const char *what)
{
  int j;

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  /* One-character trace of freelist state: "*" both empty, "o" only the
   * second still has cells, "m" first has cells.  */
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
  /* During the critical section, only the current thread may run. */
  SCM_CRITICAL_SECTION_START;

  /* fprintf (stderr, "gc: %s\n", what); */

  if (!scm_stack_base || scm_block_gc)
    {
      /* NOTE(review): this early return does not execute
       * SCM_CRITICAL_SECTION_END — verify against the macro's definition
       * that the critical section is not left held on this path.  */
      --scm_gc_running_p;
      return;
    }

  gc_start_stats (what);

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM * elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    /* Everything above the current pointer is dead; clear it so it is
     * not treated as a root.  */
    while (x < bound)
      {
        elts[x] = SCM_BOOL_F;
        ++x;
      }
  }

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  clear_mark_space ();

#ifndef USE_THREADS

  /* Mark objects on the C stack. */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      (   (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
                                       sizeof scm_save_regs_gc_mark)
                          / sizeof (SCM_STACKITEM)));

  {
    scm_sizet stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  /* Mark all explicitly protected objects.  */
  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  t_before_sweep = scm_c_get_internal_run_time ();
  scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  gc_end_stats ();

  SCM_CRITICAL_SECTION_END;
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  --scm_gc_running_p;
}
1115 | ||
1116 | \f | |
939794ce | 1117 | |
a00c95d9 | 1118 | /* {Mark/Sweep} |
0f2d19dd JB |
1119 | */ |
1120 | ||
56495472 ML |
/* First instantiation of the marker: MARK expands to scm_gc_mark.  The
 * file later re-#defines these and #includes itself to produce a second
 * instantiation, scm_gc_mark_dependencies (see below).  */
#define MARK scm_gc_mark
#define FNAME "scm_gc_mark"

/* Closes the `#ifndef MARK_DEPENDENCIES' opened earlier — presumably at
 * the top of this preprocessor-templated region; the opening is outside
 * this excerpt.  */
#endif /*!MARK_DEPENDENCIES*/
0f2d19dd JB |
1125 | |
/* Mark an object precisely.
 *
 * This function body is compiled twice via self-#include: once as
 * scm_gc_mark (MARK_DEPENDENCIES undefined — sets mark bits and recurses)
 * and once as scm_gc_mark_dependencies (MARK_DEPENDENCIES defined — marks
 * everything an object references, but not the object itself or
 * self-cycles).  Tail positions use `ptr = ...; goto_gc_mark_loop' to
 * iterate instead of recursing, bounding C-stack depth for list spines.
 */
void
MARK (SCM p)
#define FUNC_NAME FNAME
{
  register long i;
  register SCM ptr;
  scm_bits_t cell_type;

#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
  /* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
#endif
  ptr = p;

#ifdef MARK_DEPENDENCIES
  goto gc_mark_loop_first_time;
#endif

/* A simple hack for debugging.  Chose the second branch to get a
   meaningful backtrace for crashes inside the GC.
*/
#if 1
#define goto_gc_mark_loop goto gc_mark_loop
#define goto_gc_mark_nimp goto gc_mark_nimp
#else
#define goto_gc_mark_loop RECURSE(ptr); return
#define goto_gc_mark_nimp RECURSE(ptr); return
#endif

gc_mark_loop:
  /* Immediates carry no heap storage — nothing to mark.  */
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:

#ifdef MARK_DEPENDENCIES
  /* In the dependencies variant, anything we loop back to (other than
   * the very first entry) is a referenced object: mark it fully.  */
  if (SCM_EQ_P (ptr, p))
    return;

  scm_gc_mark (ptr);
  return;

gc_mark_loop_first_time:
#endif

#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
  /* We are in debug mode.  Check the ptr exhaustively. */
  if (!scm_cellp (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#else
  /* In non-debug mode, do at least some cheap testing. */
  if (!SCM_CELLP (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#endif

#ifndef MARK_DEPENDENCIES

  /* Already marked: this object and everything it references has been
   * (or is being) processed.  */
  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

#endif

  /* Dispatch on the 7-bit type tag of the cell.  */
  cell_type = SCM_GC_CELL_TYPE (ptr);
  switch (SCM_ITAG7 (cell_type))
    {
    case scm_tcs_cons_nimcar:
      /* Pair with non-immediate CAR: recurse into CAR, iterate on CDR.  */
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CAR (ptr);
          goto_gc_mark_nimp;
        }
      RECURSE (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);
      goto_gc_mark_nimp;
    case scm_tcs_cons_imcar:
      /* Pair with immediate CAR: only the CDR needs marking.  */
      ptr = SCM_CDR (ptr);
      goto_gc_mark_loop;
    case scm_tc7_pws:
      /* Procedure-with-setter: mark setter, iterate on procedure.  */
      RECURSE (SCM_SETTER (ptr));
      ptr = SCM_PROCEDURE (ptr);
      goto_gc_mark_loop;
    case scm_tcs_cons_gloc:
      {
        /* Dirk:FIXME:: The following code is super ugly:  ptr may be a
         * struct or a gloc.  If it is a gloc, the cell word #0 of ptr
         * is the address of a scm_tc16_variable smob.  If it is a
         * struct, the cell word #0 of ptr is a pointer to a struct
         * vtable data region. (The fact that these are accessed in
         * the same way restricts the possibilites to change the data
         * layout of structs or heap cells.)  To discriminate between
         * the two, it is guaranteed that the scm_vtable_index_vcell
         * element of the prospective vtable is always zero.  For a
         * gloc, this location has the CDR of the variable smob, which
         * is guaranteed to be non-zero.
         */
        scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
        scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
        if (vtable_data [scm_vtable_index_vcell] != 0)
          {
            /* ptr is a gloc */
            SCM gloc_car = SCM_PACK (word0);
            RECURSE (gloc_car);
            ptr = SCM_CDR (ptr);
            goto gc_mark_loop;
          }
        else
          {
            /* ptr is a struct */
            SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
            int len = SCM_SYMBOL_LENGTH (layout);
            char * fields_desc = SCM_SYMBOL_CHARS (layout);
            scm_bits_t * struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);

            if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
              {
                RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
                RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
              }
            if (len)
              {
                int x;

                /* Walk the layout descriptor two chars per field; 'p'
                 * marks a protected (boxed) slot that must be traced.  */
                for (x = 0; x < len - 2; x += 2, ++struct_data)
                  if (fields_desc[x] == 'p')
                    RECURSE (SCM_PACK (*struct_data));
                if (fields_desc[x] == 'p')
                  {
                    /* Final field: a tail array stores its element count
                     * in the first data word.  */
                    if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                      for (x = *struct_data++; x; --x, ++struct_data)
                        RECURSE (SCM_PACK (*struct_data));
                    else
                      RECURSE (SCM_PACK (*struct_data));
                  }
              }
            /* mark vtable */
            ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
            goto_gc_mark_loop;
          }
      }
      break;
    case scm_tcs_closures:
      /* Closure: mark code (CLOSCAR), iterate on environment.  */
      if (SCM_IMP (SCM_ENV (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto_gc_mark_nimp;
        }
      RECURSE (SCM_CLOSCAR (ptr));
      ptr = SCM_ENV (ptr);
      goto_gc_mark_nimp;
    case scm_tc7_vector:
      /* Mark elements 1..n-1 recursively, iterate on element 0.  */
      i = SCM_VECTOR_LENGTH (ptr);
      if (i == 0)
        break;
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          RECURSE (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto_gc_mark_loop;
#ifdef CCLO
    case scm_tc7_cclo:
      /* Compiled closure: mark slots 1..n-1, iterate on slot 0.  */
      {
        unsigned long int i = SCM_CCLO_LENGTH (ptr);
        unsigned long int j;
        for (j = 1; j != i; ++j)
          {
            SCM obj = SCM_CCLO_REF (ptr, j);
            if (!SCM_IMP (obj))
              RECURSE (obj);
          }
        ptr = SCM_CCLO_REF (ptr, 0);
        goto_gc_mark_loop;
      }
#endif
#ifdef HAVE_ARRAYS
    /* Uniform numeric vectors hold no SCM references — nothing to trace.  */
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      break;

    case scm_tc7_substring:
      /* Keep the underlying string (in the CDR) alive.  */
      ptr = SCM_CDR (ptr);
      goto_gc_mark_loop;

    case scm_tc7_wvect:
      /* Chain all weak vectors so the sweep phase can process them after
       * ordinary marking has finished.  */
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          int x;
          int len;
          int weak_keys;
          int weak_values;

          len = SCM_VECTOR_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (   SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    RECURSE (SCM_CAR (kvpair));
                  if (!weak_values)
                    RECURSE (SCM_CDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                RECURSE (alist);
            }
        }
      break;

    case scm_tc7_symbol:
      ptr = SCM_PROP_SLOTS (ptr);
      goto_gc_mark_loop;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
      if (!(i < scm_numptob))
        SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
      if (SCM_PTAB_ENTRY(ptr))
        RECURSE (SCM_FILENAME (ptr));
      /* Delegate to the port type's own mark function, then iterate on
       * whatever it asks us to keep alive.  */
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto_gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* printf("found free_cell %X ", ptr); fflush(stdout); */
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          /* These smobs contain no SCM references.  */
          break;
        default:
          i = SCM_SMOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
          if (!(i < scm_numsmob))
            SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
          /* Delegate to the smob's mark function, as for ports.  */
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto_gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
#undef RECURSE
}
#undef FUNC_NAME
0f2d19dd | 1435 | |
56495472 ML |
/* Second instantiation of the marker (only in the primary compilation,
 * i.e. when this file is not already being self-included).  The matching
 * #endif for this #ifndef lies beyond this excerpt.  */
#ifndef MARK_DEPENDENCIES

#undef MARK
#undef FNAME

/* And here we define `scm_gc_mark_dependencies', by including this
 * same file in itself.
 */
#define MARK scm_gc_mark_dependencies
#define FNAME "scm_gc_mark_dependencies"
#define MARK_DEPENDENCIES
#include "gc.c"
#undef MARK_DEPENDENCIES
#undef MARK
#undef FNAME
1451 | ||
0f2d19dd JB |
1452 | |
/* Mark a Region Conservatively
 */

/* Treat the N words at X as potential SCM values (e.g. a C stack or saved
 * registers) and mark every word that plausibly points at a live heap
 * cell.  A word is accepted only if a binary search over the heap segment
 * table places it inside a segment, it is not a card header, and it is
 * properly aligned for the segment's cell span.
 */
void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  unsigned long m;

  for (m = 0; m < n; ++m)
    {
      SCM obj = * (SCM *) &x[m];
      if (SCM_CELLP (obj))
        {
          SCM_CELLPTR ptr = SCM2PTR (obj);
          int i = 0;
          int j = scm_n_heap_segs - 1;
          /* Quick reject: outside the overall heap range entirely.  */
          if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
              && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
            {
              /* Binary search for the segment containing ptr; seg_id
               * ends up holding its index.  The loop always exits via
               * `break' or `continue'.  */
              while (i <= j)
                {
                  int seg_id;
                  seg_id = -1;
                  if ((i == j)
                      || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                    seg_id = i;
                  else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                    seg_id = j;
                  else
                    {
                      int k;
                      k = (i + j) / 2;
                      if (k == i)
                        break;
                      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                        {
                          j = k;
                          ++i;
                          if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                            continue;
                          else
                            break;
                        }
                      else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                        {
                          i = k;
                          --j;
                          if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                            continue;
                          else
                            break;
                        }
                    }

                  /* Card headers are bookkeeping, never live cells.  */
                  if (SCM_GC_IN_CARD_HEADERP (ptr))
                    break;

                  /* Double-cell segments require double-cell alignment.  */
                  if (scm_heap_table[seg_id].span == 1
                      || DOUBLECELL_ALIGNED_P (obj))
                    scm_gc_mark (obj);

                  break;
                }
            }
        }
    }
}
1520 | ||
1521 | ||
/* The function scm_cellp determines whether an SCM value can be regarded as a
 * pointer to a cell on the heap.  Binary search is used in order to determine
 * the heap segment that contains the cell.
 *
 * Returns 1 only when VALUE is cell-tagged, falls inside some heap
 * segment's bounds, is not a card header, and is correctly aligned for
 * that segment's cell span; 0 otherwise.
 */
int
scm_cellp (SCM value)
{
  if (SCM_CELLP (value)) {
    scm_cell * ptr = SCM2PTR (value);
    unsigned int i = 0;
    unsigned int j = scm_n_heap_segs - 1;

    if (SCM_GC_IN_CARD_HEADERP (ptr))
      return 0;

    /* Binary search: narrow [i, j] to the single segment that could
     * contain ptr.  Exactly one branch fires per iteration because the
     * two bound tests are complementary.  */
    while (i < j) {
      int k = (i + j) / 2;
      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
        j = k;
      } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
        i = k + 1;
      }
    }

    /* Final exhaustive check on the candidate segment.  */
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
        && (scm_heap_table[i].span == 1 || DOUBLECELL_ALIGNED_P (value))
        && !SCM_GC_IN_CARD_HEADERP (ptr)
        )
      return 1;
    else
      return 0;
  } else
    return 0;
}
1557 | ||
1558 | ||
/* Reset FREELIST's per-sweep state before scm_gc_sweep rebuilds its
 * clusters.  `collected_1' keeps last sweep's yield so the heap-growth
 * decision in gc_sweep_freelist_finish can compare two sweeps.  */
static void
gc_sweep_freelist_start (scm_freelist_t *freelist)
{
  freelist->cells = SCM_EOL;
  freelist->left_to_collect = freelist->cluster_size;
  freelist->clusters_allocated = 0;
  freelist->clusters = SCM_EOL;
  /* clustertail points at the link to append the next cluster to.  */
  freelist->clustertail = &freelist->clusters;
  freelist->collected_1 = freelist->collected;
  freelist->collected = 0;
}
1570 | ||
/* Finish a sweep for FREELIST: append the partially filled last cluster,
 * fold its cell count into `collected', add to the global tally, and
 * decide whether the heap should grow before the next shortage.  */
static void
gc_sweep_freelist_finish (scm_freelist_t *freelist)
{
  int collected;
  *freelist->clustertail = freelist->cells;
  if (!SCM_NULLP (freelist->cells))
    {
      /* Convert the head free cell into a cluster header: word 0 links
       * to the rest of the cells, word 1 terminates the cluster list.  */
      SCM c = freelist->cells;
      SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
      SCM_SET_CELL_WORD_1 (c, SCM_EOL);
      /* Account for the cells in this final, partial cluster.  */
      freelist->collected +=
        freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}
0f2d19dd | 1596 | |
/* Advance PTR to the next data cell of the given SPAN, skipping over a
 * GC card's header cells if the naive increment lands inside one.  */
#define NEXT_DATA_CELL(ptr, span) \
  do { \
    scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
    (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
             CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
             : nxt__); \
  } while (0)
1604 | ||
/* Sweep phase of the collector: walk every heap segment, thread the
 * unmarked (dead) cells into clusters on the segment's freelist, and
 * release any malloc'ed storage owned by each dead object (vector
 * bodies, string characters, smob data, open ports, ...).
 *
 * The local M accumulates the number of malloc'ed bytes freed; it is
 * subtracted from scm_mallocated at the end.  Cells belonging to live
 * (marked) objects, subrs and revealed ports are left untouched.  */
void
scm_gc_sweep ()
#define FUNC_NAME "scm_gc_sweep"
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_freelist_t *freelist;
  register long m;
  register int span;
  long i;
  scm_sizet seg_size;

  m = 0;

  /* Reset per-freelist sweep statistics before scanning segments.  */
  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register unsigned int left_to_collect;
      register scm_sizet j;

      /* Unmarked cells go onto the front of the freelist this heap
         segment points to.  Rather than updating the real freelist
         pointer as we go along, we accumulate the new head in
         nfreelist.  Then, if it turns out that the entire segment is
         free, we free (i.e., malloc's free) the whole segment, and
         simply don't assign nfreelist back into the real freelist. */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;

      /* use only data cells in seg_size */
      seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;

      scm_gc_cells_swept += seg_size;

      /* J counts the remaining cells (in units of SPAN); the loop body
         may adjust both J and PTR when skipping card headers.  */
      for (j = seg_size + span; j -= span; ptr += span)
        {
          SCM scmptr;

          if (SCM_GC_IN_CARD_HEADERP (ptr))
            {
              SCM_CELLPTR nxt;

              /* cheat here */
              nxt = ptr;
              NEXT_DATA_CELL (nxt, span);
              j += span;

              ptr = nxt - span;
              continue;
            }

          scmptr = PTR2SCM (ptr);

          /* Marked cells are live; leave them alone.  */
          if (SCM_GCMARKP (scmptr))
            continue;

          /* Dead cell: free any out-of-line storage it owns, keyed on
             its type tag.  Cases that `continue' (subrs, chained
             structs, revealed ports) keep the cell out of the
             freelist.  */
          switch SCM_TYP7 (scmptr)
            {
            case scm_tcs_cons_gloc:
              {
                /* Dirk:FIXME:: Again, super ugly code:  scmptr may be a
                 * struct or a gloc.  See the corresponding comment in
                 * scm_gc_mark.
                 */
                scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
                                    - scm_tc3_cons_gloc);
                /* access as struct */
                scm_bits_t * vtable_data = (scm_bits_t *) word0;
                if (vtable_data[scm_vtable_index_vcell] == 0)
                  {
                    /* Structs need to be freed in a special order.
                     * This is handled by GC C hooks in struct.c.
                     */
                    SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
                    scm_structs_to_free = scmptr;
                    continue;
                  }
                /* fall through so that scmptr gets collected */
              }
              break;
            case scm_tcs_cons_imcar:
            case scm_tcs_cons_nimcar:
            case scm_tcs_closures:
            case scm_tc7_pws:
              break;
            case scm_tc7_wvect:
              /* Weak vectors carry two extra header words before the base.  */
              m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
              scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
              break;
            case scm_tc7_vector:
              {
                unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += length * sizeof (scm_bits_t);
                    scm_must_free (SCM_VECTOR_BASE (scmptr));
                  }
                break;
              }
#ifdef CCLO
            case scm_tc7_cclo:
              m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
              scm_must_free (SCM_CCLO_BASE (scmptr));
              break;
#endif
#ifdef HAVE_ARRAYS
            case scm_tc7_bvect:
              {
                unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
                    scm_must_free (SCM_BITVECTOR_BASE (scmptr));
                  }
              }
              break;
            case scm_tc7_byvect:
            case scm_tc7_ivect:
            case scm_tc7_uvect:
            case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
            case scm_tc7_llvect:
#endif
            case scm_tc7_fvect:
            case scm_tc7_dvect:
            case scm_tc7_cvect:
              m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
              scm_must_free (SCM_UVECTOR_BASE (scmptr));
              break;
#endif
            case scm_tc7_substring:
              /* Substrings share their characters with the parent string.  */
              break;
            case scm_tc7_string:
              m += SCM_STRING_LENGTH (scmptr) + 1;
              scm_must_free (SCM_STRING_CHARS (scmptr));
              break;
            case scm_tc7_symbol:
              m += SCM_SYMBOL_LENGTH (scmptr) + 1;
              scm_must_free (SCM_SYMBOL_CHARS (scmptr));
              break;
            case scm_tcs_subrs:
              /* the various "subrs" (primitives) are never freed */
              continue;
            case scm_tc7_port:
              if SCM_OPENP (scmptr)
                {
                  int k = SCM_PTOBNUM (scmptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
                  if (!(k < scm_numptob))
                    SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
                  /* Keep "revealed" ports alive.  */
                  if (scm_revealed_count (scmptr) > 0)
                    continue;
                  /* Yes, I really do mean scm_ptobs[k].free */
                  /* rather than ftobs[k].close.  .close */
                  /* is for explicit CLOSE-PORT by user */
                  m += (scm_ptobs[k].free) (scmptr);
                  SCM_SETSTREAM (scmptr, 0);
                  scm_remove_from_port_table (scmptr);
                  scm_gc_ports_collected++;
                  SCM_CLR_PORT_OPEN_FLAG (scmptr);
                }
              break;
            case scm_tc7_smob:
              switch SCM_TYP16 (scmptr)
                {
                case scm_tc_free_cell:
                case scm_tc16_real:
                  break;
#ifdef SCM_BIGDIG
                case scm_tc16_big:
                  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
                  scm_must_free (SCM_BDIGITS (scmptr));
                  break;
#endif /* def SCM_BIGDIG */
                case scm_tc16_complex:
                  m += sizeof (scm_complex_t);
                  scm_must_free (SCM_COMPLEX_MEM (scmptr));
                  break;
                default:
                  {
                    int k;
                    k = SCM_SMOBNUM (scmptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
                    if (!(k < scm_numsmob))
                      SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
                    if (scm_smobs[k].free)
                      m += (scm_smobs[k].free) (scmptr);
                    break;
                  }
                }
              break;
            default:
              SCM_MISC_ERROR ("unknown type", SCM_EOL);
            }

          /* The cell is now free.  When a whole cluster's worth has
             accumulated, close the cluster and chain it onto the
             freelist's cluster list; otherwise just push the cell onto
             the per-segment nfreelist.  */
          if (!--left_to_collect)
            {
              SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
              *freelist->clustertail = scmptr;
              freelist->clustertail = SCM_CDRLOC (scmptr);

              nfreelist = SCM_EOL;
              freelist->collected += span * freelist->cluster_size;
              left_to_collect = freelist->cluster_size;
            }
          else
            {
              /* Stick the new cell on the front of nfreelist.  It's
                 critical that we mark this cell as freed; otherwise, the
                 conservative collector might trace it as some other type
                 of object.  */
              SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
              SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
              nfreelist = scmptr;
            }
        }

#ifdef GC_FREE_SEGMENTS
      /* NOTE(review): `n' is not declared anywhere in this function —
         this conditionally-compiled branch looks bit-rotted and will
         not compile if GC_FREE_SEGMENTS is ever defined; confirm
         before enabling.  */
      if (n == seg_size)
        {
          register long j;

          freelist->heap_size -= seg_size;
          free ((char *) scm_heap_table[i].bounds[0]);
          scm_heap_table[i].bounds[0] = 0;
          for (j = i + 1; j < scm_n_heap_segs; j++)
            scm_heap_table[j - 1] = scm_heap_table[j];
          scm_n_heap_segs -= 1;
          i--; /* We need to scan the segment just moved.  */
        }
      else
#endif /* ifdef GC_FREE_SEGMENTS */
        {
          /* Update the real freelist pointer to point to the head of
             the list of free cells we've built for this segment. */
          freelist->cells = nfreelist;
          freelist->left_to_collect = left_to_collect;
        }

#ifdef GUILE_DEBUG_FREELIST
      scm_map_free_list ();
#endif
    }

  /* Fold the partial clusters into the freelists and update the
     grow-heap heuristics.  */
  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
#undef FUNC_NAME
0f2d19dd JB |
1873 | |
1874 | ||
1875 | \f | |
0f2d19dd JB |
1876 | /* {Front end to malloc} |
1877 | * | |
9d47a1e6 ML |
1878 | * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc, |
1879 | * scm_done_free | |
0f2d19dd | 1880 | * |
c6c79933 GH |
1881 | * These functions provide services comparable to malloc, realloc, and |
1882 | * free. They should be used when allocating memory that will be under | |
1883 | * control of the garbage collector, i.e., if the memory may be freed | |
1884 | * during garbage collection. | |
1885 | */ | |
bc9d9bb2 | 1886 | |
0f2d19dd JB |
1887 | /* scm_must_malloc |
1888 | * Return newly malloced storage or throw an error. | |
1889 | * | |
1890 | * The parameter WHAT is a string for error reporting. | |
a00c95d9 | 1891 | * If the threshold scm_mtrigger will be passed by this |
0f2d19dd JB |
1892 | * allocation, or if the first call to malloc fails, |
1893 | * garbage collect -- on the presumption that some objects | |
1894 | * using malloced storage may be collected. | |
1895 | * | |
1896 | * The limit scm_mtrigger may be raised by this allocation. | |
1897 | */ | |
/* Allocate SIZE bytes or signal an error (see the block comment above).
 *
 * WHAT is a short string naming the allocation, used for error
 * reporting and (under GUILE_DEBUG_MALLOC) leak tracking.
 *
 * If the allocation would push scm_mallocated past scm_mtrigger, or if
 * the first malloc fails, a GC is run and the allocation retried; on
 * success after GC the trigger may be raised.  On final failure
 * scm_memory_error is called (which does not return normally).
 * NOTE(review): `scm_mallocated + size' can wrap for huge SIZE —
 * unsigned arithmetic, so not UB, but the trigger test would be
 * wrong; confirm callers bound SIZE.  */
void *
scm_must_malloc (scm_sizet size, const char *what)
{
  void *ptr;
  unsigned long nm = scm_mallocated + size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_register (ptr, what);
#endif
          return ptr;
        }
    }

  /* Over the trigger or malloc failed: collect and try once more.  */
  scm_igc (what);

  nm = scm_mallocated + size;
  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      /* Raise the trigger so the next GC doesn't fire immediately.  */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_register (ptr, what);
#endif

      return ptr;
    }

  scm_memory_error (what);
}
1939 | ||
1940 | ||
1941 | /* scm_must_realloc | |
1942 | * is similar to scm_must_malloc. | |
1943 | */ | |
07806695 JB |
/* Grow (or shrink) the block WHERE from OLD_SIZE to SIZE bytes, with
 * the same GC-retry and trigger-adjustment policy as scm_must_malloc.
 *
 * WHERE must have been obtained from scm_must_malloc/scm_must_realloc;
 * OLD_SIZE is its current accounted size; WHAT names the allocation
 * for error reporting.  On final failure scm_memory_error is called;
 * per realloc semantics WHERE is still valid at that point.  */
void *
scm_must_realloc (void *where,
                  scm_sizet old_size,
                  scm_sizet size,
                  const char *what)
{
  void *ptr;
  scm_sizet nm = scm_mallocated + size - old_size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = realloc (where, size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_reregister (where, ptr, what);
#endif
          return ptr;
        }
    }

  /* Over the trigger or realloc failed: collect and try once more.  */
  scm_igc (what);

  nm = scm_mallocated + size - old_size;
  SCM_SYSCALL (ptr = realloc (where, size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      /* Raise the trigger so the next GC doesn't fire immediately.  */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_reregister (where, ptr, what);
#endif
      return ptr;
    }

  scm_memory_error (what);
}
1987 | ||
e4a7824f MV |
/* Copy the first LENGTH bytes of STR into freshly allocated,
   NUL-terminated storage.  The storage comes from scm_must_malloc,
   which may trigger a GC or signal an error on exhaustion; the caller
   owns the result and releases it with scm_must_free.  */
char *
scm_must_strndup (const char *str, unsigned long length)
{
  char *copy = scm_must_malloc (length + 1, "scm_must_strndup");

  copy[length] = '\0';
  memcpy (copy, str, length);
  return copy;
}
1996 | ||
/* Duplicate the NUL-terminated string STR into fresh storage obtained
   via scm_must_strndup; the caller owns the result.  */
char *
scm_must_strdup (const char *str)
{
  unsigned long len = strlen (str);

  return scm_must_strndup (str, len);
}
acf4331f | 2002 | |
a00c95d9 | 2003 | void |
07806695 | 2004 | scm_must_free (void *obj) |
acf4331f | 2005 | #define FUNC_NAME "scm_must_free" |
0f2d19dd | 2006 | { |
bc9d9bb2 MD |
2007 | #ifdef GUILE_DEBUG_MALLOC |
2008 | scm_malloc_unregister (obj); | |
2009 | #endif | |
0f2d19dd JB |
2010 | if (obj) |
2011 | free (obj); | |
2012 | else | |
acf4331f | 2013 | SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL); |
0f2d19dd | 2014 | } |
acf4331f DH |
2015 | #undef FUNC_NAME |
2016 | ||
0f2d19dd | 2017 | |
c68296f8 MV |
2018 | /* Announce that there has been some malloc done that will be freed |
2019 | * during gc. A typical use is for a smob that uses some malloced | |
2020 | * memory but can not get it from scm_must_malloc (for whatever | |
2021 | * reason). When a new object of this smob is created you call | |
2022 | * scm_done_malloc with the size of the object. When your smob free | |
2023 | * function is called, be sure to include this size in the return | |
9d47a1e6 ML |
2024 | * value. |
2025 | * | |
2026 | * If you can't actually free the memory in the smob free function, | |
2027 | * for whatever reason (like reference counting), you still can (and | |
2028 | * should) report the amount of memory freed when you actually free it. | |
2029 | * Do it by calling scm_done_malloc with the _negated_ size. Clever, | |
2030 | * eh? Or even better, call scm_done_free. */ | |
0f2d19dd | 2031 | |
c68296f8 | 2032 | void |
6e8d25a6 | 2033 | scm_done_malloc (long size) |
c68296f8 MV |
2034 | { |
2035 | scm_mallocated += size; | |
2036 | ||
2037 | if (scm_mallocated > scm_mtrigger) | |
2038 | { | |
2039 | scm_igc ("foreign mallocs"); | |
2040 | if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) | |
2041 | { | |
2042 | if (scm_mallocated > scm_mtrigger) | |
2043 | scm_mtrigger = scm_mallocated + scm_mallocated / 2; | |
2044 | else | |
2045 | scm_mtrigger += scm_mtrigger / 2; | |
2046 | } | |
2047 | } | |
2048 | } | |
2049 | ||
9d47a1e6 ML |
2050 | void |
2051 | scm_done_free (long size) | |
2052 | { | |
2053 | scm_mallocated -= size; | |
2054 | } | |
2055 | ||
c68296f8 MV |
2056 | |
2057 | \f | |
0f2d19dd JB |
2058 | /* {Heap Segments} |
2059 | * | |
2060 | * Each heap segment is an array of objects of a particular size. | |
2061 | * Every segment has an associated (possibly shared) freelist. | |
2062 | * A table of segment records is kept that records the upper and | |
2063 | * lower extents of the segment; this is used during the conservative | |
2064 | * phase of gc to identify probable gc roots (because they point | |
c68296f8 | 2065 | * into valid segments at reasonable offsets). */ |
0f2d19dd JB |
2066 | |
/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

/* Upper bound (in bytes) on the size of any single heap segment;
 * enforced in alloc_some_heap.  */
scm_sizet scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

/* Table of all heap segments, kept sorted by base address (see the
 * insertion loop in init_heap_seg).  scm_n_heap_segs entries are in
 * use out of heap_segment_table_size allocated slots; the table is
 * grown by alloc_some_heap.  */
scm_heap_seg_data_t * scm_heap_table = 0;
static unsigned int heap_segment_table_size = 0;
int scm_n_heap_segs = 0;
2084 | ||
0f2d19dd | 2085 | /* init_heap_seg |
d6884e63 | 2086 | * initializes a new heap segment and returns the number of objects it contains. |
0f2d19dd | 2087 | * |
d6884e63 ML |
2088 | * The segment origin and segment size in bytes are input parameters. |
2089 | * The freelist is both input and output. | |
0f2d19dd | 2090 | * |
d6884e63 ML |
2091 | * This function presumes that the scm_heap_table has already been expanded |
2092 | * to accommodate a new segment record and that the markbit space was reserved |
2093 | * for all the cards in this segment. | |
0f2d19dd JB |
2094 | */ |
2095 | ||
d6884e63 ML |
/* Initialize the card CARD: attach a fresh mark-bit vector obtained
 * from get_bvec, and for two-cell spans flag the card as holding
 * double cells.  */
#define INIT_CARD(card, span) \
  do { \
    SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
    if ((span) == 2) \
      SCM_GC_SET_CARD_DOUBLECELL (card); \
  } while (0)
0f2d19dd | 2102 | |
/* Initialize the heap segment [SEG_ORG, SEG_ORG + SIZE) for FREELIST
 * and return SIZE, or 0 if SEG_ORG is NULL (see the block comment
 * above for the full contract).  The segment record is inserted into
 * scm_heap_table in address order; all cells are strung into clusters
 * of free cells which are prepended to FREELIST->clusters.  */
static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  /* Align the begin ptr up.
   */
  ptr = SCM_GC_CARD_UP (seg_org);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);

  /* Find the right place and insert the segment record.
   *
   */
  for (new_seg_index = 0;
       ( (new_seg_index < scm_n_heap_segs)
         && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  /* Shift later records up to make room; the table is assumed to have
     a spare slot (grown in alloc_some_heap).  */
  {
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  /*n_new_cells*/
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;

    NEXT_DATA_CELL (ptr, span);
    while (ptr < seg_end)
      {
        scm_cell *nxt = ptr;
        scm_cell *prv = NULL;
        scm_cell *last_card = NULL;
        int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
        NEXT_DATA_CELL(nxt, span);

        /* Allocate cluster spine
         */
        *clusterp = PTR2SCM (ptr);
        SCM_SETCAR (*clusterp, PTR2SCM (nxt));
        clusterp = SCM_CDRLOC (*clusterp);
        ptr = nxt;

        /* Thread the remaining cells of the cluster into a free list,
           initializing each card (mark bitvector) on first touch.  */
        while (n_data_cells--)
          {
            scm_cell *card = SCM_GC_CELL_CARD (ptr);
            SCM scmptr = PTR2SCM (ptr);
            nxt = ptr;
            NEXT_DATA_CELL (nxt, span);
            prv = ptr;

            if (card != last_card)
              {
                INIT_CARD (card, span);
                last_card = card;
              }

            SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
            SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));

            ptr = nxt;
          }

        SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
      }

    /* sanity check */
    {
      scm_cell *ref = seg_end;
      NEXT_DATA_CELL (ref, span);
      if (ref != ptr)
        /* [cmm] looks like the segment size doesn't divide cleanly by
           cluster size. bad cmm! */
        abort();
    }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}
2216 | ||
a00c95d9 ML |
2217 | static scm_sizet |
2218 | round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len) | |
2219 | { | |
2220 | scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist); | |
2221 | ||
2222 | return | |
2223 | (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes | |
2224 | + ALIGNMENT_SLACK (freelist); | |
2225 | } | |
2226 | ||
a00c95d9 | 2227 | static void |
b6efc951 | 2228 | alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy) |
acf4331f | 2229 | #define FUNC_NAME "alloc_some_heap" |
0f2d19dd | 2230 | { |
0f2d19dd | 2231 | SCM_CELLPTR ptr; |
b37fe1c5 | 2232 | long len; |
a00c95d9 | 2233 | |
9d47a1e6 | 2234 | if (scm_gc_heap_lock) |
b6efc951 DH |
2235 | { |
2236 | /* Critical code sections (such as the garbage collector) aren't | |
2237 | * supposed to add heap segments. | |
2238 | */ | |
2239 | fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n"); | |
2240 | abort (); | |
2241 | } | |
0f2d19dd | 2242 | |
9d47a1e6 | 2243 | if (scm_n_heap_segs == heap_segment_table_size) |
b6efc951 DH |
2244 | { |
2245 | /* We have to expand the heap segment table to have room for the new | |
2246 | * segment. Do not yet increment scm_n_heap_segs -- that is done by | |
2247 | * init_heap_seg only if the allocation of the segment itself succeeds. | |
2248 | */ | |
2249 | unsigned int new_table_size = scm_n_heap_segs + 1; | |
2250 | size_t size = new_table_size * sizeof (scm_heap_seg_data_t); | |
2251 | scm_heap_seg_data_t * new_heap_table; | |
2252 | ||
2253 | SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *) | |
2254 | realloc ((char *)scm_heap_table, size))); | |
2255 | if (!new_heap_table) | |
2256 | { | |
2257 | if (error_policy == abort_on_error) | |
2258 | { | |
2259 | fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n"); | |
2260 | abort (); | |
2261 | } | |
2262 | else | |
2263 | { | |
2264 | return; | |
2265 | } | |
2266 | } | |
2267 | else | |
2268 | { | |
2269 | scm_heap_table = new_heap_table; | |
2270 | heap_segment_table_size = new_table_size; | |
2271 | } | |
2272 | } | |
0f2d19dd | 2273 | |
0f2d19dd | 2274 | /* Pick a size for the new heap segment. |
a00c95d9 | 2275 | * The rule for picking the size of a segment is explained in |
0f2d19dd JB |
2276 | * gc.h |
2277 | */ | |
4c48ba06 | 2278 | { |
1811ebce MD |
2279 | /* Assure that the new segment is predicted to be large enough. |
2280 | * | |
2281 | * New yield should at least equal GC fraction of new heap size, i.e. | |
2282 | * | |
2283 | * y + dh > f * (h + dh) | |
2284 | * | |
2285 | * y : yield | |
8fef55a8 | 2286 | * f : min yield fraction |
1811ebce MD |
2287 | * h : heap size |
2288 | * dh : size of new heap segment | |
2289 | * | |
2290 | * This gives dh > (f * h - y) / (1 - f) | |
bda1446c | 2291 | */ |
8fef55a8 | 2292 | int f = freelist->min_yield_fraction; |
1811ebce MD |
2293 | long h = SCM_HEAP_SIZE; |
2294 | long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f); | |
4c48ba06 MD |
2295 | len = SCM_EXPHEAP (freelist->heap_size); |
2296 | #ifdef DEBUGINFO | |
2297 | fprintf (stderr, "(%d < %d)", len, min_cells); | |
2298 | #endif | |
2299 | if (len < min_cells) | |
1811ebce | 2300 | len = min_cells + freelist->cluster_size; |
4c48ba06 | 2301 | len *= sizeof (scm_cell); |
1811ebce MD |
2302 | /* force new sampling */ |
2303 | freelist->collected = LONG_MAX; | |
4c48ba06 | 2304 | } |
a00c95d9 | 2305 | |
4c48ba06 MD |
2306 | if (len > scm_max_segment_size) |
2307 | len = scm_max_segment_size; | |
0f2d19dd JB |
2308 | |
2309 | { | |
2310 | scm_sizet smallest; | |
2311 | ||
a00c95d9 | 2312 | smallest = CLUSTER_SIZE_IN_BYTES (freelist); |
a00c95d9 | 2313 | |
0f2d19dd | 2314 | if (len < smallest) |
a00c95d9 | 2315 | len = smallest; |
0f2d19dd JB |
2316 | |
2317 | /* Allocate with decaying ambition. */ | |
2318 | while ((len >= SCM_MIN_HEAP_SEG_SIZE) | |
2319 | && (len >= smallest)) | |
2320 | { | |
1811ebce | 2321 | scm_sizet rounded_len = round_to_cluster_size (freelist, len); |
a00c95d9 | 2322 | SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len)); |
0f2d19dd JB |
2323 | if (ptr) |
2324 | { | |
a00c95d9 | 2325 | init_heap_seg (ptr, rounded_len, freelist); |
0f2d19dd JB |
2326 | return; |
2327 | } | |
2328 | len /= 2; | |
2329 | } | |
2330 | } | |
2331 | ||
b6efc951 DH |
2332 | if (error_policy == abort_on_error) |
2333 | { | |
2334 | fprintf (stderr, "alloc_some_heap: Could not grow heap.\n"); | |
2335 | abort (); | |
2336 | } | |
0f2d19dd | 2337 | } |
acf4331f | 2338 | #undef FUNC_NAME |
0f2d19dd | 2339 | |
0f2d19dd JB |
2340 | \f |
2341 | /* {GC Protection Helper Functions} | |
2342 | */ | |
2343 | ||
2344 | ||
5d2b97cd DH |
2345 | /* |
2346 | * If within a function you need to protect one or more scheme objects from | |
2347 | * garbage collection, pass them as parameters to one of the | |
2348 | * scm_remember_upto_here* functions below. These functions don't do | |
2349 | * anything, but since the compiler does not know that they are actually | |
2350 | * no-ops, it will generate code that calls these functions with the given | |
2351 | * parameters. Therefore, you can be sure that the compiler will keep those | |
2352 | * scheme values alive (on the stack or in a register) up to the point where | |
2353 | * scm_remember_upto_here* is called. In other words, place the call to | |
2354 | * scm_remember_upto_here* _behind_ the last code in your function, that | |
2355 | * depends on the scheme object to exist. | |
2356 | * | |
2357 | * Example: We want to make sure, that the string object str does not get | |
2358 | * garbage collected during the execution of 'some_function', because | |
2359 | * otherwise the characters belonging to str would be freed and | |
2360 | * 'some_function' might access freed memory. To make sure that the compiler | |
2361 | * keeps str alive on the stack or in a register such that it is visible to | |
2362 | * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the | |
2363 | * call to 'some_function'. Note that this would not be necessary if str was | |
2364 | * used anyway after the call to 'some_function'. | |
2365 | * char *chars = SCM_STRING_CHARS (str); | |
2366 | * some_function (chars); | |
2367 | * scm_remember_upto_here_1 (str); // str will be alive up to this point. | |
2368 | */ | |
2369 | ||
/* Deliberately empty: being an opaque call, it forces the compiler to
   keep OBJ live (in a register or on the stack, visible to the
   conservative GC) up to this point.  See the block comment above.  */
void
scm_remember_upto_here_1 (SCM obj)
{
  /* Empty.  Protects a single object from garbage collection. */
}
2375 | ||
/* Deliberately empty: keeps OBJ1 and OBJ2 live for the conservative
   GC until this call.  See the block comment above.  */
void
scm_remember_upto_here_2 (SCM obj1, SCM obj2)
{
  /* Empty.  Protects two objects from garbage collection. */
}
2381 | ||
/* Deliberately empty: keeps all its arguments live for the
   conservative GC until this call.  See the block comment above.  */
void
scm_remember_upto_here (SCM obj, ...)
{
  /* Empty.  Protects any number of objects from garbage collection. */
}
2387 | ||
2388 | ||
2389 | #if (SCM_DEBUG_DEPRECATED == 0) | |
2390 | ||
/* Deprecated predecessor of the scm_remember_upto_here_* functions;
   compiled only when SCM_DEBUG_DEPRECATED == 0.  */
void
scm_remember (SCM *ptr)
{
  /* empty */
}
0f2d19dd | 2396 | |
5d2b97cd | 2397 | #endif /* SCM_DEBUG_DEPRECATED == 0 */ |
1cc91f1b | 2398 | |
c209c88e | 2399 | /* |
41b0806d GB |
2400 | These crazy functions prevent garbage collection |
2401 | of arguments after the first argument by | |
2402 | ensuring they remain live throughout the | |
2403 | function because they are used in the last | |
2404 | line of the code block. | |
2405 | It'd be better to have a nice compiler hint to | |
2406 | aid the conservative stack-scanning GC. --03/09/00 gjb */ | |
0f2d19dd JB |
/* Return ELT unchanged; the trailing arguments exist solely so their
   values stay live (visible to the conservative stack-scanning GC)
   until this call — see the comment above.  */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}
2412 | ||
41b0806d GB |
/* Integer variant of scm_return_first: returns I, keeping the trailing
   arguments live until this call.  */
int
scm_return_first_int (int i, ...)
{
  return i;
}
2418 | ||
0f2d19dd | 2419 | |
/* Protect OBJ from garbage collection permanently by consing it onto
   the global scm_permobjs list; returns OBJ.  The SCM_REDEFER_INTS /
   SCM_REALLOW_INTS pair brackets the list update as a critical
   section.  */
SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
2428 | ||
2429 | ||
7bd4fbe2 MD |
2430 | /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all |
2431 | other references are dropped, until the object is unprotected by calling | |
2432 | scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest, | |
2433 | i. e. it is possible to protect the same object several times, but it is | |
2434 | necessary to unprotect the object the same number of times to actually get | |
2435 | the object unprotected. It is an error to unprotect an object more often | |
2436 | than it has been protected before. The function scm_protect_object returns | |
2437 | OBJ. | |
2438 | */ | |
2439 | ||
2440 | /* Implementation note: For every object X, there is a counter which | |
2441 | scm_protect_object(X) increments and scm_unprotect_object(X) decrements. | |
2442 | */ | |
686765af | 2443 | |
ef290276 | 2444 | SCM |
6e8d25a6 | 2445 | scm_protect_object (SCM obj) |
ef290276 | 2446 | { |
686765af | 2447 | SCM handle; |
9d47a1e6 | 2448 | |
686765af | 2449 | /* This critical section barrier will be replaced by a mutex. */ |
2dd6a83a | 2450 | SCM_REDEFER_INTS; |
9d47a1e6 | 2451 | |
0f0f0899 MD |
2452 | handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0)); |
2453 | SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1)); | |
9d47a1e6 | 2454 | |
2dd6a83a | 2455 | SCM_REALLOW_INTS; |
9d47a1e6 | 2456 | |
ef290276 JB |
2457 | return obj; |
2458 | } | |
2459 | ||
2460 | ||
2461 | /* Remove any protection for OBJ established by a prior call to | |
dab7f566 | 2462 | scm_protect_object. This function returns OBJ. |
ef290276 | 2463 | |
dab7f566 | 2464 | See scm_protect_object for more information. */ |
ef290276 | 2465 | SCM |
6e8d25a6 | 2466 | scm_unprotect_object (SCM obj) |
ef290276 | 2467 | { |
686765af | 2468 | SCM handle; |
9d47a1e6 | 2469 | |
686765af | 2470 | /* This critical section barrier will be replaced by a mutex. */ |
2dd6a83a | 2471 | SCM_REDEFER_INTS; |
9d47a1e6 | 2472 | |
686765af | 2473 | handle = scm_hashq_get_handle (scm_protects, obj); |
9d47a1e6 | 2474 | |
22a52da1 | 2475 | if (SCM_FALSEP (handle)) |
686765af | 2476 | { |
0f0f0899 MD |
2477 | fprintf (stderr, "scm_unprotect_object called on unprotected object\n"); |
2478 | abort (); | |
686765af | 2479 | } |
6a199940 DH |
2480 | else |
2481 | { | |
2482 | unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1; | |
2483 | if (count == 0) | |
2484 | scm_hashq_remove_x (scm_protects, obj); | |
2485 | else | |
2486 | SCM_SETCDR (handle, SCM_MAKINUM (count)); | |
2487 | } | |
686765af | 2488 | |
2dd6a83a | 2489 | SCM_REALLOW_INTS; |
ef290276 JB |
2490 | |
2491 | return obj; | |
2492 | } | |
2493 | ||
c45acc34 JB |
/* Set to non-zero once process termination has begun; other code can
   test this to avoid unnecessary work during shutdown. */
int terminating;

/* called on process termination. */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
/* on_exit handlers receive the exit status and a user-supplied
   argument; both are ignored here. */
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  /* Flush buffered port output so it is not lost at exit. */
  scm_flush_all_ports ();
}
ef290276 | 2514 | |
0f2d19dd | 2515 | \f |
/* Allocate and install the first heap segment for FREELIST.  Tries
   INIT_HEAP_SIZE first (rounded up to a whole number of clusters) and
   falls back to SCM_HEAP_SEG_SIZE if that fails.  Also derives the
   freelist's minimum-yield threshold from its configured fraction.
   Returns 0 on success, 1 when no segment could be installed.
   NOTE(review): the malloc result is handed to init_heap_seg
   unchecked -- presumably init_heap_seg treats a NULL base as
   failure; confirm. */
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);

  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
                      rounded_size,
                      freelist))
    {
      /* Requested size failed; retry with the default segment size. */
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
                          rounded_size,
                          freelist))
        return 1;
    }
  else
    /* Only set when the (possibly larger) requested size succeeded. */
    scm_expmem = 1;

  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
                           / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}
2541 | ||
2542 | \f | |
4c48ba06 MD |
2543 | static void |
2544 | init_freelist (scm_freelist_t *freelist, | |
2545 | int span, | |
2546 | int cluster_size, | |
8fef55a8 | 2547 | int min_yield) |
4c48ba06 MD |
2548 | { |
2549 | freelist->clusters = SCM_EOL; | |
2550 | freelist->cluster_size = cluster_size + 1; | |
b37fe1c5 MD |
2551 | freelist->left_to_collect = 0; |
2552 | freelist->clusters_allocated = 0; | |
8fef55a8 MD |
2553 | freelist->min_yield = 0; |
2554 | freelist->min_yield_fraction = min_yield; | |
4c48ba06 MD |
2555 | freelist->span = span; |
2556 | freelist->collected = 0; | |
1811ebce | 2557 | freelist->collected_1 = 0; |
4c48ba06 MD |
2558 | freelist->heap_size = 0; |
2559 | } | |
2560 | ||
85db4a2c DH |
2561 | |
2562 | /* Get an integer from an environment variable. */ | |
/* Get an integer configuration value from environment variable VAR.
   Returns DEF when VAR is unset, when its value does not start with a
   decimal number, or when the parsed value does not fit in an int
   (the original code silently truncated long to int here).  Trailing
   non-numeric characters after the digits are ignored, as before. */
static int
scm_i_getenv_int (const char *var, int def)
{
  char *end, *val = getenv (var);
  long res;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  /* Guard against silent truncation when long is wider than int. */
  if (res != (long) (int) res)
    return def;
  return (int) res;
}
2575 | ||
2576 | ||
/* One-time initialisation of the storage subsystem: protection
   vector, freelists, the initial heap segments, GC hooks, the port
   table and the protection hash table.  Configuration is read from
   GUILE_* environment variables with compiled-in defaults.  Returns 0
   on success, 1 if an initial heap segment or the port table could
   not be allocated.  Must run before any cell is allocated. */
int
scm_init_storage ()
{
  scm_sizet gc_trigger_1;
  scm_sizet gc_trigger_2;
  scm_sizet init_heap_size_1;
  scm_sizet init_heap_size_2;
  scm_sizet j;

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

  /* Clear the table of objects protected from GC at the C level, and
     keep GC disabled until initialisation is complete. */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  /* Configure the two master freelists (1-cell and 2-cell objects),
     honouring environment overrides for the yield thresholds. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
  init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
  gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
  init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
  scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);

  scm_expmem = 0;

  /* Room for two heap segments up front (one per master freelist). */
  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  scm_heap_table = ((scm_heap_seg_data_t *)
                    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
  heap_segment_table_size = 2;

  mark_space_ptr = &mark_space_head;

  /* Allocate the initial heap segments; sizes may be overridden from
     the environment.  Failure of either is fatal for initialisation. */
  init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
  init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  /* C-level hooks fired around the phases of each collection. */
  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports. */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

  /* Arrange for cleanup () to run at process termination. */
#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);

  return 0;
}
939794ce | 2648 | |
0f2d19dd JB |
2649 | \f |
2650 | ||
939794ce DH |
/* Scheme-visible hook that is run after every garbage collection. */
SCM scm_after_gc_hook;

/* System async whose execution runs gc_async_thunk (and thereby
   scm_after_gc_hook); protected from GC via scm_asyncs. */
static SCM gc_async;
2654 | ||
939794ce DH |
2655 | /* The function gc_async_thunk causes the execution of the after-gc-hook. It |
2656 | * is run after the gc, as soon as the asynchronous events are handled by the | |
2657 | * evaluator. | |
2658 | */ | |
/* Run the Scheme after-gc-hook with no arguments.  Invoked through
   the async machinery once the evaluator processes pending asyncs
   after a collection.  Always returns SCM_UNSPECIFIED. */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}
2665 | ||
2666 | ||
2667 | /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of | |
2668 | * the garbage collection. The only purpose of this function is to mark the | |
2669 | * gc_async (which will eventually lead to the execution of the | |
2670 | * gc_async_thunk). | |
2671 | */ | |
static void *
mark_gc_async (void * hook_data, void *func_data, void *data)
{
  /* hook_data, func_data and data are required by the c_hook calling
     convention but unused here. */
  scm_system_async_mark (gc_async);
  return NULL;
}
2678 | ||
2679 | ||
0f2d19dd JB |
/* Create the after-gc-hook and wire it into the C-level GC hooks:
   after each collection mark_gc_async marks gc_async, which later
   runs gc_async_thunk and thereby the Scheme-level hook. */
void
scm_init_gc ()
{
  SCM after_gc_thunk;

  /* Dirk:FIXME:: scm_create_hook is strange. */
  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);

  after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
                                    gc_async_thunk);
  gc_async = scm_system_async (after_gc_thunk);  /* protected via scm_asyncs */

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#ifndef SCM_MAGIC_SNARFER
  /* Pull in snarfed binding-initialisation code generated from this
     file. */
#include "libguile/gc.x"
#endif
}
89e00824 | 2698 | |
56495472 ML |
2699 | #endif /*MARK_DEPENDENCIES*/ |
2700 | ||
89e00824 ML |
2701 | /* |
2702 | Local Variables: | |
2703 | c-file-style: "gnu" | |
2704 | End: | |
2705 | */ |