Commit | Line | Data |
---|---|---|
22a52da1 | 1 | /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc. |
a00c95d9 | 2 | * |
0f2d19dd JB |
3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License as published by | |
5 | * the Free Software Foundation; either version 2, or (at your option) | |
6 | * any later version. | |
a00c95d9 | 7 | * |
0f2d19dd JB |
8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
11 | * GNU General Public License for more details. | |
a00c95d9 | 12 | * |
0f2d19dd JB |
13 | * You should have received a copy of the GNU General Public License |
14 | * along with this software; see the file COPYING. If not, write to | |
82892bed JB |
15 | * the Free Software Foundation, Inc., 59 Temple Place, Suite 330, |
16 | * Boston, MA 02111-1307 USA | |
0f2d19dd JB |
17 | * |
18 | * As a special exception, the Free Software Foundation gives permission | |
19 | * for additional uses of the text contained in its release of GUILE. | |
20 | * | |
21 | * The exception is that, if you link the GUILE library with other files | |
22 | * to produce an executable, this does not by itself cause the | |
23 | * resulting executable to be covered by the GNU General Public License. | |
24 | * Your use of that executable is in no way restricted on account of | |
25 | * linking the GUILE library code into it. | |
26 | * | |
27 | * This exception does not however invalidate any other reasons why | |
28 | * the executable file might be covered by the GNU General Public License. | |
29 | * | |
30 | * This exception applies only to the code released by the | |
31 | * Free Software Foundation under the name GUILE. If you copy | |
32 | * code from other Free Software Foundation releases into a copy of | |
33 | * GUILE, as the General Public License permits, the exception does | |
34 | * not apply to the code that you add in this way. To avoid misleading | |
35 | * anyone as to the status of such modified files, you must delete | |
36 | * this exception notice from them. | |
37 | * | |
38 | * If you write modifications of your own for GUILE, it is your choice | |
39 | * whether to permit this exception to apply to your modifications. | |
82892bed | 40 | * If you do not wish that, delete this exception notice. */ |
1bbd0b84 | 41 | |
1bbd0b84 | 42 | |
37ddcaf6 MD |
43 | /* #define DEBUGINFO */ |
44 | ||
56495472 ML |
45 | /* SECTION: This code is compiled once. |
46 | */ | |
47 | ||
48 | #ifndef MARK_DEPENDENCIES | |
49 | ||
0f2d19dd JB |
50 | \f |
51 | #include <stdio.h> | |
e6e2e95a | 52 | #include <errno.h> |
783e7774 | 53 | #include <string.h> |
e6e2e95a | 54 | |
d9189652 RB |
55 | #ifdef __ia64__ |
56 | #include <ucontext.h> | |
bb1180ef | 57 | extern unsigned long * __libc_ia64_register_backing_store_base; |
d9189652 RB |
58 | #endif |
59 | ||
a0599745 | 60 | #include "libguile/_scm.h" |
0a7a7445 | 61 | #include "libguile/eval.h" |
a0599745 MD |
62 | #include "libguile/stime.h" |
63 | #include "libguile/stackchk.h" | |
64 | #include "libguile/struct.h" | |
a0599745 MD |
65 | #include "libguile/smob.h" |
66 | #include "libguile/unif.h" | |
67 | #include "libguile/async.h" | |
68 | #include "libguile/ports.h" | |
69 | #include "libguile/root.h" | |
70 | #include "libguile/strings.h" | |
71 | #include "libguile/vectors.h" | |
801cb5e7 | 72 | #include "libguile/weaks.h" |
686765af | 73 | #include "libguile/hashtab.h" |
ecf470a2 | 74 | #include "libguile/tags.h" |
a0599745 MD |
75 | |
76 | #include "libguile/validate.h" | |
1be6b49c | 77 | #include "libguile/deprecation.h" |
a0599745 | 78 | #include "libguile/gc.h" |
fce59c93 | 79 | |
bc9d9bb2 | 80 | #ifdef GUILE_DEBUG_MALLOC |
a0599745 | 81 | #include "libguile/debug-malloc.h" |
bc9d9bb2 MD |
82 | #endif |
83 | ||
0f2d19dd | 84 | #ifdef HAVE_MALLOC_H |
95b88819 | 85 | #include <malloc.h> |
0f2d19dd JB |
86 | #endif |
87 | ||
88 | #ifdef HAVE_UNISTD_H | |
95b88819 | 89 | #include <unistd.h> |
0f2d19dd JB |
90 | #endif |
91 | ||
1cc91f1b JB |
92 | #ifdef __STDC__ |
93 | #include <stdarg.h> | |
94 | #define var_start(x, y) va_start(x, y) | |
95 | #else | |
96 | #include <varargs.h> | |
97 | #define var_start(x, y) va_start(x) | |
98 | #endif | |
99 | ||
0f2d19dd | 100 | \f |
406c7d90 | 101 | |
8c494e99 DH |
102 | #define CELL_P(x) (SCM_ITAG3 (x) == scm_tc3_cons) |
103 | ||
406c7d90 DH |
104 | unsigned int scm_gc_running_p = 0; |
105 | ||
106 | \f | |
107 | ||
108 | #if (SCM_DEBUG_CELL_ACCESSES == 1) | |
109 | ||
eae33935 | 110 | /* Set this to != 0 if every cell that is accessed shall be checked: |
61045190 DH |
111 | */ |
112 | unsigned int scm_debug_cell_accesses_p = 1; | |
406c7d90 | 113 | |
e81d98ec DH |
114 | /* Set this to 0 if no additional gc's shall be performed, otherwise set it to |
115 | * the number of cell accesses after which a gc shall be called. | |
116 | */ | |
117 | static unsigned int debug_cells_gc_interval = 0; | |
118 | ||
406c7d90 DH |
119 | |
120 | /* Assert that the given object is a valid reference to a valid cell. This | |
121 | * test involves to determine whether the object is a cell pointer, whether | |
122 | * this pointer actually points into a heap segment and whether the cell | |
e81d98ec DH |
123 | * pointed to is not a free cell. Further, additional garbage collections may |
124 | * get executed after a user defined number of cell accesses. This helps to | |
125 | * find places in the C code where references are dropped for extremely short | |
126 | * periods. | |
406c7d90 DH |
127 | */ |
128 | void | |
129 | scm_assert_cell_valid (SCM cell) | |
130 | { | |
61045190 DH |
131 | static unsigned int already_running = 0; |
132 | ||
133 | if (scm_debug_cell_accesses_p && !already_running) | |
406c7d90 | 134 | { |
61045190 | 135 | already_running = 1; /* set to avoid recursion */ |
406c7d90 | 136 | |
9d47a1e6 | 137 | if (!scm_cellp (cell)) |
406c7d90 | 138 | { |
1be6b49c ML |
139 | fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lux\n", |
140 | (unsigned long) SCM_UNPACK (cell)); | |
406c7d90 DH |
141 | abort (); |
142 | } | |
143 | else if (!scm_gc_running_p) | |
144 | { | |
145 | /* Dirk::FIXME:: During garbage collection there occur references to | |
146 | free cells. This is allright during conservative marking, but | |
147 | should not happen otherwise (I think). The case of free cells | |
148 | accessed during conservative marking is handled in function | |
149 | scm_mark_locations. However, there still occur accesses to free | |
150 | cells during gc. I don't understand why this happens. If it is | |
151 | a bug and gets fixed, the following test should also work while | |
152 | gc is running. | |
153 | */ | |
154 | if (SCM_FREE_CELL_P (cell)) | |
155 | { | |
1be6b49c ML |
156 | fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lux\n", |
157 | (unsigned long) SCM_UNPACK (cell)); | |
406c7d90 DH |
158 | abort (); |
159 | } | |
e81d98ec DH |
160 | |
161 | /* If desired, perform additional garbage collections after a user | |
162 | * defined number of cell accesses. | |
163 | */ | |
164 | if (debug_cells_gc_interval) | |
165 | { | |
166 | static unsigned int counter = 0; | |
eae33935 | 167 | |
e81d98ec DH |
168 | if (counter != 0) |
169 | { | |
170 | --counter; | |
171 | } | |
172 | else | |
173 | { | |
174 | counter = debug_cells_gc_interval; | |
175 | scm_igc ("scm_assert_cell_valid"); | |
176 | } | |
177 | } | |
406c7d90 | 178 | } |
61045190 | 179 | already_running = 0; /* re-enable */ |
406c7d90 DH |
180 | } |
181 | } | |
182 | ||
183 | ||
184 | SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0, | |
185 | (SCM flag), | |
1e6808ea | 186 | "If @var{flag} is @code{#f}, cell access checking is disabled.\n" |
e81d98ec DH |
187 | "If @var{flag} is @code{#t}, cell access checking is enabled,\n" |
188 | "but no additional calls to garbage collection are issued.\n" | |
189 | "If @var{flag} is a number, cell access checking is enabled,\n" | |
190 | "with an additional garbage collection after the given\n" | |
191 | "number of cell accesses.\n" | |
1e6808ea MG |
192 | "This procedure only exists when the compile-time flag\n" |
193 | "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.") | |
406c7d90 DH |
194 | #define FUNC_NAME s_scm_set_debug_cell_accesses_x |
195 | { | |
196 | if (SCM_FALSEP (flag)) { | |
197 | scm_debug_cell_accesses_p = 0; | |
198 | } else if (SCM_EQ_P (flag, SCM_BOOL_T)) { | |
e81d98ec DH |
199 | debug_cells_gc_interval = 0; |
200 | scm_debug_cell_accesses_p = 1; | |
201 | } else if (SCM_INUMP (flag)) { | |
202 | long int f = SCM_INUM (flag); | |
203 | if (f <= 0) SCM_OUT_OF_RANGE (1, flag); | |
204 | debug_cells_gc_interval = f; | |
406c7d90 DH |
205 | scm_debug_cell_accesses_p = 1; |
206 | } else { | |
207 | SCM_WRONG_TYPE_ARG (1, flag); | |
208 | } | |
209 | return SCM_UNSPECIFIED; | |
210 | } | |
211 | #undef FUNC_NAME | |
212 | ||
213 | #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */ | |
214 | ||
215 | \f | |
216 | ||
0f2d19dd | 217 | /* {heap tuning parameters} |
a00c95d9 | 218 | * |
0f2d19dd JB |
219 | * These are parameters for controlling memory allocation. The heap |
220 | * is the area out of which scm_cons, and object headers are allocated. | |
221 | * | |
222 | * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a | |
223 | * 64 bit machine. The units of the _SIZE parameters are bytes. | |
224 | * Cons pairs and object headers occupy one heap cell. | |
225 | * | |
226 | * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is | |
227 | * allocated initially the heap will grow by half its current size | |
228 | * each subsequent time more heap is needed. | |
229 | * | |
230 | * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE | |
231 | * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more | |
1be6b49c | 232 | * heap is needed. SCM_HEAP_SEG_SIZE must fit into type size_t. This code |
0f2d19dd | 233 | * is in scm_init_storage() and alloc_some_heap() in sys.c |
a00c95d9 | 234 | * |
0f2d19dd JB |
235 | * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by |
236 | * SCM_EXPHEAP(scm_heap_size) when more heap is needed. | |
237 | * | |
238 | * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap | |
239 | * is needed. | |
240 | * | |
241 | * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will | |
a00c95d9 | 242 | * trigger a GC. |
6064dcc6 | 243 | * |
4c9419ac MV |
244 | * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must |
245 | * be reclaimed by a GC triggered by a malloc. If less than this is | |
6064dcc6 MV |
246 | * reclaimed, the trigger threshold is raised. [I don't know what a |
247 | * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to | |
a00c95d9 | 248 | * work around a oscillation that caused almost constant GC.] |
0f2d19dd JB |
249 | */ |
250 | ||
8fef55a8 MD |
251 | /* |
252 | * Heap size 45000 and 40% min yield gives quick startup and no extra | |
253 | * heap allocation. Having higher values on min yield may lead to | |
254 | * large heaps, especially if code behaviour is varying its | |
255 | * maximum consumption between different freelists. | |
256 | */ | |
d6884e63 ML |
257 | |
258 | #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS) | |
259 | #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L) | |
260 | #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS) | |
1be6b49c | 261 | size_t scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1) |
d6884e63 | 262 | / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE); |
aeacfc8f | 263 | int scm_default_min_yield_1 = 40; |
4c48ba06 | 264 | |
d6884e63 | 265 | #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2)) |
1be6b49c | 266 | size_t scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1) |
d6884e63 | 267 | / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE); |
4c48ba06 MD |
268 | /* The following value may seem large, but note that if we get to GC at |
269 | * all, this means that we have a numerically intensive application | |
270 | */ | |
aeacfc8f | 271 | int scm_default_min_yield_2 = 40; |
4c48ba06 | 272 | |
1be6b49c | 273 | size_t scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */ |
4c48ba06 | 274 | |
d6884e63 | 275 | #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE) |
0f2d19dd JB |
276 | #ifdef _QC |
277 | # define SCM_HEAP_SEG_SIZE 32768L | |
278 | #else | |
279 | # ifdef sequent | |
4c48ba06 | 280 | # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell)) |
0f2d19dd | 281 | # else |
4c48ba06 | 282 | # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell)) |
0f2d19dd JB |
283 | # endif |
284 | #endif | |
4c48ba06 | 285 | /* Make heap grow with factor 1.5 */ |
4a4c9785 | 286 | #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2) |
0f2d19dd | 287 | #define SCM_INIT_MALLOC_LIMIT 100000 |
6064dcc6 | 288 | #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10) |
0f2d19dd | 289 | |
d6884e63 ML |
290 | /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span) |
291 | aligned inner bounds for allocated storage */ | |
0f2d19dd JB |
292 | |
293 | #ifdef PROT386 | |
294 | /*in 386 protected mode we must only adjust the offset */ | |
a00c95d9 ML |
295 | # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1)) |
296 | # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p)) | |
0f2d19dd JB |
297 | #else |
298 | # ifdef _UNICOS | |
c014a02e ML |
299 | # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span))) |
300 | # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p)) | |
0f2d19dd | 301 | # else |
c014a02e ML |
302 | # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L)) |
303 | # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p)) | |
0f2d19dd JB |
304 | # endif /* UNICOS */ |
305 | #endif /* PROT386 */ | |
306 | ||
ecf470a2 ML |
307 | #define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0) |
308 | ||
d6884e63 ML |
309 | #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1) |
310 | #define CLUSTER_SIZE_IN_BYTES(freelist) \ | |
311 | (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE) | |
0f2d19dd JB |
312 | |
313 | \f | |
945fec60 | 314 | /* scm_freelists |
0f2d19dd | 315 | */ |
945fec60 | 316 | |
92c2555f | 317 | typedef struct scm_t_freelist { |
a00c95d9 ML |
318 | /* collected cells */ |
319 | SCM cells; | |
a00c95d9 ML |
320 | /* number of cells left to collect before cluster is full */ |
321 | unsigned int left_to_collect; | |
b37fe1c5 MD |
322 | /* number of clusters which have been allocated */ |
323 | unsigned int clusters_allocated; | |
8fef55a8 MD |
324 | /* a list of freelists, each of size cluster_size, |
325 | * except the last one which may be shorter | |
326 | */ | |
a00c95d9 ML |
327 | SCM clusters; |
328 | SCM *clustertail; | |
b37fe1c5 | 329 | /* this is the number of objects in each cluster, including the spine cell */ |
1be6b49c | 330 | unsigned int cluster_size; |
8fef55a8 | 331 | /* indicates that we should grow heap instead of GC:ing |
a00c95d9 ML |
332 | */ |
333 | int grow_heap_p; | |
8fef55a8 | 334 | /* minimum yield on this list in order not to grow the heap |
a00c95d9 | 335 | */ |
8fef55a8 MD |
336 | long min_yield; |
337 | /* defines min_yield as percent of total heap size | |
a00c95d9 | 338 | */ |
8fef55a8 | 339 | int min_yield_fraction; |
a00c95d9 ML |
340 | /* number of cells per object on this list */ |
341 | int span; | |
342 | /* number of collected cells during last GC */ | |
c014a02e | 343 | unsigned long collected; |
1811ebce | 344 | /* number of collected cells during penultimate GC */ |
c014a02e | 345 | unsigned long collected_1; |
a00c95d9 ML |
346 | /* total number of cells in heap segments |
347 | * belonging to this list. | |
348 | */ | |
c014a02e | 349 | unsigned long heap_size; |
92c2555f | 350 | } scm_t_freelist; |
a00c95d9 | 351 | |
4a4c9785 | 352 | SCM scm_freelist = SCM_EOL; |
92c2555f | 353 | scm_t_freelist scm_master_freelist = { |
729dbac3 | 354 | SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0, 0 |
4a4c9785 MD |
355 | }; |
356 | SCM scm_freelist2 = SCM_EOL; | |
92c2555f | 357 | scm_t_freelist scm_master_freelist2 = { |
729dbac3 | 358 | SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0, 0 |
4a4c9785 | 359 | }; |
0f2d19dd JB |
360 | |
361 | /* scm_mtrigger | |
362 | * is the number of bytes of must_malloc allocation needed to trigger gc. | |
363 | */ | |
c014a02e | 364 | unsigned long scm_mtrigger; |
0f2d19dd | 365 | |
0f2d19dd JB |
366 | /* scm_gc_heap_lock |
367 | * If set, don't expand the heap. Set only during gc, during which no allocation | |
368 | * is supposed to take place anyway. | |
369 | */ | |
370 | int scm_gc_heap_lock = 0; | |
371 | ||
372 | /* GC Blocking | |
373 | * Don't pause for collection if this is set -- just | |
374 | * expand the heap. | |
375 | */ | |
0f2d19dd JB |
376 | int scm_block_gc = 1; |
377 | ||
0f2d19dd JB |
378 | /* During collection, this accumulates objects holding |
379 | * weak references. | |
380 | */ | |
ab4bef85 | 381 | SCM scm_weak_vectors; |
0f2d19dd | 382 | |
7445e0e8 MD |
383 | /* During collection, this accumulates structures which are to be freed. |
384 | */ | |
385 | SCM scm_structs_to_free; | |
386 | ||
0f2d19dd JB |
387 | /* GC Statistics Keeping |
388 | */ | |
c014a02e ML |
389 | unsigned long scm_cells_allocated = 0; |
390 | unsigned long scm_mallocated = 0; | |
391 | unsigned long scm_gc_cells_collected; | |
392 | unsigned long scm_gc_yield; | |
393 | static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */ | |
394 | unsigned long scm_gc_malloc_collected; | |
395 | unsigned long scm_gc_ports_collected; | |
0f2d19dd | 396 | unsigned long scm_gc_time_taken = 0; |
c014a02e ML |
397 | static unsigned long t_before_gc; |
398 | static unsigned long t_before_sweep; | |
c9b0d4b0 ML |
399 | unsigned long scm_gc_mark_time_taken = 0; |
400 | unsigned long scm_gc_sweep_time_taken = 0; | |
c014a02e ML |
401 | unsigned long scm_gc_times = 0; |
402 | unsigned long scm_gc_cells_swept = 0; | |
c9b0d4b0 ML |
403 | double scm_gc_cells_marked_acc = 0.; |
404 | double scm_gc_cells_swept_acc = 0.; | |
0f2d19dd JB |
405 | |
406 | SCM_SYMBOL (sym_cells_allocated, "cells-allocated"); | |
407 | SCM_SYMBOL (sym_heap_size, "cell-heap-size"); | |
408 | SCM_SYMBOL (sym_mallocated, "bytes-malloced"); | |
409 | SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold"); | |
410 | SCM_SYMBOL (sym_heap_segments, "cell-heap-segments"); | |
411 | SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken"); | |
c9b0d4b0 ML |
412 | SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken"); |
413 | SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken"); | |
414 | SCM_SYMBOL (sym_times, "gc-times"); | |
415 | SCM_SYMBOL (sym_cells_marked, "cells-marked"); | |
416 | SCM_SYMBOL (sym_cells_swept, "cells-swept"); | |
0f2d19dd | 417 | |
92c2555f | 418 | typedef struct scm_t_heap_seg_data |
0f2d19dd | 419 | { |
cf2d30f6 JB |
420 | /* lower and upper bounds of the segment */ |
421 | SCM_CELLPTR bounds[2]; | |
422 | ||
423 | /* address of the head-of-freelist pointer for this segment's cells. | |
424 | All segments usually point to the same one, scm_freelist. */ | |
92c2555f | 425 | scm_t_freelist *freelist; |
cf2d30f6 | 426 | |
fe517a7d | 427 | /* number of cells per object in this segment */ |
945fec60 | 428 | int span; |
92c2555f | 429 | } scm_t_heap_seg_data; |
0f2d19dd JB |
430 | |
431 | ||
432 | ||
92c2555f | 433 | static size_t init_heap_seg (SCM_CELLPTR, size_t, scm_t_freelist *); |
b6efc951 DH |
434 | |
435 | typedef enum { return_on_error, abort_on_error } policy_on_error; | |
92c2555f | 436 | static void alloc_some_heap (scm_t_freelist *, policy_on_error); |
0f2d19dd JB |
437 | |
438 | ||
d6884e63 ML |
439 | #define SCM_HEAP_SIZE \ |
440 | (scm_master_freelist.heap_size + scm_master_freelist2.heap_size) | |
441 | #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B)) | |
442 | ||
443 | #define BVEC_GROW_SIZE 256 | |
444 | #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE) | |
92c2555f | 445 | #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb)) |
d6884e63 ML |
446 | |
447 | /* mark space allocation */ | |
448 | ||
92c2555f | 449 | typedef struct scm_t_mark_space |
d6884e63 | 450 | { |
92c2555f MV |
451 | scm_t_c_bvec_limb *bvec_space; |
452 | struct scm_t_mark_space *next; | |
453 | } scm_t_mark_space; | |
d6884e63 | 454 | |
92c2555f MV |
455 | static scm_t_mark_space *current_mark_space; |
456 | static scm_t_mark_space **mark_space_ptr; | |
1be6b49c | 457 | static ptrdiff_t current_mark_space_offset; |
92c2555f | 458 | static scm_t_mark_space *mark_space_head; |
d6884e63 | 459 | |
92c2555f | 460 | static scm_t_c_bvec_limb * |
d6884e63 | 461 | get_bvec () |
db4b4ca6 | 462 | #define FUNC_NAME "get_bvec" |
d6884e63 | 463 | { |
92c2555f | 464 | scm_t_c_bvec_limb *res; |
d6884e63 ML |
465 | |
466 | if (!current_mark_space) | |
467 | { | |
92c2555f | 468 | SCM_SYSCALL (current_mark_space = (scm_t_mark_space *) malloc (sizeof (scm_t_mark_space))); |
d6884e63 | 469 | if (!current_mark_space) |
db4b4ca6 | 470 | SCM_MISC_ERROR ("could not grow heap", SCM_EOL); |
d6884e63 ML |
471 | |
472 | current_mark_space->bvec_space = NULL; | |
473 | current_mark_space->next = NULL; | |
474 | ||
475 | *mark_space_ptr = current_mark_space; | |
476 | mark_space_ptr = &(current_mark_space->next); | |
477 | ||
478 | return get_bvec (); | |
479 | } | |
480 | ||
481 | if (!(current_mark_space->bvec_space)) | |
482 | { | |
483 | SCM_SYSCALL (current_mark_space->bvec_space = | |
92c2555f | 484 | (scm_t_c_bvec_limb *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1)); |
d6884e63 | 485 | if (!(current_mark_space->bvec_space)) |
db4b4ca6 | 486 | SCM_MISC_ERROR ("could not grow heap", SCM_EOL); |
d6884e63 ML |
487 | |
488 | current_mark_space_offset = 0; | |
489 | ||
490 | return get_bvec (); | |
491 | } | |
492 | ||
493 | if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS) | |
494 | { | |
495 | current_mark_space = NULL; | |
496 | ||
497 | return get_bvec (); | |
498 | } | |
499 | ||
500 | res = current_mark_space->bvec_space + current_mark_space_offset; | |
501 | current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS; | |
502 | ||
503 | return res; | |
504 | } | |
db4b4ca6 DH |
505 | #undef FUNC_NAME |
506 | ||
d6884e63 ML |
507 | |
508 | static void | |
509 | clear_mark_space () | |
510 | { | |
92c2555f | 511 | scm_t_mark_space *ms; |
d6884e63 ML |
512 | |
513 | for (ms = mark_space_head; ms; ms = ms->next) | |
514 | memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES); | |
515 | } | |
516 | ||
517 | ||
0f2d19dd | 518 | \f |
cf2d30f6 JB |
519 | /* Debugging functions. */ |
520 | ||
bb2c57fa | 521 | #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST) |
cf2d30f6 | 522 | |
eae33935 TTN |
523 | static long int heap_segment (SCM obj); /* forw decl: non-debugging func */ |
524 | ||
8ded62a3 | 525 | static void |
92c2555f | 526 | map_free_list (scm_t_freelist *master, SCM freelist) |
8ded62a3 | 527 | { |
c014a02e | 528 | long last_seg = -1, count = 0; |
8ded62a3 | 529 | SCM f; |
a00c95d9 | 530 | |
3f5d82cd | 531 | for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f)) |
8ded62a3 | 532 | { |
592996c9 | 533 | long int this_seg = heap_segment (f); |
8ded62a3 | 534 | |
592996c9 DH |
535 | if (this_seg == -1) |
536 | { | |
eae33935 | 537 | fprintf (stderr, |
592996c9 | 538 | "map_free_list: can't find segment containing cell %lux\n", |
eae33935 | 539 | (unsigned long int) SCM_UNPACK (f)); |
592996c9 DH |
540 | abort (); |
541 | } | |
542 | else if (this_seg != last_seg) | |
8ded62a3 MD |
543 | { |
544 | if (last_seg != -1) | |
1be6b49c ML |
545 | fprintf (stderr, " %5ld %d-cells in segment %ld\n", |
546 | (long) count, master->span, (long) last_seg); | |
8ded62a3 MD |
547 | last_seg = this_seg; |
548 | count = 0; | |
549 | } | |
550 | count++; | |
551 | } | |
552 | if (last_seg != -1) | |
1be6b49c ML |
553 | fprintf (stderr, " %5ld %d-cells in segment %ld\n", |
554 | (long) count, master->span, (long) last_seg); | |
8ded62a3 | 555 | } |
cf2d30f6 | 556 | |
a00c95d9 | 557 | SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0, |
acb0a19c | 558 | (), |
5352393c MG |
559 | "Print debugging information about the free-list.\n" |
560 | "@code{map-free-list} is only included in\n" | |
561 | "@code{--enable-guile-debug} builds of Guile.") | |
acb0a19c MD |
562 | #define FUNC_NAME s_scm_map_free_list |
563 | { | |
592996c9 DH |
564 | size_t i; |
565 | ||
1be6b49c ML |
566 | fprintf (stderr, "%ld segments total (%d:%ld", |
567 | (long) scm_n_heap_segs, | |
4c48ba06 | 568 | scm_heap_table[0].span, |
1be6b49c | 569 | (long) (scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0])); |
592996c9 DH |
570 | |
571 | for (i = 1; i != scm_n_heap_segs; i++) | |
1be6b49c | 572 | fprintf (stderr, ", %d:%ld", |
4c48ba06 | 573 | scm_heap_table[i].span, |
1be6b49c | 574 | (long) (scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0])); |
4c48ba06 | 575 | fprintf (stderr, ")\n"); |
8ded62a3 MD |
576 | map_free_list (&scm_master_freelist, scm_freelist); |
577 | map_free_list (&scm_master_freelist2, scm_freelist2); | |
cf2d30f6 JB |
578 | fflush (stderr); |
579 | ||
580 | return SCM_UNSPECIFIED; | |
581 | } | |
1bbd0b84 | 582 | #undef FUNC_NAME |
cf2d30f6 | 583 | |
c014a02e ML |
584 | static long last_cluster; |
585 | static long last_size; | |
4c48ba06 | 586 | |
c014a02e ML |
587 | static long |
588 | free_list_length (char *title, long i, SCM freelist) | |
5384bc5b MD |
589 | { |
590 | SCM ls; | |
c014a02e | 591 | long n = 0; |
3f5d82cd DH |
592 | for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls)) |
593 | if (SCM_FREE_CELL_P (ls)) | |
5384bc5b MD |
594 | ++n; |
595 | else | |
596 | { | |
1be6b49c | 597 | fprintf (stderr, "bad cell in %s at position %ld\n", title, (long) n); |
5384bc5b MD |
598 | abort (); |
599 | } | |
4c48ba06 MD |
600 | if (n != last_size) |
601 | { | |
602 | if (i > 0) | |
603 | { | |
604 | if (last_cluster == i - 1) | |
1be6b49c | 605 | fprintf (stderr, "\t%ld\n", (long) last_size); |
4c48ba06 | 606 | else |
1be6b49c | 607 | fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size); |
4c48ba06 MD |
608 | } |
609 | if (i >= 0) | |
1be6b49c | 610 | fprintf (stderr, "%s %ld", title, (long) i); |
4c48ba06 | 611 | else |
1be6b49c | 612 | fprintf (stderr, "%s\t%ld\n", title, (long) n); |
4c48ba06 MD |
613 | last_cluster = i; |
614 | last_size = n; | |
615 | } | |
5384bc5b MD |
616 | return n; |
617 | } | |
618 | ||
619 | static void | |
92c2555f | 620 | free_list_lengths (char *title, scm_t_freelist *master, SCM freelist) |
5384bc5b MD |
621 | { |
622 | SCM clusters; | |
c014a02e | 623 | long i = 0, len, n = 0; |
5384bc5b MD |
624 | fprintf (stderr, "%s\n\n", title); |
625 | n += free_list_length ("free list", -1, freelist); | |
626 | for (clusters = master->clusters; | |
627 | SCM_NNULLP (clusters); | |
628 | clusters = SCM_CDR (clusters)) | |
4c48ba06 MD |
629 | { |
630 | len = free_list_length ("cluster", i++, SCM_CAR (clusters)); | |
631 | n += len; | |
632 | } | |
633 | if (last_cluster == i - 1) | |
1be6b49c | 634 | fprintf (stderr, "\t%ld\n", (long) last_size); |
4c48ba06 | 635 | else |
1be6b49c ML |
636 | fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size); |
637 | fprintf (stderr, "\ntotal %ld objects\n\n", (long) n); | |
5384bc5b MD |
638 | } |
639 | ||
a00c95d9 | 640 | SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0, |
5384bc5b | 641 | (), |
5352393c MG |
642 | "Print debugging information about the free-list.\n" |
643 | "@code{free-list-length} is only included in\n" | |
644 | "@code{--enable-guile-debug} builds of Guile.") | |
5384bc5b MD |
645 | #define FUNC_NAME s_scm_free_list_length |
646 | { | |
b37fe1c5 MD |
647 | free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist); |
648 | free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2); | |
12e5fb3b | 649 | return SCM_UNSPECIFIED; |
5384bc5b MD |
650 | } |
651 | #undef FUNC_NAME | |
652 | ||
eae33935 | 653 | #endif /* defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST) */ |
bb2c57fa MD |
654 | |
655 | #ifdef GUILE_DEBUG_FREELIST | |
cf2d30f6 | 656 | |
d3dd80ab MG |
657 | /* Non-zero if freelist debugging is in effect. Set this via |
658 | `gc-set-debug-check-freelist!'. */ | |
659 | static int scm_debug_check_freelist = 0; | |
660 | ||
cf2d30f6 | 661 | /* Number of calls to SCM_NEWCELL since startup. */ |
c014a02e ML |
662 | static unsigned long scm_newcell_count; |
663 | static unsigned long scm_newcell2_count; | |
cf2d30f6 JB |
664 | |
665 | /* Search freelist for anything that isn't marked as a free cell. | |
666 | Abort if we find something. */ | |
8ded62a3 MD |
667 | static void |
668 | scm_check_freelist (SCM freelist) | |
669 | { | |
670 | SCM f; | |
c014a02e | 671 | long i = 0; |
8ded62a3 | 672 | |
3f5d82cd DH |
673 | for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++) |
674 | if (!SCM_FREE_CELL_P (f)) | |
8ded62a3 | 675 | { |
1be6b49c ML |
676 | fprintf (stderr, "Bad cell in freelist on newcell %lu: %lu'th elt\n", |
677 | (long) scm_newcell_count, (long) i); | |
8ded62a3 MD |
678 | abort (); |
679 | } | |
680 | } | |
cf2d30f6 | 681 | |
a00c95d9 | 682 | SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0, |
1bbd0b84 | 683 | (SCM flag), |
1e6808ea MG |
684 | "If @var{flag} is @code{#t}, check the freelist for consistency\n" |
685 | "on each cell allocation. This procedure only exists when the\n" | |
686 | "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.") | |
1bbd0b84 | 687 | #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x |
25748c78 | 688 | { |
d6884e63 ML |
689 | /* [cmm] I did a double-take when I read this code the first time. |
690 | well, FWIW. */ | |
945fec60 | 691 | SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist); |
25748c78 GB |
692 | return SCM_UNSPECIFIED; |
693 | } | |
1bbd0b84 | 694 | #undef FUNC_NAME |
25748c78 | 695 | |
fca7547b | 696 | #endif /* GUILE_DEBUG_FREELIST */ |
cf2d30f6 JB |
697 | |
698 | \f | |
0f2d19dd | 699 | |
c014a02e | 700 | static unsigned long |
92c2555f | 701 | master_cells_allocated (scm_t_freelist *master) |
b37fe1c5 | 702 | { |
d6884e63 | 703 | /* the '- 1' below is to ignore the cluster spine cells. */ |
c014a02e | 704 | long objects = master->clusters_allocated * (master->cluster_size - 1); |
b37fe1c5 MD |
705 | if (SCM_NULLP (master->clusters)) |
706 | objects -= master->left_to_collect; | |
707 | return master->span * objects; | |
708 | } | |
709 | ||
c014a02e | 710 | static unsigned long |
b37fe1c5 MD |
711 | freelist_length (SCM freelist) |
712 | { | |
c014a02e | 713 | long n; |
3f5d82cd | 714 | for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist)) |
b37fe1c5 MD |
715 | ++n; |
716 | return n; | |
717 | } | |
718 | ||
c014a02e | 719 | static unsigned long |
b37fe1c5 MD |
720 | compute_cells_allocated () |
721 | { | |
722 | return (scm_cells_allocated | |
723 | + master_cells_allocated (&scm_master_freelist) | |
724 | + master_cells_allocated (&scm_master_freelist2) | |
725 | - scm_master_freelist.span * freelist_length (scm_freelist) | |
726 | - scm_master_freelist2.span * freelist_length (scm_freelist2)); | |
727 | } | |
b37fe1c5 | 728 | |
0f2d19dd JB |
729 | /* {Scheme Interface to GC} |
730 | */ | |
731 | ||
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
	    "Return an association list of statistics about Guile's current\n"
	    "use of storage.")
#define FUNC_NAME s_scm_gc_stats
{
  long i;
  long n;
  SCM heap_segs;
  /* Snapshot copies of the global counters, taken before any consing
   * below can perturb them.  */
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_scm_gc_sweep_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;

  SCM_DEFER_INTS;

  /* Block GC while building the heap-segment list: the consing below
   * could otherwise trigger a collection that grows the heap table
   * under our feet.  */
  ++scm_block_gc;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  /* Build (upper-bound . lower-bound) pairs for each heap segment,
   * newest first, so the resulting list is in segment order.  */
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
				    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
			  heap_segs);
  /* If the number of segments changed while we consed, the snapshot is
   * stale -- start over.  */
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc;

  answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
		       scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
		       scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
		       scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
		       scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
		       scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
		       scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
		       scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
		       scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
		       scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
		       scm_cons (sym_heap_segments, heap_segs),
		       SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
0f2d19dd JB |
799 | |
800 | ||
/* Reset the per-collection statistics counters at the start of a GC.
 * WHAT names the trigger of this collection but is currently unused.
 */
static void
gc_start_stats (const char *what SCM_UNUSED)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  /* Remember the previous yield so adjust_min_yield can pick the
   * larger of the last two.  */
  scm_gc_yield_1 = scm_gc_yield;
  /* Yield for this GC starts out as the total of allocated cells. */
  scm_gc_yield = (scm_cells_allocated
		  + master_cells_allocated (&scm_master_freelist)
		  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}
814 | ||
939794ce | 815 | |
c9b0d4b0 ML |
816 | static void |
817 | gc_end_stats () | |
0f2d19dd | 818 | { |
c9b0d4b0 ML |
819 | unsigned long t = scm_c_get_internal_run_time (); |
820 | scm_gc_time_taken += (t - t_before_gc); | |
821 | scm_gc_sweep_time_taken += (t - t_before_sweep); | |
822 | ++scm_gc_times; | |
823 | ||
824 | scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected; | |
825 | scm_gc_cells_swept_acc += scm_gc_cells_swept; | |
0f2d19dd JB |
826 | } |
827 | ||
828 | ||
a00c95d9 | 829 | SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0, |
1bbd0b84 | 830 | (SCM obj), |
b380b885 MD |
831 | "Return an integer that for the lifetime of @var{obj} is uniquely\n" |
832 | "returned by this function for @var{obj}") | |
1bbd0b84 | 833 | #define FUNC_NAME s_scm_object_address |
0f2d19dd | 834 | { |
c014a02e | 835 | return scm_ulong2num ((unsigned long) SCM_UNPACK (obj)); |
0f2d19dd | 836 | } |
1bbd0b84 | 837 | #undef FUNC_NAME |
0f2d19dd JB |
838 | |
839 | ||
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
           (),
	    "Scans all of SCM objects and reclaims for further use those that are\n"
	    "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  /* Interrupts are deferred around the collection proper. */
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
0f2d19dd JB |
852 | |
853 | ||
854 | \f | |
855 | /* {C Interface For When GC is Triggered} | |
856 | */ | |
857 | ||
/* Raise FREELIST's min_yield after a disappointing collection so that
 * the heap is grown rather than GC'd again too soon.
 */
static void
adjust_min_yield (scm_t_freelist *freelist)
{
  /* min yield is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   *  by computing an average over multiple GC:s.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick largest of last two yields. */
      long delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
		    - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %lu, delta = %ld\n",
	       (long) scm_cells_allocated,
	       (long) delta);
#endif
      /* Only ratchet min_yield upwards; a negative delta means the
       * target is already met.  */
      if (delta > 0)
	freelist->min_yield += delta;
    }
}
889 | ||
b6efc951 | 890 | |
/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

/* Hand out a fresh cell from MASTER, refilling *FREELIST with the rest
 * of the cluster the cell came from.  Grows the heap and/or runs a
 * collection as needed until a non-empty cluster is available.
 */
SCM
scm_gc_for_newcell (scm_t_freelist *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
	{
	  if (master->grow_heap_p || scm_block_gc)
	    {
	      /* In order to reduce gc frequency, try to allocate a new heap
	       * segment first, even if gc might find some free cells.  If we
	       * can't obtain a new heap segment, we will try gc later.
	       */
	      master->grow_heap_p = 0;
	      alloc_some_heap (master, return_on_error);
	    }
	  if (SCM_NULLP (master->clusters))
	    {
	      /* The heap was not grown, either because it wasn't scheduled to
	       * grow, or because there was not enough memory available.  In
	       * both cases we have to try gc to get some free cells.
	       */
#ifdef DEBUGINFO
	      fprintf (stderr, "allocated = %lu, ",
		       (long) (scm_cells_allocated
			       + master_cells_allocated (&scm_master_freelist)
			       + master_cells_allocated (&scm_master_freelist2)));
#endif
	      scm_igc ("cells");
	      adjust_min_yield (master);
	      if (SCM_NULLP (master->clusters))
		{
		  /* gc could not free any cells.  Now, we _must_ allocate a
		   * new heap segment, because there is no other possibility
		   * to provide a new cell for the caller.
		   */
		  alloc_some_heap (master, abort_on_error);
		}
	    }
	}
      /* Detach the first cluster; its head cell is our result.  An
       * empty cluster (spine only) makes us loop again.  */
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));

#ifdef GUILE_DEBUG_FREELIST
  scm_check_freelist (cell);
#endif

  --scm_ints_disabled;
  /* The rest of the cluster becomes the caller's new freelist. */
  *freelist = SCM_FREE_CELL_CDR (cell);
  return cell;
}
951 | ||
b6efc951 | 952 | |
4c48ba06 MD |
#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 *
 * Fix: the function returns the reserved cluster's head cell, so its
 * return type must be SCM -- the previous `void' declaration would not
 * have compiled had this code ever been enabled.
 */

SCM
scm_alloc_cluster (scm_t_freelist *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif
968 | ||
801cb5e7 | 969 | |
92c2555f MV |
/* C-level hooks run by scm_igc at the corresponding phases of a
 * collection (before GC, before mark, before sweep, after sweep,
 * after GC).
 */
scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;
801cb5e7 | 975 | |
d9189652 RB |
/* On IA-64 the register backing store is a second stack-like region
 * that may hold live SCM values, so it is scanned conservatively along
 * with the saved context.  On other architectures this is a no-op.
 */
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do { \
    ucontext_t ctx; \
    SCM_STACKITEM * top, * bot; \
    getcontext (&ctx); \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM))); \
    bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base; \
    top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp; \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
b6efc951 | 990 | |
0f2d19dd | 991 | void |
1bbd0b84 | 992 | scm_igc (const char *what) |
0f2d19dd | 993 | { |
c014a02e | 994 | long j; |
0f2d19dd | 995 | |
406c7d90 | 996 | ++scm_gc_running_p; |
801cb5e7 | 997 | scm_c_hook_run (&scm_before_gc_c_hook, 0); |
4c48ba06 MD |
998 | #ifdef DEBUGINFO |
999 | fprintf (stderr, | |
1000 | SCM_NULLP (scm_freelist) | |
1001 | ? "*" | |
1002 | : (SCM_NULLP (scm_freelist2) ? "o" : "m")); | |
1003 | #endif | |
42db06f0 | 1004 | /* During the critical section, only the current thread may run. */ |
216eedfc | 1005 | SCM_CRITICAL_SECTION_START; |
42db06f0 | 1006 | |
ab4bef85 JB |
1007 | if (!scm_stack_base || scm_block_gc) |
1008 | { | |
406c7d90 | 1009 | --scm_gc_running_p; |
ab4bef85 JB |
1010 | return; |
1011 | } | |
1012 | ||
c9b0d4b0 ML |
1013 | gc_start_stats (what); |
1014 | ||
ab4bef85 JB |
1015 | if (scm_gc_heap_lock) |
1016 | /* We've invoked the collector while a GC is already in progress. | |
1017 | That should never happen. */ | |
1018 | abort (); | |
0f2d19dd JB |
1019 | |
1020 | ++scm_gc_heap_lock; | |
ab4bef85 | 1021 | |
801cb5e7 MD |
1022 | scm_c_hook_run (&scm_before_mark_c_hook, 0); |
1023 | ||
d6884e63 ML |
1024 | clear_mark_space (); |
1025 | ||
42db06f0 | 1026 | #ifndef USE_THREADS |
a00c95d9 | 1027 | |
1b9be268 | 1028 | /* Mark objects on the C stack. */ |
0f2d19dd JB |
1029 | SCM_FLUSH_REGISTER_WINDOWS; |
1030 | /* This assumes that all registers are saved into the jmp_buf */ | |
1031 | setjmp (scm_save_regs_gc_mark); | |
1032 | scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark, | |
1be6b49c | 1033 | ( (size_t) (sizeof (SCM_STACKITEM) - 1 + |
ce4a361d JB |
1034 | sizeof scm_save_regs_gc_mark) |
1035 | / sizeof (SCM_STACKITEM))); | |
0f2d19dd JB |
1036 | |
1037 | { | |
6b1b030e | 1038 | unsigned long stack_len = scm_stack_size (scm_stack_base); |
0f2d19dd | 1039 | #ifdef SCM_STACK_GROWS_UP |
6ba93e5e | 1040 | scm_mark_locations (scm_stack_base, stack_len); |
0f2d19dd | 1041 | #else |
6ba93e5e | 1042 | scm_mark_locations (scm_stack_base - stack_len, stack_len); |
0f2d19dd JB |
1043 | #endif |
1044 | } | |
d9189652 | 1045 | SCM_MARK_BACKING_STORE(); |
0f2d19dd | 1046 | |
42db06f0 MD |
1047 | #else /* USE_THREADS */ |
1048 | ||
1049 | /* Mark every thread's stack and registers */ | |
945fec60 | 1050 | scm_threads_mark_stacks (); |
42db06f0 MD |
1051 | |
1052 | #endif /* USE_THREADS */ | |
0f2d19dd | 1053 | |
0f2d19dd JB |
1054 | j = SCM_NUM_PROTECTS; |
1055 | while (j--) | |
1056 | scm_gc_mark (scm_sys_protects[j]); | |
1057 | ||
6b1b030e ML |
1058 | /* mark the registered roots */ |
1059 | { | |
592996c9 | 1060 | size_t i; |
6b1b030e ML |
1061 | for (i = 0; i < SCM_VECTOR_LENGTH (scm_gc_registered_roots); ++i) { |
1062 | SCM l = SCM_VELTS (scm_gc_registered_roots)[i]; | |
592996c9 | 1063 | for (; !SCM_NULLP (l); l = SCM_CDR (l)) { |
6b1b030e ML |
1064 | SCM *p = (SCM *) (scm_num2long (SCM_CAAR (l), 0, NULL)); |
1065 | scm_gc_mark (*p); | |
1066 | } | |
1067 | } | |
1068 | } | |
1069 | ||
9de33deb MD |
1070 | /* FIXME: we should have a means to register C functions to be run |
1071 | * in different phases of GC | |
a00c95d9 | 1072 | */ |
9de33deb | 1073 | scm_mark_subr_table (); |
a00c95d9 | 1074 | |
42db06f0 MD |
1075 | #ifndef USE_THREADS |
1076 | scm_gc_mark (scm_root->handle); | |
1077 | #endif | |
a00c95d9 | 1078 | |
c9b0d4b0 ML |
1079 | t_before_sweep = scm_c_get_internal_run_time (); |
1080 | scm_gc_mark_time_taken += (t_before_sweep - t_before_gc); | |
1081 | ||
801cb5e7 | 1082 | scm_c_hook_run (&scm_before_sweep_c_hook, 0); |
0493cd89 | 1083 | |
0f2d19dd JB |
1084 | scm_gc_sweep (); |
1085 | ||
801cb5e7 MD |
1086 | scm_c_hook_run (&scm_after_sweep_c_hook, 0); |
1087 | ||
0f2d19dd | 1088 | --scm_gc_heap_lock; |
c9b0d4b0 | 1089 | gc_end_stats (); |
42db06f0 | 1090 | |
216eedfc | 1091 | SCM_CRITICAL_SECTION_END; |
801cb5e7 | 1092 | scm_c_hook_run (&scm_after_gc_c_hook, 0); |
406c7d90 | 1093 | --scm_gc_running_p; |
0f2d19dd JB |
1094 | } |
1095 | ||
1096 | \f | |
939794ce | 1097 | |
a00c95d9 | 1098 | /* {Mark/Sweep} |
0f2d19dd JB |
1099 | */ |
1100 | ||
56495472 ML |
1101 | #define MARK scm_gc_mark |
1102 | #define FNAME "scm_gc_mark" | |
0f2d19dd | 1103 | |
56495472 | 1104 | #endif /*!MARK_DEPENDENCIES*/ |
0f2d19dd JB |
1105 | |
/* Mark an object precisely.
 *
 * This function body is compiled twice via self-inclusion of gc.c:
 * once as scm_gc_mark (MARK_DEPENDENCIES undefined), and once as
 * scm_gc_mark_dependencies (MARK_DEPENDENCIES defined), which marks
 * what P refers to without marking P itself or following self-cycles.
 */
void
MARK (SCM p)
#define FUNC_NAME FNAME
{
  register long i;
  register SCM ptr;
  scm_t_bits cell_type;

#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
  /* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
#endif
  ptr = p;

#ifdef MARK_DEPENDENCIES
  goto gc_mark_loop_first_time;
#endif

/* A simple hack for debugging.  Chose the second branch to get a
   meaningful backtrace for crashes inside the GC.
*/
#if 1
#define goto_gc_mark_loop goto gc_mark_loop
#define goto_gc_mark_nimp goto gc_mark_nimp
#else
#define goto_gc_mark_loop RECURSE(ptr); return
#define goto_gc_mark_nimp RECURSE(ptr); return
#endif

gc_mark_loop:
  /* Immediate values carry no heap storage -- nothing to mark. */
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:

#ifdef MARK_DEPENDENCIES
  /* In the dependencies variant, any object reached after the first
   * iteration is handed to the full marker; only P itself is spared.  */
  if (SCM_EQ_P (ptr, p))
    return;

  scm_gc_mark (ptr);
  return;

gc_mark_loop_first_time:
#endif

#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
  /* We are in debug mode.  Check the ptr exhaustively. */
  if (!scm_cellp (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#else
  /* In non-debug mode, do at least some cheap testing. */
  if (!CELL_P (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#endif

#ifndef MARK_DEPENDENCIES

  /* Already marked: this subgraph has been visited. */
  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

#endif

  /* Dispatch on the cell's type tag; tail positions are handled by
   * looping on PTR instead of recursing, to bound C stack depth.  */
  cell_type = SCM_GC_CELL_TYPE (ptr);
  switch (SCM_ITAG7 (cell_type))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_IMP (SCM_CDR (ptr)))
	{
	  ptr = SCM_CAR (ptr);
	  goto_gc_mark_nimp;
	}
      RECURSE (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);
      goto_gc_mark_nimp;
    case scm_tcs_cons_imcar:
      ptr = SCM_CDR (ptr);
      goto_gc_mark_loop;
    case scm_tc7_pws:
      /* Procedure-with-setter: mark setter, loop on the procedure. */
      RECURSE (SCM_SETTER (ptr));
      ptr = SCM_PROCEDURE (ptr);
      goto_gc_mark_loop;
    case scm_tcs_struct:
      {
	/* XXX - use less explicit code. */
	scm_t_bits word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_struct;
	scm_t_bits * vtable_data = (scm_t_bits *) word0;
	SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
	long len = SCM_SYMBOL_LENGTH (layout);
	char * fields_desc = SCM_SYMBOL_CHARS (layout);
	scm_t_bits * struct_data = (scm_t_bits *) SCM_STRUCT_DATA (ptr);

	if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
	  {
	    RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
	    RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
	  }
	if (len)
	  {
	    long x;

	    /* Walk the layout descriptor two characters per field;
	     * only 'p' (protected/pointer) fields are marked.  */
	    for (x = 0; x < len - 2; x += 2, ++struct_data)
	      if (fields_desc[x] == 'p')
		RECURSE (SCM_PACK (*struct_data));
	    if (fields_desc[x] == 'p')
	      {
		if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
		  for (x = *struct_data++; x; --x, ++struct_data)
		    RECURSE (SCM_PACK (*struct_data));
		else
		  RECURSE (SCM_PACK (*struct_data));
	      }
	  }
	/* mark vtable */
	ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
	goto_gc_mark_loop;
      }
      break;
    case scm_tcs_closures:
      if (SCM_IMP (SCM_ENV (ptr)))
	{
	  ptr = SCM_CLOSCAR (ptr);
	  goto_gc_mark_nimp;
	}
      RECURSE (SCM_CLOSCAR (ptr));
      ptr = SCM_ENV (ptr);
      goto_gc_mark_nimp;
    case scm_tc7_vector:
      i = SCM_VECTOR_LENGTH (ptr);
      if (i == 0)
	break;
      /* Mark elements 1..n-1 recursively, loop on element 0. */
      while (--i > 0)
	if (SCM_NIMP (SCM_VELTS (ptr)[i]))
	  RECURSE (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto_gc_mark_loop;
#ifdef CCLO
    case scm_tc7_cclo:
      {
	size_t i = SCM_CCLO_LENGTH (ptr);
	size_t j;
	for (j = 1; j != i; ++j)
	  {
	    SCM obj = SCM_CCLO_REF (ptr, j);
	    if (!SCM_IMP (obj))
	      RECURSE (obj);
	  }
	ptr = SCM_CCLO_REF (ptr, 0);
	goto_gc_mark_loop;
      }
#endif
#ifdef HAVE_ARRAYS
    /* Uniform vectors hold no SCM references -- nothing to follow. */
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      break;

    case scm_tc7_wvect:
      /* Chain the weak vector for post-GC processing; its entries are
       * handled specially below depending on key/value weakness.  */
      SCM_SET_WVECT_GC_CHAIN (ptr, scm_weak_vectors);
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
	{
	  long x;
	  long len;
	  int weak_keys;
	  int weak_values;

	  len = SCM_VECTOR_LENGTH (ptr);
	  weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
	  weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

	  for (x = 0; x < len; ++x)
	    {
	      SCM alist;
	      alist = SCM_VELTS (ptr)[x];

	      /* mark everything on the alist except the keys or
	       * values, according to weak_values and weak_keys.  */
	      while (   SCM_CONSP (alist)
		     && !SCM_GCMARKP (alist)
		     && SCM_CONSP (SCM_CAR (alist)))
		{
		  SCM kvpair;
		  SCM next_alist;

		  kvpair = SCM_CAR (alist);
		  next_alist = SCM_CDR (alist);
		  /*
		   * Do not do this:
		   *   SCM_SETGCMARK (alist);
		   *   SCM_SETGCMARK (kvpair);
		   *
		   * It may be that either the key or value is protected by
		   * an escaped reference to part of the spine of this alist.
		   * If we mark the spine here, and only mark one or neither of the
		   * key and value, they may never be properly marked.
		   * This leads to a horrible situation in which an alist containing
		   * freelist cells is exported.
		   *
		   * So only mark the spines of these arrays last of all marking.
		   * If somebody confuses us by constructing a weak vector
		   * with a circular alist then we are hosed, but at least we
		   * won't prematurely drop table entries.
		   */
		  if (!weak_keys)
		    RECURSE (SCM_CAR (kvpair));
		  if (!weak_values)
		    RECURSE (SCM_CDR (kvpair));
		  alist = next_alist;
		}
	      if (SCM_NIMP (alist))
		RECURSE (alist);
	    }
	}
      break;

    case scm_tc7_symbol:
      ptr = SCM_PROP_SLOTS (ptr);
      goto_gc_mark_loop;
    case scm_tc7_variable:
      ptr = SCM_CELL_OBJECT_1 (ptr);
      goto_gc_mark_loop;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
      if (!(i < scm_numptob))
	SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
      if (SCM_PTAB_ENTRY(ptr))
	RECURSE (SCM_FILENAME (ptr));
      /* Delegate to the port type's own mark procedure, if any. */
      if (scm_ptobs[i].mark)
	{
	  ptr = (scm_ptobs[i].mark) (ptr);
	  goto_gc_mark_loop;
	}
      else
	return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
	{ /* should be faster than going through scm_smobs */
	case scm_tc_free_cell:
	  /* We have detected a free cell.  This can happen if non-object data
	   * on the C stack points into guile's heap and is scanned during
	   * conservative marking.  */
	  break;
	case scm_tc16_big:
	case scm_tc16_real:
	case scm_tc16_complex:
	  /* Numeric smobs hold no SCM references. */
	  break;
	default:
	  i = SCM_SMOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
	  if (!(i < scm_numsmob))
	    SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
	  /* Delegate to the smob type's own mark procedure, if any. */
	  if (scm_smobs[i].mark)
	    {
	      ptr = (scm_smobs[i].mark) (ptr);
	      goto_gc_mark_loop;
	    }
	  else
	    return;
	}
      break;
    default:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
#undef RECURSE
}
#undef FUNC_NAME
0f2d19dd | 1394 | |
56495472 ML |
1395 | #ifndef MARK_DEPENDENCIES |
1396 | ||
1397 | #undef MARK | |
56495472 ML |
1398 | #undef FNAME |
1399 | ||
1400 | /* And here we define `scm_gc_mark_dependencies', by including this | |
1401 | * same file in itself. | |
1402 | */ | |
1403 | #define MARK scm_gc_mark_dependencies | |
1404 | #define FNAME "scm_gc_mark_dependencies" | |
1405 | #define MARK_DEPENDENCIES | |
1406 | #include "gc.c" | |
1407 | #undef MARK_DEPENDENCIES | |
1408 | #undef MARK | |
56495472 ML |
1409 | #undef FNAME |
1410 | ||
0f2d19dd | 1411 | |
592996c9 DH |
/* Determine whether the given value does actually represent a cell in some
 * heap segment.  If this is the case, the number of the heap segment is
 * returned.  Otherwise, -1 is returned.  Binary search is used in order to
 * determine the heap segment that contains the cell.*/
/* FIXME:  To be used within scm_mark_locations and scm_cellp this function
 * should be an inline function.  */
static long int
heap_segment (SCM obj)
{
  if (!CELL_P (obj))
    return -1;
  else
    {
      SCM_CELLPTR ptr = SCM2PTR (obj);
      unsigned long int i = 0;
      unsigned long int j = scm_n_heap_segs - 1;

      /* Quick rejection: below the lowest or above the highest segment. */
      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
	return -1;
      else if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
	return -1;
      else
	{
	  /* Binary search; invariant: ptr lies at or above segment i's
	   * lower bound and below segment j's upper bound.  */
	  while (i < j)
	    {
	      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[1]))
		{
		  break;
		}
	      else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
		{
		  i = j;
		  break;
		}
	      else
		{
		  unsigned long int k = (i + j) / 2;

		  if (k == i)
		    return -1;
		  else if (SCM_PTR_LT (ptr, scm_heap_table[k].bounds[1]))
		    {
		      j = k;
		      ++i;
		      /* ptr may fall into the gap between segments. */
		      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
			return -1;
		    }
		  else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
		    {
		      i = k;
		      --j;
		      if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
			return -1;
		    }
		}
	    }

	  /* A double-cell segment only holds cells on double-cell
	   * boundaries; card header cells are not objects either.  */
	  if (!DOUBLECELL_ALIGNED_P (obj) && scm_heap_table[i].span == 2)
	    return -1;
	  else if (SCM_GC_IN_CARD_HEADERP (ptr))
	    return -1;
	  else
	    return i;
	}
    }
}
1478 | ||
1479 | ||
592996c9 DH |
1480 | /* Mark a region conservatively */ |
1481 | void | |
1482 | scm_mark_locations (SCM_STACKITEM x[], unsigned long n) | |
1483 | { | |
1484 | unsigned long m; | |
1485 | ||
1486 | for (m = 0; m < n; ++m) | |
1487 | { | |
1488 | SCM obj = * (SCM *) &x[m]; | |
1489 | long int segment = heap_segment (obj); | |
1490 | if (segment >= 0) | |
1491 | scm_gc_mark (obj); | |
1a548472 | 1492 | } |
592996c9 | 1493 | } |
2e11a577 | 1494 | |
592996c9 DH |
1495 | |
1496 | /* The function scm_cellp determines whether an SCM value can be regarded as a | |
1497 | * pointer to a cell on the heap. | |
1498 | */ | |
1499 | int | |
1500 | scm_cellp (SCM value) | |
1501 | { | |
1502 | long int segment = heap_segment (value); | |
1503 | return (segment >= 0); | |
2e11a577 MD |
1504 | } |
1505 | ||
1506 | ||
/* Reset FREELIST's per-sweep state before scm_gc_sweep rebuilds its
 * cluster list from the unmarked cells.
 */
static void
gc_sweep_freelist_start (scm_t_freelist *freelist)
{
  freelist->cells = SCM_EOL;
  freelist->left_to_collect = freelist->cluster_size;
  freelist->clusters_allocated = 0;
  freelist->clusters = SCM_EOL;
  /* clustertail tracks where the next finished cluster gets linked. */
  freelist->clustertail = &freelist->clusters;
  /* Keep the previous sweep's count for yield comparison. */
  freelist->collected_1 = freelist->collected;
  freelist->collected = 0;
}
1518 | ||
/* Finish a sweep of FREELIST: close off the final (partial) cluster,
 * account its cells, and decide whether the heap should grow.
 */
static void
gc_sweep_freelist_finish (scm_t_freelist *freelist)
{
  long collected;
  /* Link whatever cells remain as the last cluster. */
  *freelist->clustertail = freelist->cells;
  if (!SCM_NULLP (freelist->cells))
    {
      /* Turn the head cell into a cluster spine cell. */
      SCM c = freelist->cells;
      SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
      SCM_SET_CELL_WORD_1 (c, SCM_EOL);
      freelist->collected +=
	freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}
0f2d19dd | 1544 | |
d6884e63 ML |
/* Advance PTR to the next data cell of the given SPAN, skipping over
 * GC card header cells (SCM_GC_CARD_N_HEADER_CELLS) when the step
 * lands inside a card header.
 */
#define NEXT_DATA_CELL(ptr, span) \
  do { \
    scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
    (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
	     CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
	     : nxt__); \
  } while (0)
1552 | ||
/* Sweep phase of the collector: walk every heap segment, release the
 * external storage of each unmarked cell according to its type tag, and
 * thread the reclaimed cells into per-freelist clusters.  Also updates the
 * global allocation statistics (scm_gc_cells_collected, scm_mallocated,
 * scm_cells_allocated, scm_gc_yield).
 */
void
scm_gc_sweep ()
#define FUNC_NAME "scm_gc_sweep"
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_t_freelist *freelist;
  register unsigned long m;   /* bytes of malloc'd memory freed this sweep */
  register int span;
  size_t i;
  size_t seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register long left_to_collect;
      register size_t j;

      /* Unmarked cells go onto the front of the freelist this heap
	 segment points to.  Rather than updating the real freelist
	 pointer as we go along, we accumulate the new head in
	 nfreelist.  Then, if it turns out that the entire segment is
	 free, we free (i.e., malloc's free) the whole segment, and
	 simply don't assign nfreelist back into the real freelist. */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;

      /* use only data cells in seg_size */
      seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;

      scm_gc_cells_swept += seg_size;

      for (j = seg_size + span; j -= span; ptr += span)
	{
	  SCM scmptr;

	  if (SCM_GC_IN_CARD_HEADERP (ptr))
	    {
	      SCM_CELLPTR nxt;

	      /* cheat here: jump over the card header to the next data
		 cell, compensating the loop's own `ptr += span` and
		 `j -= span` updates. */
	      nxt = ptr;
	      NEXT_DATA_CELL (nxt, span);
	      j += span;

	      ptr = nxt - span;
	      continue;
	    }

	  scmptr = PTR2SCM (ptr);

	  /* Marked cells are live; leave them alone. */
	  if (SCM_GCMARKP (scmptr))
	    continue;

	  /* The cell is garbage: release whatever external storage its
	     type owns.  Cases that `continue` (structs, subrs, revealed
	     ports) are NOT threaded onto the freelist below. */
	  switch SCM_TYP7 (scmptr)
	    {
	    case scm_tcs_struct:
	      {
		/* Structs need to be freed in a special order.
		 * This is handled by GC C hooks in struct.c.
		 */
		SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
		scm_structs_to_free = scmptr;
	      }
	      continue;
	    case scm_tcs_cons_imcar:
	    case scm_tcs_cons_nimcar:
	    case scm_tcs_closures:
	    case scm_tc7_pws:
	      break;
	    case scm_tc7_wvect:
	    case scm_tc7_vector:
	      {
		unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
		if (length > 0)
		  {
		    scm_gc_free (SCM_VECTOR_BASE (scmptr),
				 length * sizeof (scm_t_bits),
				 "vector");
		  }
		break;
	      }
#ifdef CCLO
	    case scm_tc7_cclo:
	      scm_gc_free (SCM_CCLO_BASE (scmptr),
			   SCM_CCLO_LENGTH (scmptr) * sizeof (SCM),
			   "compiled closure");
	      break;
#endif
#ifdef HAVE_ARRAYS
	    case scm_tc7_bvect:
	      {
		unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
		if (length > 0)
		  {
		    scm_gc_free (SCM_BITVECTOR_BASE (scmptr),
				 (sizeof (long)
				  * ((length+SCM_LONG_BIT-1) / SCM_LONG_BIT)),
				 "vector");
		  }
	      }
	      break;
	    case scm_tc7_byvect:
	    case scm_tc7_ivect:
	    case scm_tc7_uvect:
	    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
	    case scm_tc7_llvect:
#endif
	    case scm_tc7_fvect:
	    case scm_tc7_dvect:
	    case scm_tc7_cvect:
	      scm_gc_free (SCM_UVECTOR_BASE (scmptr),
			   (SCM_UVECTOR_LENGTH (scmptr)
			    * scm_uniform_element_size (scmptr)),
			   "vector");
	      break;
#endif
	    case scm_tc7_string:
	      scm_gc_free (SCM_STRING_CHARS (scmptr),
			   SCM_STRING_LENGTH (scmptr) + 1, "string");
	      break;
	    case scm_tc7_symbol:
	      scm_gc_free (SCM_SYMBOL_CHARS (scmptr),
			   SCM_SYMBOL_LENGTH (scmptr) + 1, "symbol");
	      break;
	    case scm_tc7_variable:
	      break;
	    case scm_tcs_subrs:
	      /* the various "subrs" (primitives) are never freed */
	      continue;
	    case scm_tc7_port:
	      if SCM_OPENP (scmptr)
		{
		  int k = SCM_PTOBNUM (scmptr);
		  size_t mm;
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
		  if (!(k < scm_numptob))
		    SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
		  /* Keep "revealed" ports alive.  */
		  if (scm_revealed_count (scmptr) > 0)
		    continue;
		  /* Yes, I really do mean scm_ptobs[k].free */
		  /* rather than ftobs[k].close.  .close */
		  /* is for explicit CLOSE-PORT by user */
		  mm = scm_ptobs[k].free (scmptr);

		  if (mm != 0)
		    {
		      scm_c_issue_deprecation_warning
			("Returning non-0 from a port free function is "
			 "deprecated.  Use scm_gc_free et al instead.");
		      scm_c_issue_deprecation_warning_fmt
			("(You just returned non-0 while freeing a %s.)",
			 SCM_PTOBNAME (k));
		      m += mm;
		    }

		  SCM_SETSTREAM (scmptr, 0);
		  scm_remove_from_port_table (scmptr);
		  scm_gc_ports_collected++;
		  SCM_CLR_PORT_OPEN_FLAG (scmptr);
		}
	      break;
	    case scm_tc7_smob:
	      switch SCM_TYP16 (scmptr)
		{
		case scm_tc_free_cell:
		case scm_tc16_real:
		  break;
#ifdef SCM_BIGDIG
		case scm_tc16_big:
		  scm_gc_free (SCM_BDIGITS (scmptr),
			       ((SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG
				 / SCM_CHAR_BIT)), "bignum");
		  break;
#endif /* def SCM_BIGDIG */
		case scm_tc16_complex:
		  scm_gc_free (SCM_COMPLEX_MEM (scmptr), 2*sizeof (double),
			       "complex");
		  break;
		default:
		  {
		    int k;
		    k = SCM_SMOBNUM (scmptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
		    if (!(k < scm_numsmob))
		      SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
		    if (scm_smobs[k].free)
		      {
			size_t mm;
			mm = scm_smobs[k].free (scmptr);
			if (mm != 0)
			  {
			    scm_c_issue_deprecation_warning
			      ("Returning non-0 from a smob free function is "
			       "deprecated.  Use scm_gc_free et al instead.");
			    scm_c_issue_deprecation_warning_fmt
			      ("(You just returned non-0 while freeing a %s.)",
			       SCM_SMOBNAME (k));
			    m += mm;
			  }
		      }
		    break;
		  }
		}
	      break;
	    default:
	      SCM_MISC_ERROR ("unknown type", SCM_EOL);
	    }

	  /* Thread the reclaimed cell onto the freelist; every
	     cluster_size-th cell becomes a new cluster spine cell. */
	  if (!--left_to_collect)
	    {
	      SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
	      *freelist->clustertail = scmptr;
	      freelist->clustertail = SCM_CDRLOC (scmptr);

	      nfreelist = SCM_EOL;
	      freelist->collected += span * freelist->cluster_size;
	      left_to_collect = freelist->cluster_size;
	    }
	  else
	    {
	      /* Stick the new cell on the front of nfreelist.  It's
		 critical that we mark this cell as freed; otherwise, the
		 conservative collector might trace it as some other type
		 of object. */
	      SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
	      SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
	      nfreelist = scmptr;
	    }
	}

#ifdef GC_FREE_SEGMENTS
      /* NOTE(review): `n` is not defined anywhere in this function, so this
	 conditionally-compiled branch cannot compile as written — it appears
	 to have bitrotted.  Verify before ever enabling GC_FREE_SEGMENTS. */
      if (n == seg_size)
	{
	  register long j;

	  freelist->heap_size -= seg_size;
	  free ((char *) scm_heap_table[i].bounds[0]);
	  scm_heap_table[i].bounds[0] = 0;
	  for (j = i + 1; j < scm_n_heap_segs; j++)
	    scm_heap_table[j - 1] = scm_heap_table[j];
	  scm_n_heap_segs -= 1;
	  i--; /* We need to scan the segment just moved.  */
	}
      else
#endif /* ifdef GC_FREE_SEGMENTS */
	{
	  /* Update the real freelist pointer to point to the head of
	     the list of free cells we've built for this segment. */
	  freelist->cells = nfreelist;
	  freelist->left_to_collect = left_to_collect;
	}

#ifdef GUILE_DEBUG_FREELIST
      scm_map_free_list ();
#endif
    }

  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;

  if (scm_mallocated < m)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
#undef FUNC_NAME
0f2d19dd JB |
1847 | |
1848 | ||
1849 | \f | |
4c9419ac MV |
1850 | /* Function for non-cell memory management. |
1851 | */ | |
1852 | ||
/* Allocate SIZE bytes of non-cell memory.  Returns NULL for a zero-size
 * request.  If the first malloc fails, run a garbage collection (which may
 * release malloc'd storage) and retry once; if that also fails, signal a
 * Scheme memory error (this call does not return).
 */
void *
scm_malloc (size_t size)
{
  void *result;

  if (size == 0)
    return NULL;

  SCM_SYSCALL (result = malloc (size));
  if (result == NULL)
    {
      /* First attempt failed: collect garbage and try exactly once more. */
      scm_igc ("malloc");
      SCM_SYSCALL (result = malloc (size));
    }

  if (result == NULL)
    scm_memory_error ("malloc");
  return result;
}
1872 | ||
/* Resize the non-cell block MEM to SIZE bytes.  On failure, garbage
 * collect and retry once; if realloc still fails, signal a Scheme memory
 * error (this call does not return).  MEM remains valid until a successful
 * reallocation.
 */
void *
scm_realloc (void *mem, size_t size)
{
  void *result;

  SCM_SYSCALL (result = realloc (mem, size));
  if (result == NULL)
    {
      /* Out of memory: collect garbage and try exactly once more. */
      scm_igc ("realloc");
      SCM_SYSCALL (result = realloc (mem, size));
    }

  if (result == NULL)
    scm_memory_error ("realloc");
  return result;
}
1889 | ||
/* Copy the first N bytes of STR into a freshly scm_malloc'd,
 * NUL-terminated buffer.  Caller owns the result.
 */
char *
scm_strndup (const char *str, size_t n)
{
  char *copy = scm_malloc (n + 1);
  memcpy (copy, str, n);
  copy[n] = '\0';
  return copy;
}
1898 | ||
/* Duplicate the NUL-terminated string STR into scm_malloc'd storage.
 * Caller owns the result.
 */
char *
scm_strdup (const char *str)
{
  size_t len = strlen (str);
  return scm_strndup (str, len);
}
1904 | ||
1905 | void | |
1906 | scm_gc_register_collectable_memory (void *mem, size_t size, const char *what) | |
1907 | { | |
1908 | scm_mallocated += size; | |
1909 | ||
1910 | if (scm_mallocated > scm_mtrigger) | |
1911 | { | |
1912 | scm_igc (what); | |
1913 | if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) | |
1914 | { | |
1915 | if (scm_mallocated > scm_mtrigger) | |
1916 | scm_mtrigger = scm_mallocated + scm_mallocated / 2; | |
1917 | else | |
1918 | scm_mtrigger += scm_mtrigger / 2; | |
1919 | } | |
1920 | } | |
1921 | ||
1922 | #ifdef GUILE_DEBUG_MALLOC | |
1923 | scm_malloc_register (mem, what); | |
1924 | #endif | |
1925 | } | |
1926 | ||
1927 | void | |
1928 | scm_gc_unregister_collectable_memory (void *mem, size_t size, const char *what) | |
1929 | { | |
1930 | scm_mallocated -= size; | |
1931 | ||
1932 | #ifdef GUILE_DEBUG_MALLOC | |
1933 | scm_malloc_unregister (mem); | |
1934 | #endif | |
1935 | } | |
1936 | ||
/* Allocate SIZE bytes with scm_malloc and register them as collectable
 * memory under the label WHAT.  Never returns NULL (scm_malloc signals a
 * memory error instead).
 */
void *
scm_gc_malloc (size_t size, const char *what)
{
  /* XXX - The straightforward implementation below has the problem
     that it might call the GC twice, once in scm_malloc and then
     again in scm_gc_register_collectable_memory.  We don't really
     want the second GC.
   */
  void *mem = scm_malloc (size);
  scm_gc_register_collectable_memory (mem, size, what);
  return mem;
}
1950 | ||
/* Resize the collectable block MEM from OLD_SIZE to NEW_SIZE bytes,
 * keeping the GC byte accounting in step (old size unregistered, new size
 * registered under WHAT).
 */
void *
scm_gc_realloc (void *mem, size_t old_size, size_t new_size, const char *what)
{
  /* XXX - see scm_gc_malloc. */
  void *new_mem = scm_realloc (mem, new_size);
  scm_gc_unregister_collectable_memory (mem, old_size, what);
  scm_gc_register_collectable_memory (new_mem, new_size, what);
  return new_mem;
}
1961 | ||
/* Release the collectable block MEM of SIZE bytes: deduct it from the GC
 * byte accounting (under the label WHAT), then hand it back to malloc.
 */
void
scm_gc_free (void *mem, size_t size, const char *what)
{
  scm_gc_unregister_collectable_memory (mem, size, what);
  free (mem);
}
1968 | ||
/* Copy the first N bytes of STR into fresh collectable storage (registered
 * under WHAT) and NUL-terminate the copy.
 */
char *
scm_gc_strndup (const char *str, size_t n, const char *what)
{
  char *copy = scm_gc_malloc (n + 1, what);
  memcpy (copy, str, n);
  copy[n] = '\0';
  return copy;
}
1977 | ||
/* Duplicate the NUL-terminated string STR into collectable storage
 * registered under WHAT.
 */
char *
scm_gc_strdup (const char *str, const char *what)
{
  size_t len = strlen (str);
  return scm_gc_strndup (str, len, what);
}
1983 | ||
1984 | /* {Deprecated front end to malloc} | |
0f2d19dd | 1985 | * |
9d47a1e6 ML |
1986 | * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc, |
1987 | * scm_done_free | |
0f2d19dd | 1988 | * |
c6c79933 GH |
1989 | * These functions provide services comparable to malloc, realloc, and |
1990 | * free. They should be used when allocating memory that will be under | |
1991 | * control of the garbage collector, i.e., if the memory may be freed | |
1992 | * during garbage collection. | |
1993 | */ | |
bc9d9bb2 | 1994 | |
0f2d19dd JB |
1995 | /* scm_must_malloc |
1996 | * Return newly malloced storage or throw an error. | |
1997 | * | |
1998 | * The parameter WHAT is a string for error reporting. | |
a00c95d9 | 1999 | * If the threshold scm_mtrigger will be passed by this |
0f2d19dd JB |
2000 | * allocation, or if the first call to malloc fails, |
2001 | * garbage collect -- on the presumption that some objects | |
2002 | * using malloced storage may be collected. | |
2003 | * | |
2004 | * The limit scm_mtrigger may be raised by this allocation. | |
2005 | */ | |
/* Allocate SIZE bytes or throw a memory error; WHAT labels the allocation
 * for error reporting and debug-malloc tracking.  If the allocation would
 * push scm_mallocated past scm_mtrigger, or if the first malloc fails,
 * garbage collect and retry once; a successful post-GC allocation may also
 * raise scm_mtrigger (with hysteresis).
 */
void *
scm_must_malloc (size_t size, const char *what)
{
  void *ptr;
  unsignedned_long_guard: /* (no label in original — see below) */
  unsigned long nm = scm_mallocated + size;

  if (nm < size)
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  /* Fast path: we are under the trigger, so try malloc directly. */
  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
	{
	  scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
	  scm_malloc_register (ptr, what);
#endif
	  return ptr;
	}
    }

  /* Trigger exceeded or malloc failed: collect and recompute. */
  scm_igc (what);

  nm = scm_mallocated + size;

  if (nm < size)
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;

      /* Raise the trigger so we don't immediately collect again; abort if
         the trigger arithmetic itself wraps around. */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
	unsigned long old_trigger = scm_mtrigger;
	if (nm > scm_mtrigger)
	  scm_mtrigger = nm + nm / 2;
	else
	  scm_mtrigger += scm_mtrigger / 2;
	if (scm_mtrigger < old_trigger)
	  abort ();
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_register (ptr, what);
#endif

      return ptr;
    }

  /* Still no memory even after GC: throw.  Does not return. */
  scm_memory_error (what);
}
2064 | ||
2065 | ||
2066 | /* scm_must_realloc | |
2067 | * is similar to scm_must_malloc. | |
2068 | */ | |
/* scm_must_realloc
 * is similar to scm_must_malloc: grow WHERE from OLD_SIZE to SIZE bytes,
 * garbage collecting and retrying once before throwing a memory error.
 * Shrinking (size <= old_size) is a no-op that returns WHERE unchanged.
 */
void *
scm_must_realloc (void *where,
		  size_t old_size,
		  size_t size,
		  const char *what)
{
  void *ptr;
  unsigned long nm;

  if (size <= old_size)
    return where;

  nm = scm_mallocated + size - old_size;

  if (nm < (size - old_size))
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  /* Fast path: under the trigger, try realloc directly. */
  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = realloc (where, size));
      if (NULL != ptr)
	{
	  scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
	  scm_malloc_reregister (where, ptr, what);
#endif
	  return ptr;
	}
    }

  /* Trigger exceeded or realloc failed: collect and recompute. */
  scm_igc (what);

  nm = scm_mallocated + size - old_size;

  if (nm < (size - old_size))
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  SCM_SYSCALL (ptr = realloc (where, size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      /* Raise the trigger with hysteresis; abort if the trigger
         arithmetic itself wraps around. */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
	unsigned long old_trigger = scm_mtrigger;
	if (nm > scm_mtrigger)
	  scm_mtrigger = nm + nm / 2;
	else
	  scm_mtrigger += scm_mtrigger / 2;
	if (scm_mtrigger < old_trigger)
	  abort ();
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_reregister (where, ptr, what);
#endif
      return ptr;
    }

  /* Still no memory even after GC: throw.  Does not return. */
  scm_memory_error (what);
}
2133 | ||
/* Copy the first LENGTH bytes of STR into a scm_must_malloc'd,
 * NUL-terminated buffer (throws on allocation failure).
 */
char *
scm_must_strndup (const char *str, size_t length)
{
  char *copy = scm_must_malloc (length + 1, "scm_must_strndup");
  memcpy (copy, str, length);
  copy[length] = '\0';
  return copy;
}
2142 | ||
/* Duplicate the NUL-terminated string STR via scm_must_strndup (throws on
 * allocation failure).
 */
char *
scm_must_strdup (const char *str)
{
  size_t len = strlen (str);
  return scm_must_strndup (str, len);
}
acf4331f | 2148 | |
/* Free OBJ, which must be a non-NULL pointer previously obtained from
 * scm_must_malloc/scm_must_realloc.  Passing NULL is treated as a caller
 * bug and raises a misc-error (deliberately stricter than free(NULL)).
 */
void
scm_must_free (void *obj)
#define FUNC_NAME "scm_must_free"
{
#ifdef GUILE_DEBUG_MALLOC
  scm_malloc_unregister (obj);
#endif
  if (obj)
    free (obj);
  else
    SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
}
#undef FUNC_NAME
2162 | ||
0f2d19dd | 2163 | |
c68296f8 MV |
2164 | /* Announce that there has been some malloc done that will be freed |
2165 | * during gc. A typical use is for a smob that uses some malloced | |
2166 | * memory but can not get it from scm_must_malloc (for whatever | |
2167 | * reason). When a new object of this smob is created you call | |
2168 | * scm_done_malloc with the size of the object. When your smob free | |
2169 | * function is called, be sure to include this size in the return | |
9d47a1e6 ML |
2170 | * value. |
2171 | * | |
2172 | * If you can't actually free the memory in the smob free function, | |
2173 | * for whatever reason (like reference counting), you still can (and | |
2174 | * should) report the amount of memory freed when you actually free it. | |
2175 | * Do it by calling scm_done_malloc with the _negated_ size. Clever, | |
2176 | * eh? Or even better, call scm_done_free. */ | |
0f2d19dd | 2177 | |
c68296f8 | 2178 | void |
c014a02e | 2179 | scm_done_malloc (long size) |
c68296f8 | 2180 | { |
1be6b49c ML |
2181 | if (size < 0) { |
2182 | if (scm_mallocated < size) | |
2183 | /* The byte count of allocated objects has underflowed. This is | |
2184 | probably because you forgot to report the sizes of objects you | |
2185 | have allocated, by calling scm_done_malloc or some such. When | |
2186 | the GC freed them, it subtracted their size from | |
2187 | scm_mallocated, which underflowed. */ | |
2188 | abort (); | |
2189 | } else { | |
c014a02e | 2190 | unsigned long nm = scm_mallocated + size; |
1be6b49c ML |
2191 | if (nm < size) |
2192 | /* The byte count of allocated objects has overflowed. This is | |
2193 | probably because you forgot to report the correct size of freed | |
2194 | memory in some of your smob free methods. */ | |
2195 | abort (); | |
2196 | } | |
eae33935 | 2197 | |
c68296f8 MV |
2198 | scm_mallocated += size; |
2199 | ||
2200 | if (scm_mallocated > scm_mtrigger) | |
2201 | { | |
2202 | scm_igc ("foreign mallocs"); | |
2203 | if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) | |
2204 | { | |
2205 | if (scm_mallocated > scm_mtrigger) | |
2206 | scm_mtrigger = scm_mallocated + scm_mallocated / 2; | |
2207 | else | |
2208 | scm_mtrigger += scm_mtrigger / 2; | |
2209 | } | |
2210 | } | |
2211 | } | |
2212 | ||
9d47a1e6 | 2213 | void |
c014a02e | 2214 | scm_done_free (long size) |
9d47a1e6 | 2215 | { |
1be6b49c ML |
2216 | if (size >= 0) { |
2217 | if (scm_mallocated < size) | |
2218 | /* The byte count of allocated objects has underflowed. This is | |
2219 | probably because you forgot to report the sizes of objects you | |
2220 | have allocated, by calling scm_done_malloc or some such. When | |
2221 | the GC freed them, it subtracted their size from | |
2222 | scm_mallocated, which underflowed. */ | |
2223 | abort (); | |
2224 | } else { | |
5e7248f2 | 2225 | unsigned long nm = scm_mallocated - size; |
1be6b49c ML |
2226 | if (nm < size) |
2227 | /* The byte count of allocated objects has overflowed. This is | |
2228 | probably because you forgot to report the correct size of freed | |
2229 | memory in some of your smob free methods. */ | |
2230 | abort (); | |
2231 | } | |
2232 | ||
9d47a1e6 ML |
2233 | scm_mallocated -= size; |
2234 | } | |
2235 | ||
c68296f8 MV |
2236 | |
2237 | \f | |
0f2d19dd JB |
2238 | /* {Heap Segments} |
2239 | * | |
2240 | * Each heap segment is an array of objects of a particular size. | |
2241 | * Every segment has an associated (possibly shared) freelist. | |
2242 | * A table of segment records is kept that records the upper and | |
2243 | * lower extents of the segment; this is used during the conservative | |
2244 | * phase of gc to identify probable gc roots (because they point |
c68296f8 | 2245 | * into valid segments at reasonable offsets). */ |
0f2d19dd JB |
2246 | |
2247 | /* scm_expmem | |
2248 | * is true if the first segment was smaller than INIT_HEAP_SEG. | |
2249 | * If scm_expmem is set to one, subsequent segment allocations will | |
2250 | * allocate segments of size SCM_EXPHEAP(scm_heap_size). | |
2251 | */ | |
2252 | int scm_expmem = 0; | |
2253 | ||
1be6b49c | 2254 | size_t scm_max_segment_size; |
4c48ba06 | 2255 | |
0f2d19dd JB |
2256 | /* scm_heap_org |
2257 | * is the lowest base address of any heap segment. | |
2258 | */ | |
2259 | SCM_CELLPTR scm_heap_org; | |
2260 | ||
92c2555f | 2261 | scm_t_heap_seg_data * scm_heap_table = 0; |
1be6b49c ML |
2262 | static size_t heap_segment_table_size = 0; |
2263 | size_t scm_n_heap_segs = 0; | |
0f2d19dd | 2264 | |
0f2d19dd | 2265 | /* init_heap_seg |
d6884e63 | 2266 | * initializes a new heap segment and returns the number of objects it contains. |
0f2d19dd | 2267 | * |
d6884e63 ML |
2268 | * The segment origin and segment size in bytes are input parameters. |
2269 | * The freelist is both input and output. | |
0f2d19dd | 2270 | * |
d6884e63 ML |
2271 | * This function presumes that the scm_heap_table has already been expanded |
2272 | * to accommodate a new segment record and that the markbit space was reserved |
2273 | * for all the cards in this segment. | |
0f2d19dd JB |
2274 | */ |
2275 | ||
/* Initialize a freshly carved GC card: attach a mark bitvector, and for
 * double-cell freelists (span == 2) flag the card as holding double cells.
 */
#define INIT_CARD(card, span) \
  do { \
    SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
    if ((span) == 2) \
      SCM_GC_SET_CARD_DOUBLECELL (card); \
  } while (0)
0f2d19dd | 2282 | |
/* Initialize the heap segment of SIZE bytes starting at SEG_ORG for use by
 * FREELIST, record it in scm_heap_table (kept sorted by base address), and
 * partition its cells into clusters of free cells chained onto the
 * freelist.  Returns SIZE, or 0 if SEG_ORG is NULL.  Presumes the heap
 * table already has room for one more record and that markbit space for
 * the segment's cards has been reserved.
 */
static size_t
init_heap_seg (SCM_CELLPTR seg_org, size_t size, scm_t_freelist *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  size_t new_seg_index;
  ptrdiff_t n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  /* Align the begin ptr up.
   */
  ptr = SCM_GC_CARD_UP (seg_org);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);

  /* Find the right place and insert the segment record.
   */
  new_seg_index = 0;
  while (new_seg_index < scm_n_heap_segs
	 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org))
    new_seg_index++;

  /* Shift later records up by one to open the slot. */
  {
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  /*n_new_cells*/
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;

    NEXT_DATA_CELL (ptr, span);
    while (ptr < seg_end)
      {
	scm_cell *nxt = ptr;
	scm_cell *prv = NULL;
	scm_cell *last_card = NULL;
	int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
	NEXT_DATA_CELL(nxt, span);

	/* Allocate cluster spine
	 */
	*clusterp = PTR2SCM (ptr);
	SCM_SETCAR (*clusterp, PTR2SCM (nxt));
	clusterp = SCM_CDRLOC (*clusterp);
	ptr = nxt;

	/* Chain the cluster's data cells together as free cells,
	   initializing each card the first time we touch it. */
	while (n_data_cells--)
	  {
	    scm_cell *card = SCM_GC_CELL_CARD (ptr);
	    SCM scmptr = PTR2SCM (ptr);
	    nxt = ptr;
	    NEXT_DATA_CELL (nxt, span);
	    prv = ptr;

	    if (card != last_card)
	      {
		INIT_CARD (card, span);
		last_card = card;
	      }

	    SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
	    SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));

	    ptr = nxt;
	  }

	/* Terminate this cluster's free-cell chain. */
	SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
      }

    /* sanity check */
    {
      scm_cell *ref = seg_end;
      NEXT_DATA_CELL (ref, span);
      if (ref != ptr)
	/* [cmm] looks like the segment size doesn't divide cleanly by
	   cluster size.  bad cmm! */
	abort();
    }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}
2394 | ||
1be6b49c | 2395 | static size_t |
92c2555f | 2396 | round_to_cluster_size (scm_t_freelist *freelist, size_t len) |
a00c95d9 | 2397 | { |
1be6b49c | 2398 | size_t cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist); |
a00c95d9 ML |
2399 | |
2400 | return | |
2401 | (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes | |
2402 | + ALIGNMENT_SLACK (freelist); | |
2403 | } | |
2404 | ||
/* Add one heap segment to FREELIST's heap.
 *
 * The segment size is chosen so the next GC is predicted to reclaim at
 * least the freelist's minimum yield; allocation is then retried with
 * halved sizes ("decaying ambition") down to the minimum segment and
 * cluster sizes.  On failure the function either aborts or returns
 * silently, according to ERROR_POLICY.  */
static void
alloc_some_heap (scm_t_freelist *freelist, policy_on_error error_policy)
#define FUNC_NAME "alloc_some_heap"
{
  SCM_CELLPTR ptr;
  size_t len;

  if (scm_gc_heap_lock)
    {
      /* Critical code sections (such as the garbage collector) aren't
       * supposed to add heap segments.
       */
      fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
      abort ();
    }

  if (scm_n_heap_segs == heap_segment_table_size)
    {
      /* We have to expand the heap segment table to have room for the new
       * segment.  Do not yet increment scm_n_heap_segs -- that is done by
       * init_heap_seg only if the allocation of the segment itself succeeds.
       */
      size_t new_table_size = scm_n_heap_segs + 1;
      size_t size = new_table_size * sizeof (scm_t_heap_seg_data);
      scm_t_heap_seg_data *new_heap_table;

      /* realloc into a temporary so scm_heap_table stays valid if the
       * reallocation fails.  */
      SCM_SYSCALL (new_heap_table = ((scm_t_heap_seg_data *)
				     realloc ((char *)scm_heap_table, size)));
      if (!new_heap_table)
	{
	  if (error_policy == abort_on_error)
	    {
	      fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
	      abort ();
	    }
	  else
	    {
	      return;
	    }
	}
      else
	{
	  scm_heap_table = new_heap_table;
	  heap_segment_table_size = new_table_size;
	}
    }

  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained in
   * gc.h
   */
  {
    /* Assure that the new segment is predicted to be large enough.
     *
     * New yield should at least equal GC fraction of new heap size, i.e.
     *
     *   y + dh > f * (h + dh)
     *
     *    y : yield
     *    f : min yield fraction
     *    h : heap size
     *   dh : size of new heap segment
     *
     * This gives dh > (f * h - y) / (1 - f)
     */
    /* NOTE(review): f == 99 would divide by zero below; presumably
     * min_yield_fraction is always < 99 -- confirm at call sites.  */
    int f = freelist->min_yield_fraction;
    unsigned long h = SCM_HEAP_SIZE;
    size_t min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
    len =  SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    fprintf (stderr, "(%ld < %ld)", (long) len, (long) min_cells);
#endif
    if (len < min_cells)
      len = min_cells + freelist->cluster_size;
    /* len was counted in cells up to here; convert to bytes.  */
    len *= sizeof (scm_cell);
    /* force new sampling */
    freelist->collected = LONG_MAX;
  }

  if (len > scm_max_segment_size)
    len = scm_max_segment_size;

  {
    size_t smallest;

    smallest = CLUSTER_SIZE_IN_BYTES (freelist);

    if (len < smallest)
      len = smallest;

    /* Allocate with decaying ambition. */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
	   && (len >= smallest))
      {
	size_t rounded_len = round_to_cluster_size (freelist, len);
	SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
	if (ptr)
	  {
	    init_heap_seg (ptr, rounded_len, freelist);
	    return;
	  }
	len /= 2;
      }
  }

  if (error_policy == abort_on_error)
    {
      fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
      abort ();
    }
}
#undef FUNC_NAME
0f2d19dd | 2517 | |
0f2d19dd JB |
2518 | \f |
2519 | /* {GC Protection Helper Functions} | |
2520 | */ | |
2521 | ||
2522 | ||
5d2b97cd DH |
2523 | /* |
2524 | * If within a function you need to protect one or more scheme objects from | |
2525 | * garbage collection, pass them as parameters to one of the | |
2526 | * scm_remember_upto_here* functions below. These functions don't do | |
2527 | * anything, but since the compiler does not know that they are actually | |
2528 | * no-ops, it will generate code that calls these functions with the given | |
2529 | * parameters. Therefore, you can be sure that the compiler will keep those | |
2530 | * scheme values alive (on the stack or in a register) up to the point where | |
2531 | * scm_remember_upto_here* is called. In other words, place the call to | |
 * scm_remember_upto_here* _after_ the last code in your function that
 * depends on the scheme object's existence.
2534 | * | |
8c494e99 DH |
2535 | * Example: We want to make sure that the string object str does not get |
2536 | * garbage collected during the execution of 'some_function' in the code | |
2537 | * below, because otherwise the characters belonging to str would be freed and | |
5d2b97cd DH |
2538 | * 'some_function' might access freed memory. To make sure that the compiler |
2539 | * keeps str alive on the stack or in a register such that it is visible to | |
2540 | * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the | |
2541 | * call to 'some_function'. Note that this would not be necessary if str was | |
2542 | * used anyway after the call to 'some_function'. | |
2543 | * char *chars = SCM_STRING_CHARS (str); | |
2544 | * some_function (chars); | |
2545 | * scm_remember_upto_here_1 (str); // str will be alive up to this point. | |
2546 | */ | |
2547 | ||
2548 | void | |
e81d98ec | 2549 | scm_remember_upto_here_1 (SCM obj SCM_UNUSED) |
5d2b97cd DH |
2550 | { |
2551 | /* Empty. Protects a single object from garbage collection. */ | |
2552 | } | |
2553 | ||
2554 | void | |
e81d98ec | 2555 | scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED) |
5d2b97cd DH |
2556 | { |
2557 | /* Empty. Protects two objects from garbage collection. */ | |
2558 | } | |
2559 | ||
2560 | void | |
e81d98ec | 2561 | scm_remember_upto_here (SCM obj SCM_UNUSED, ...) |
5d2b97cd DH |
2562 | { |
2563 | /* Empty. Protects any number of objects from garbage collection. */ | |
2564 | } | |
2565 | ||
c209c88e | 2566 | /* |
41b0806d GB |
2567 | These crazy functions prevent garbage collection |
2568 | of arguments after the first argument by | |
2569 | ensuring they remain live throughout the | |
2570 | function because they are used in the last | |
2571 | line of the code block. | |
2572 | It'd be better to have a nice compiler hint to | |
2573 | aid the conservative stack-scanning GC. --03/09/00 gjb */ | |
0f2d19dd JB |
2574 | SCM |
2575 | scm_return_first (SCM elt, ...) | |
0f2d19dd JB |
2576 | { |
2577 | return elt; | |
2578 | } | |
2579 | ||
41b0806d GB |
int
scm_return_first_int (int i, ...)
{
  /* int-typed variant of scm_return_first: returns I, while the extra
     arguments merely stay live for the conservative GC.  */
  return i;
}
2585 | ||
0f2d19dd | 2586 | |
0f2d19dd | 2587 | SCM |
6e8d25a6 | 2588 | scm_permanent_object (SCM obj) |
0f2d19dd JB |
2589 | { |
2590 | SCM_REDEFER_INTS; | |
2591 | scm_permobjs = scm_cons (obj, scm_permobjs); | |
2592 | SCM_REALLOW_INTS; | |
2593 | return obj; | |
2594 | } | |
2595 | ||
2596 | ||
7bd4fbe2 MD |
2597 | /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all |
2598 | other references are dropped, until the object is unprotected by calling | |
6b1b030e | 2599 | scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest, |
7bd4fbe2 MD |
2600 | i. e. it is possible to protect the same object several times, but it is |
2601 | necessary to unprotect the object the same number of times to actually get | |
2602 | the object unprotected. It is an error to unprotect an object more often | |
2603 | than it has been protected before. The function scm_protect_object returns | |
2604 | OBJ. | |
2605 | */ | |
2606 | ||
2607 | /* Implementation note: For every object X, there is a counter which | |
6b1b030e | 2608 | scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements. |
7bd4fbe2 | 2609 | */ |
686765af | 2610 | |
ef290276 | 2611 | SCM |
6b1b030e | 2612 | scm_gc_protect_object (SCM obj) |
ef290276 | 2613 | { |
686765af | 2614 | SCM handle; |
9d47a1e6 | 2615 | |
686765af | 2616 | /* This critical section barrier will be replaced by a mutex. */ |
2dd6a83a | 2617 | SCM_REDEFER_INTS; |
9d47a1e6 | 2618 | |
0f0f0899 | 2619 | handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0)); |
1be6b49c | 2620 | SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1))); |
9d47a1e6 | 2621 | |
2dd6a83a | 2622 | SCM_REALLOW_INTS; |
9d47a1e6 | 2623 | |
ef290276 JB |
2624 | return obj; |
2625 | } | |
2626 | ||
2627 | ||
/* Remove one level of protection for OBJ established by a prior call to
   scm_gc_protect_object.  This function returns OBJ.

   See scm_gc_protect_object for more information.  */
ef290276 | 2632 | SCM |
6b1b030e | 2633 | scm_gc_unprotect_object (SCM obj) |
ef290276 | 2634 | { |
686765af | 2635 | SCM handle; |
9d47a1e6 | 2636 | |
686765af | 2637 | /* This critical section barrier will be replaced by a mutex. */ |
2dd6a83a | 2638 | SCM_REDEFER_INTS; |
9d47a1e6 | 2639 | |
686765af | 2640 | handle = scm_hashq_get_handle (scm_protects, obj); |
9d47a1e6 | 2641 | |
22a52da1 | 2642 | if (SCM_FALSEP (handle)) |
686765af | 2643 | { |
0f0f0899 MD |
2644 | fprintf (stderr, "scm_unprotect_object called on unprotected object\n"); |
2645 | abort (); | |
686765af | 2646 | } |
6a199940 DH |
2647 | else |
2648 | { | |
1be6b49c ML |
2649 | SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1)); |
2650 | if (SCM_EQ_P (count, SCM_MAKINUM (0))) | |
6a199940 DH |
2651 | scm_hashq_remove_x (scm_protects, obj); |
2652 | else | |
1be6b49c | 2653 | SCM_SETCDR (handle, count); |
6a199940 | 2654 | } |
686765af | 2655 | |
2dd6a83a | 2656 | SCM_REALLOW_INTS; |
ef290276 JB |
2657 | |
2658 | return obj; | |
2659 | } | |
2660 | ||
6b1b030e ML |
2661 | void |
2662 | scm_gc_register_root (SCM *p) | |
2663 | { | |
2664 | SCM handle; | |
2665 | SCM key = scm_long2num ((long) p); | |
eae33935 | 2666 | |
6b1b030e ML |
2667 | /* This critical section barrier will be replaced by a mutex. */ |
2668 | SCM_REDEFER_INTS; | |
2669 | ||
2670 | handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0)); | |
2671 | SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1))); | |
2672 | ||
2673 | SCM_REALLOW_INTS; | |
2674 | } | |
2675 | ||
2676 | void | |
2677 | scm_gc_unregister_root (SCM *p) | |
2678 | { | |
2679 | SCM handle; | |
2680 | SCM key = scm_long2num ((long) p); | |
2681 | ||
2682 | /* This critical section barrier will be replaced by a mutex. */ | |
2683 | SCM_REDEFER_INTS; | |
2684 | ||
2685 | handle = scm_hashv_get_handle (scm_gc_registered_roots, key); | |
2686 | ||
2687 | if (SCM_FALSEP (handle)) | |
2688 | { | |
2689 | fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n"); | |
2690 | abort (); | |
2691 | } | |
2692 | else | |
2693 | { | |
2694 | SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1)); | |
2695 | if (SCM_EQ_P (count, SCM_MAKINUM (0))) | |
2696 | scm_hashv_remove_x (scm_gc_registered_roots, key); | |
2697 | else | |
2698 | SCM_SETCDR (handle, count); | |
2699 | } | |
2700 | ||
2701 | SCM_REALLOW_INTS; | |
2702 | } | |
2703 | ||
2704 | void | |
2705 | scm_gc_register_roots (SCM *b, unsigned long n) | |
2706 | { | |
2707 | SCM *p = b; | |
2708 | for (; p < b + n; ++p) | |
2709 | scm_gc_register_root (p); | |
2710 | } | |
2711 | ||
2712 | void | |
2713 | scm_gc_unregister_roots (SCM *b, unsigned long n) | |
2714 | { | |
2715 | SCM *p = b; | |
2716 | for (; p < b + n; ++p) | |
2717 | scm_gc_unregister_root (p); | |
2718 | } | |
2719 | ||
c45acc34 JB |
/* Set once the process starts terminating; presumably consulted by code
   outside this chunk to avoid work during shutdown -- confirm readers.  */
int terminating;

/* called on process termination. */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

/* on_exit passes the exit status and a user argument; both are unused.  */
static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  /* Flush buffered output on all ports before the process exits.  */
  scm_flush_all_ports ();
}
ef290276 | 2740 | |
0f2d19dd | 2741 | \f |
/* Allocate and install the first heap segment for FREELIST: try
 * INIT_HEAP_SIZE bytes first, falling back to SCM_HEAP_SEG_SIZE.
 * Returns 0 on success, 1 if both attempts fail.  Also derives the
 * freelist's min_yield and grow_heap_p GC-trigger settings from the
 * resulting heap size.  */
static int
make_initial_segment (size_t init_heap_size, scm_t_freelist *freelist)
{
  size_t rounded_size = round_to_cluster_size (freelist, init_heap_size);

  /* NOTE(review): malloc's result is passed unchecked; presumably
     init_heap_seg rejects a NULL base and returns 0 -- confirm in its
     definition (outside this chunk).  */
  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
		      rounded_size,
		      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
			  rounded_size,
			  freelist))
	return 1;
    }
  else
    /* Set only when the requested (non-fallback) size succeeded.  */
    scm_expmem = 1;

  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
			   / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}
2767 | ||
2768 | \f | |
4c48ba06 | 2769 | static void |
92c2555f | 2770 | init_freelist (scm_t_freelist *freelist, |
4c48ba06 | 2771 | int span, |
c014a02e | 2772 | long cluster_size, |
8fef55a8 | 2773 | int min_yield) |
4c48ba06 MD |
2774 | { |
2775 | freelist->clusters = SCM_EOL; | |
2776 | freelist->cluster_size = cluster_size + 1; | |
b37fe1c5 MD |
2777 | freelist->left_to_collect = 0; |
2778 | freelist->clusters_allocated = 0; | |
8fef55a8 MD |
2779 | freelist->min_yield = 0; |
2780 | freelist->min_yield_fraction = min_yield; | |
4c48ba06 MD |
2781 | freelist->span = span; |
2782 | freelist->collected = 0; | |
1811ebce | 2783 | freelist->collected_1 = 0; |
4c48ba06 MD |
2784 | freelist->heap_size = 0; |
2785 | } | |
2786 | ||
85db4a2c DH |
2787 | |
/* Read an integer configuration value from environment variable VAR.
 * Returns DEF when the variable is unset or does not begin with a
 * decimal number; trailing non-numeric text after the digits is
 * ignored, as before.  Values outside the range of `int' are clamped
 * to INT_MIN/INT_MAX instead of being silently truncated (the old
 * behavior was implementation-defined).  */
static int
scm_i_getenv_int (const char *var, int def)
{
  char *end, *val = getenv (var);
  long res;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  /* Clamp rather than truncate when `long' is wider than `int'.  */
  if (res > INT_MAX)
    return INT_MAX;
  if (res < INT_MIN)
    return INT_MIN;
  return (int) res;
}
2801 | ||
2802 | ||
4a4c9785 | 2803 | int |
85db4a2c | 2804 | scm_init_storage () |
0f2d19dd | 2805 | { |
1be6b49c ML |
2806 | unsigned long gc_trigger_1; |
2807 | unsigned long gc_trigger_2; | |
2808 | size_t init_heap_size_1; | |
2809 | size_t init_heap_size_2; | |
2810 | size_t j; | |
0f2d19dd JB |
2811 | |
2812 | j = SCM_NUM_PROTECTS; | |
2813 | while (j) | |
2814 | scm_sys_protects[--j] = SCM_BOOL_F; | |
2815 | scm_block_gc = 1; | |
4a4c9785 | 2816 | |
4a4c9785 | 2817 | scm_freelist = SCM_EOL; |
4c48ba06 | 2818 | scm_freelist2 = SCM_EOL; |
85db4a2c DH |
2819 | gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1); |
2820 | init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1); | |
2821 | gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2); | |
2822 | init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2); | |
2823 | scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size); | |
4a4c9785 | 2824 | |
0f2d19dd JB |
2825 | scm_expmem = 0; |
2826 | ||
2827 | j = SCM_HEAP_SEG_SIZE; | |
2828 | scm_mtrigger = SCM_INIT_MALLOC_LIMIT; | |
92c2555f | 2829 | scm_heap_table = ((scm_t_heap_seg_data *) |
4c9419ac | 2830 | scm_malloc (sizeof (scm_t_heap_seg_data) * 2)); |
b6efc951 | 2831 | heap_segment_table_size = 2; |
acb0a19c | 2832 | |
d6884e63 ML |
2833 | mark_space_ptr = &mark_space_head; |
2834 | ||
85db4a2c DH |
2835 | init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1); |
2836 | init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2); | |
4c48ba06 MD |
2837 | if (make_initial_segment (init_heap_size_1, &scm_master_freelist) || |
2838 | make_initial_segment (init_heap_size_2, &scm_master_freelist2)) | |
4a4c9785 | 2839 | return 1; |
acb0a19c | 2840 | |
801cb5e7 | 2841 | /* scm_hplims[0] can change. do not remove scm_heap_org */ |
a00c95d9 | 2842 | scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1); |
acb0a19c | 2843 | |
801cb5e7 MD |
2844 | scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL); |
2845 | scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL); | |
2846 | scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL); | |
2847 | scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL); | |
2848 | scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL); | |
0f2d19dd JB |
2849 | |
2850 | /* Initialise the list of ports. */ | |
f5fd8aa2 MV |
2851 | scm_port_table = (scm_t_port **) |
2852 | malloc (sizeof (scm_t_port *) * scm_port_table_room); | |
2853 | if (!scm_port_table) | |
0f2d19dd JB |
2854 | return 1; |
2855 | ||
a18bcd0e | 2856 | #ifdef HAVE_ATEXIT |
c45acc34 | 2857 | atexit (cleanup); |
e52ceaac MD |
2858 | #else |
2859 | #ifdef HAVE_ON_EXIT | |
2860 | on_exit (cleanup, 0); | |
2861 | #endif | |
a18bcd0e | 2862 | #endif |
0f2d19dd | 2863 | |
8960e0a0 | 2864 | scm_stand_in_procs = SCM_EOL; |
0f2d19dd | 2865 | scm_permobjs = SCM_EOL; |
00ffa0e7 | 2866 | scm_protects = scm_c_make_hash_table (31); |
6b1b030e | 2867 | scm_gc_registered_roots = scm_c_make_hash_table (31); |
d6884e63 | 2868 | |
0f2d19dd JB |
2869 | return 0; |
2870 | } | |
939794ce | 2871 | |
0f2d19dd JB |
2872 | \f |
2873 | ||
939794ce DH |
2874 | SCM scm_after_gc_hook; |
2875 | ||
939794ce DH |
2876 | static SCM gc_async; |
2877 | ||
939794ce DH |
2878 | /* The function gc_async_thunk causes the execution of the after-gc-hook. It |
2879 | * is run after the gc, as soon as the asynchronous events are handled by the | |
2880 | * evaluator. | |
2881 | */ | |
2882 | static SCM | |
2883 | gc_async_thunk (void) | |
2884 | { | |
2885 | scm_c_run_hook (scm_after_gc_hook, SCM_EOL); | |
939794ce DH |
2886 | return SCM_UNSPECIFIED; |
2887 | } | |
2888 | ||
2889 | ||
/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void * hook_data SCM_UNUSED,
	       void *func_data SCM_UNUSED,
	       void *data SCM_UNUSED)
{
  /* All three hook parameters are ignored (SCM_UNUSED).  */
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if the
   * after-gc-hook was performed with every gc, and if the gc was performed
   * after a very small number of cell accesses, then the number of cell
   * accesses during the execution of the after-gc-hook will suffice to cause
   * the execution of the next gc.  Then, guile would keep executing the
   * after-gc-hook over and over again, and would never come to do other
   * things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collecter.  When running guile with cell access debugging and the
   * execution of the after-gc-hook is desired, then it is necessary to run
   * the hook explicitly from the user code.  This has the effect, that from
   * the scheme level point of view it seems that garbage collection is
   * performed with a much lower frequency than it actually is.  Obviously,
   * this will not work for code that depends on a fixed one to one
   * relationship between the execution counts of the C level garbage
   * collection hooks and the execution count of the scheme level
   * after-gc-hook.
   */
#if (SCM_DEBUG_CELL_ACCESSES == 1)
  if (debug_cells_gc_interval == 0)
    scm_system_async_mark (gc_async);
#else
  scm_system_async_mark (gc_async);
#endif

  return NULL;
}
2933 | ||
d678e25c MV |
2934 | #if SCM_ENABLE_DEPRECATED == 1 |
2935 | ||
2936 | /* If an allocated cell is detected during garbage collection, this | |
2937 | * means that some code has just obtained the object but was preempted | |
 * before the initialization of the object was completed.  This means
2939 | * that some entries of the allocated cell may already contain SCM | |
2940 | * objects. Therefore, allocated cells are scanned conservatively. | |
2941 | */ | |
2942 | ||
2943 | scm_t_bits scm_tc16_allocated; | |
2944 | ||
2945 | static SCM | |
2946 | allocated_mark (SCM cell) | |
2947 | { | |
2948 | unsigned long int cell_segment = heap_segment (cell); | |
2949 | unsigned int span = scm_heap_table[cell_segment].span; | |
2950 | unsigned int i; | |
2951 | ||
2952 | for (i = 1; i != span * 2; ++i) | |
2953 | { | |
2954 | SCM obj = SCM_CELL_OBJECT (cell, i); | |
2955 | long int obj_segment = heap_segment (obj); | |
2956 | if (obj_segment >= 0) | |
2957 | scm_gc_mark (obj); | |
2958 | } | |
2959 | return SCM_BOOL_F; | |
2960 | } | |
2961 | ||
2962 | SCM | |
2963 | scm_deprecated_newcell (void) | |
2964 | { | |
2965 | scm_c_issue_deprecation_warning | |
2966 | ("SCM_NEWCELL is deprecated. Use `scm_alloc_cell' instead.\n"); | |
2967 | ||
2968 | return scm_alloc_cell (scm_tc16_allocated, 0); | |
2969 | } | |
2970 | ||
2971 | SCM | |
2972 | scm_deprecated_newcell2 (void) | |
2973 | { | |
2974 | scm_c_issue_deprecation_warning | |
2975 | ("SCM_NEWCELL2 is deprecated. Use `scm_alloc_double_cell' instead.\n"); | |
2976 | ||
2977 | return scm_alloc_double_cell (scm_tc16_allocated, 0, 0, 0); | |
2978 | } | |
2979 | ||
2980 | #endif /* SCM_ENABLE_DEPRECATED == 1 */ | |
939794ce | 2981 | |
0f2d19dd JB |
/* Register the GC's scheme-level interface: the after-gc-hook, the
 * async that runs it, and (when deprecated code is enabled) the smob
 * type used for not-yet-initialized "allocated" cells.  */
void
scm_init_gc ()
{
  SCM after_gc_thunk;

#if SCM_ENABLE_DEPRECATED == 1
  /* Cells handed out by the deprecated SCM_NEWCELL API carry this smob
     tag until initialized; they are scanned conservatively by
     allocated_mark (see above).  */
  scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
  scm_set_smob_mark (scm_tc16_allocated, allocated_mark);
#endif

  /* The hook object itself must survive every GC, hence the permanent
     protection.  */
  scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
				    gc_async_thunk);
  gc_async = scm_system_async (after_gc_thunk);	/* protected via scm_asyncs */

  /* mark_gc_async arms gc_async at the end of each collection.  */
  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#ifndef SCM_MAGIC_SNARFER
#include "libguile/gc.x"
#endif
}
89e00824 | 3005 | |
56495472 ML |
3006 | #endif /*MARK_DEPENDENCIES*/ |
3007 | ||
89e00824 ML |
3008 | /* |
3009 | Local Variables: | |
3010 | c-file-style: "gnu" | |
3011 | End: | |
3012 | */ |