Commit | Line | Data |
---|---|---|
acb0a19c | 1 | /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc. |
a00c95d9 | 2 | * |
0f2d19dd JB |
3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License as published by | |
5 | * the Free Software Foundation; either version 2, or (at your option) | |
6 | * any later version. | |
a00c95d9 | 7 | * |
0f2d19dd JB |
8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
11 | * GNU General Public License for more details. | |
a00c95d9 | 12 | * |
0f2d19dd JB |
13 | * You should have received a copy of the GNU General Public License |
14 | * along with this software; see the file COPYING. If not, write to | |
82892bed JB |
15 | * the Free Software Foundation, Inc., 59 Temple Place, Suite 330, |
16 | * Boston, MA 02111-1307 USA | |
0f2d19dd JB |
17 | * |
18 | * As a special exception, the Free Software Foundation gives permission | |
19 | * for additional uses of the text contained in its release of GUILE. | |
20 | * | |
21 | * The exception is that, if you link the GUILE library with other files | |
22 | * to produce an executable, this does not by itself cause the | |
23 | * resulting executable to be covered by the GNU General Public License. | |
24 | * Your use of that executable is in no way restricted on account of | |
25 | * linking the GUILE library code into it. | |
26 | * | |
27 | * This exception does not however invalidate any other reasons why | |
28 | * the executable file might be covered by the GNU General Public License. | |
29 | * | |
30 | * This exception applies only to the code released by the | |
31 | * Free Software Foundation under the name GUILE. If you copy | |
32 | * code from other Free Software Foundation releases into a copy of | |
33 | * GUILE, as the General Public License permits, the exception does | |
34 | * not apply to the code that you add in this way. To avoid misleading | |
35 | * anyone as to the status of such modified files, you must delete | |
36 | * this exception notice from them. | |
37 | * | |
38 | * If you write modifications of your own for GUILE, it is your choice | |
39 | * whether to permit this exception to apply to your modifications. | |
82892bed | 40 | * If you do not wish that, delete this exception notice. */ |
1bbd0b84 GB |
41 | |
42 | /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999, | |
43 | gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */ | |
44 | ||
37ddcaf6 MD |
45 | /* #define DEBUGINFO */ |
46 | ||
0f2d19dd JB |
47 | \f |
48 | #include <stdio.h> | |
a0599745 | 49 | #include "libguile/_scm.h" |
0a7a7445 | 50 | #include "libguile/eval.h" |
a0599745 MD |
51 | #include "libguile/stime.h" |
52 | #include "libguile/stackchk.h" | |
53 | #include "libguile/struct.h" | |
a0599745 MD |
54 | #include "libguile/smob.h" |
55 | #include "libguile/unif.h" | |
56 | #include "libguile/async.h" | |
57 | #include "libguile/ports.h" | |
58 | #include "libguile/root.h" | |
59 | #include "libguile/strings.h" | |
60 | #include "libguile/vectors.h" | |
801cb5e7 | 61 | #include "libguile/weaks.h" |
686765af | 62 | #include "libguile/hashtab.h" |
a0599745 MD |
63 | |
64 | #include "libguile/validate.h" | |
65 | #include "libguile/gc.h" | |
fce59c93 | 66 | |
bc9d9bb2 | 67 | #ifdef GUILE_DEBUG_MALLOC |
a0599745 | 68 | #include "libguile/debug-malloc.h" |
bc9d9bb2 MD |
69 | #endif |
70 | ||
0f2d19dd | 71 | #ifdef HAVE_MALLOC_H |
95b88819 | 72 | #include <malloc.h> |
0f2d19dd JB |
73 | #endif |
74 | ||
75 | #ifdef HAVE_UNISTD_H | |
95b88819 | 76 | #include <unistd.h> |
0f2d19dd JB |
77 | #endif |
78 | ||
1cc91f1b JB |
79 | #ifdef __STDC__ |
80 | #include <stdarg.h> | |
81 | #define var_start(x, y) va_start(x, y) | |
82 | #else | |
83 | #include <varargs.h> | |
84 | #define var_start(x, y) va_start(x) | |
85 | #endif | |
86 | ||
0f2d19dd | 87 | \f |
406c7d90 DH |
88 | |
/* Nonzero while a garbage collection is in progress; consulted by the
 * cell-validity checks below to relax assertions during marking. */
unsigned int scm_gc_running_p = 0;
90 | ||
91 | \f | |
92 | ||
93 | #if (SCM_DEBUG_CELL_ACCESSES == 1) | |
94 | ||
/* When nonzero, every cell access is verified by scm_assert_cell_valid.
 * Toggled from Scheme via set-debug-cell-accesses!. */
unsigned int scm_debug_cell_accesses_p = 0;
96 | ||
97 | ||
/* Assert that the given object is a valid reference to a valid cell.  This
 * test involves to determine whether the object is a cell pointer, whether
 * this pointer actually points into a heap segment and whether the cell
 * pointed to is not a free cell.  Aborts the process on failure.
 */
void
scm_assert_cell_valid (SCM cell)
{
  if (scm_debug_cell_accesses_p)
    {
      /* scm_cellp and SCM_FREE_CELL_P may themselves perform cell
       * accesses, so checking is switched off for the duration of the
       * test and switched back on at the end. */
      scm_debug_cell_accesses_p = 0;  /* disable to avoid recursion */

      if (!scm_cellp (cell))
	{
	  fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
	  abort ();
	}
      else if (!scm_gc_running_p)
	{
	  /* Dirk::FIXME:: During garbage collection there occur references to
	     free cells.  This is allright during conservative marking, but
	     should not happen otherwise (I think).  The case of free cells
	     accessed during conservative marking is handled in function
	     scm_mark_locations.  However, there still occur accesses to free
	     cells during gc.  I don't understand why this happens.  If it is
	     a bug and gets fixed, the following test should also work while
	     gc is running.
	   */
	  if (SCM_FREE_CELL_P (cell))
	    {
	      fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
	      abort ();
	    }
	}
      scm_debug_cell_accesses_p = 1;  /* re-enable */
    }
}
135 | ||
136 | ||
137 | SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0, | |
138 | (SCM flag), | |
139 | "If FLAG is #f, cell access checking is disabled.\n" | |
140 | "If FLAG is #t, cell access checking is enabled.\n" | |
141 | "This procedure only exists because the compile-time flag\n" | |
142 | "SCM_DEBUG_CELL_ACCESSES was set to 1.\n") | |
143 | #define FUNC_NAME s_scm_set_debug_cell_accesses_x | |
144 | { | |
145 | if (SCM_FALSEP (flag)) { | |
146 | scm_debug_cell_accesses_p = 0; | |
147 | } else if (SCM_EQ_P (flag, SCM_BOOL_T)) { | |
148 | scm_debug_cell_accesses_p = 1; | |
149 | } else { | |
150 | SCM_WRONG_TYPE_ARG (1, flag); | |
151 | } | |
152 | return SCM_UNSPECIFIED; | |
153 | } | |
154 | #undef FUNC_NAME | |
155 | ||
156 | #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */ | |
157 | ||
158 | \f | |
159 | ||
0f2d19dd | 160 | /* {heap tuning parameters} |
a00c95d9 | 161 | * |
0f2d19dd JB |
162 | * These are parameters for controlling memory allocation. The heap |
163 | * is the area out of which scm_cons, and object headers are allocated. | |
164 | * | |
165 | * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a | |
166 | * 64 bit machine. The units of the _SIZE parameters are bytes. | |
167 | * Cons pairs and object headers occupy one heap cell. | |
168 | * | |
169 | * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is | |
170 | * allocated initially the heap will grow by half its current size | |
171 | * each subsequent time more heap is needed. | |
172 | * | |
173 | * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE | |
174 | * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more | |
175 | * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code | |
176 | * is in scm_init_storage() and alloc_some_heap() in sys.c | |
a00c95d9 | 177 | * |
0f2d19dd JB |
178 | * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by |
179 | * SCM_EXPHEAP(scm_heap_size) when more heap is needed. | |
180 | * | |
181 | * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap | |
182 | * is needed. | |
183 | * | |
184 | * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will | |
a00c95d9 | 185 | * trigger a GC. |
6064dcc6 MV |
186 | * |
187 | * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be | |
188 | * reclaimed by a GC triggered by must_malloc. If less than this is | |
189 | * reclaimed, the trigger threshold is raised. [I don't know what a | |
190 | * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to | |
a00c95d9 | 191 | * work around a oscillation that caused almost constant GC.] |
0f2d19dd JB |
192 | */ |
193 | ||
8fef55a8 MD |
194 | /* |
195 | * Heap size 45000 and 40% min yield gives quick startup and no extra | |
196 | * heap allocation. Having higher values on min yield may lead to | |
197 | * large heaps, especially if code behaviour is varying its | |
198 | * maximum consumption between different freelists. | |
199 | */ | |
aeacfc8f MD |
200 | int scm_default_init_heap_size_1 = (45000L * sizeof (scm_cell)); |
201 | int scm_default_min_yield_1 = 40; | |
4c48ba06 | 202 | #define SCM_CLUSTER_SIZE_1 2000L |
4c48ba06 | 203 | |
aeacfc8f | 204 | int scm_default_init_heap_size_2 = (2500L * 2 * sizeof (scm_cell)); |
4c48ba06 MD |
205 | /* The following value may seem large, but note that if we get to GC at |
206 | * all, this means that we have a numerically intensive application | |
207 | */ | |
aeacfc8f MD |
208 | int scm_default_min_yield_2 = 40; |
209 | #define SCM_CLUSTER_SIZE_2 1000L | |
4c48ba06 | 210 | |
aeacfc8f | 211 | int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */ |
4c48ba06 | 212 | |
945fec60 | 213 | #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell)) |
0f2d19dd JB |
214 | #ifdef _QC |
215 | # define SCM_HEAP_SEG_SIZE 32768L | |
216 | #else | |
217 | # ifdef sequent | |
4c48ba06 | 218 | # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell)) |
0f2d19dd | 219 | # else |
4c48ba06 | 220 | # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell)) |
0f2d19dd JB |
221 | # endif |
222 | #endif | |
4c48ba06 | 223 | /* Make heap grow with factor 1.5 */ |
4a4c9785 | 224 | #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2) |
0f2d19dd | 225 | #define SCM_INIT_MALLOC_LIMIT 100000 |
6064dcc6 | 226 | #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10) |
0f2d19dd JB |
227 | |
228 | /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner | |
229 | bounds for allocated storage */ | |
230 | ||
231 | #ifdef PROT386 | |
232 | /*in 386 protected mode we must only adjust the offset */ | |
a00c95d9 ML |
233 | # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1)) |
234 | # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p)) | |
0f2d19dd JB |
235 | #else |
236 | # ifdef _UNICOS | |
a00c95d9 ML |
237 | # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span))) |
238 | # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p)) | |
0f2d19dd | 239 | # else |
a00c95d9 ML |
240 | # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L)) |
241 | # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p)) | |
0f2d19dd JB |
242 | # endif /* UNICOS */ |
243 | #endif /* PROT386 */ | |
a00c95d9 ML |
244 | #define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell)) |
245 | #define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1) | |
b37fe1c5 MD |
246 | #define SCM_HEAP_SIZE \ |
247 | (scm_master_freelist.heap_size + scm_master_freelist2.heap_size) | |
1811ebce | 248 | #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B)) |
0f2d19dd JB |
249 | |
250 | ||
251 | \f | |
/* scm_freelists
 *
 * Per-span bookkeeping for cell allocation.  NOTE: the master freelist
 * variables below this struct use positional initializers, so the field
 * order here must not change without updating them.
 */

typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow heap instead of GC:ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;
289 | ||
4a4c9785 MD |
/* The loose freelists handed to allocation code, plus the master
 * bookkeeping records for 1-cell and 2-cell objects.  The aggregate
 * initializers are positional and must track the scm_freelist_t field
 * order (cells, left_to_collect, clusters_allocated, clusters,
 * clustertail, cluster_size, grow_heap_p, min_yield,
 * min_yield_fraction, span, collected, collected_1/heap_size). */
SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
0f2d19dd JB |
298 | |
/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;


/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* During collection, this accumulates structures which are to be freed.
 */
SCM scm_structs_to_free;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_rt;        /* run-time snapshot taken at GC start */
unsigned long scm_gc_time_taken = 0;

/* Interned symbols used as keys in the gc-stats association list. */
SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
/* Bookkeeping for one contiguous segment of heap cells. */
typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_freelist_t *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_heap_seg_data_t;
0f2d19dd JB |
357 | |
358 | ||
359 | ||
945fec60 | 360 | static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *); |
b6efc951 DH |
361 | |
362 | typedef enum { return_on_error, abort_on_error } policy_on_error; | |
363 | static void alloc_some_heap (scm_freelist_t *, policy_on_error); | |
0f2d19dd JB |
364 | |
365 | ||
366 | \f | |
cf2d30f6 JB |
367 | /* Debugging functions. */ |
368 | ||
bb2c57fa | 369 | #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST) |
cf2d30f6 JB |
370 | |
371 | /* Return the number of the heap segment containing CELL. */ | |
372 | static int | |
373 | which_seg (SCM cell) | |
374 | { | |
375 | int i; | |
376 | ||
377 | for (i = 0; i < scm_n_heap_segs; i++) | |
195e6201 DH |
378 | if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell)) |
379 | && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell))) | |
cf2d30f6 JB |
380 | return i; |
381 | fprintf (stderr, "which_seg: can't find segment containing cell %lx\n", | |
945fec60 | 382 | SCM_UNPACK (cell)); |
cf2d30f6 JB |
383 | abort (); |
384 | } | |
385 | ||
386 | ||
8ded62a3 MD |
387 | static void |
388 | map_free_list (scm_freelist_t *master, SCM freelist) | |
389 | { | |
390 | int last_seg = -1, count = 0; | |
391 | SCM f; | |
a00c95d9 | 392 | |
3f5d82cd | 393 | for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f)) |
8ded62a3 MD |
394 | { |
395 | int this_seg = which_seg (f); | |
396 | ||
397 | if (this_seg != last_seg) | |
398 | { | |
399 | if (last_seg != -1) | |
400 | fprintf (stderr, " %5d %d-cells in segment %d\n", | |
401 | count, master->span, last_seg); | |
402 | last_seg = this_seg; | |
403 | count = 0; | |
404 | } | |
405 | count++; | |
406 | } | |
407 | if (last_seg != -1) | |
408 | fprintf (stderr, " %5d %d-cells in segment %d\n", | |
409 | count, master->span, last_seg); | |
410 | } | |
cf2d30f6 | 411 | |
a00c95d9 | 412 | SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0, |
acb0a19c MD |
413 | (), |
414 | "Print debugging information about the free-list.\n" | |
5384bc5b | 415 | "`map-free-list' is only included in --enable-guile-debug builds of Guile.") |
acb0a19c MD |
416 | #define FUNC_NAME s_scm_map_free_list |
417 | { | |
4c48ba06 MD |
418 | int i; |
419 | fprintf (stderr, "%d segments total (%d:%d", | |
420 | scm_n_heap_segs, | |
421 | scm_heap_table[0].span, | |
422 | scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]); | |
423 | for (i = 1; i < scm_n_heap_segs; i++) | |
424 | fprintf (stderr, ", %d:%d", | |
425 | scm_heap_table[i].span, | |
426 | scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]); | |
427 | fprintf (stderr, ")\n"); | |
8ded62a3 MD |
428 | map_free_list (&scm_master_freelist, scm_freelist); |
429 | map_free_list (&scm_master_freelist2, scm_freelist2); | |
cf2d30f6 JB |
430 | fflush (stderr); |
431 | ||
432 | return SCM_UNSPECIFIED; | |
433 | } | |
1bbd0b84 | 434 | #undef FUNC_NAME |
cf2d30f6 | 435 | |
4c48ba06 MD |
/* State shared between successive free_list_length calls, allowing runs
 * of clusters with identical free counts to be printed as one range. */
static int last_cluster;
static int last_size;
438 | ||
5384bc5b MD |
/* Count the free cells on FREELIST (aborting if a non-free cell is
 * found) and print the count, compressing runs of equally-sized
 * clusters into ranges via the last_cluster/last_size state.  I is the
 * cluster index, or -1 for the loose freelist.  Returns the count. */
static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
    if (SCM_FREE_CELL_P (ls))
      ++n;
    else
      {
	fprintf (stderr, "bad cell in %s at position %d\n", title, n);
	abort ();
      }
  /* Only emit output when the count differs from the previous cluster;
   * equal counts are folded into a range printed later. */
  if (n != last_size)
    {
      if (i > 0)
	{
	  /* Close the previous run: single cluster or a range. */
	  if (last_cluster == i - 1)
	    fprintf (stderr, "\t%d\n", last_size);
	  else
	    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
	}
      if (i >= 0)
	fprintf (stderr, "%s %d", title, i);
      else
	fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}
470 | ||
/* Print the length of the loose FREELIST and of every cluster belonging
 * to MASTER (using free_list_length's run-compression), followed by the
 * grand total number of free objects. */
static void
free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
{
  SCM clusters;
  int i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  /* Close the final run left open by free_list_length. */
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%d\n", last_size);
  else
    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  fprintf (stderr, "\ntotal %d objects\n\n", n);
}
491 | ||
SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
	    (),
	    "Print debugging information about the free-list.\n"
	    "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  /* Report freelist and cluster lengths for both cell spans. */
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
503 | ||
bb2c57fa MD |
504 | #endif |
505 | ||
506 | #ifdef GUILE_DEBUG_FREELIST | |
cf2d30f6 JB |
507 | |
/* Number of calls to SCM_NEWCELL since startup.  */
static unsigned long scm_newcell_count;
/* Number of calls to SCM_NEWCELL2 since startup.  */
static unsigned long scm_newcell2_count;
cf2d30f6 JB |
511 | |
512 | /* Search freelist for anything that isn't marked as a free cell. | |
513 | Abort if we find something. */ | |
8ded62a3 MD |
514 | static void |
515 | scm_check_freelist (SCM freelist) | |
516 | { | |
517 | SCM f; | |
518 | int i = 0; | |
519 | ||
3f5d82cd DH |
520 | for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++) |
521 | if (!SCM_FREE_CELL_P (f)) | |
8ded62a3 MD |
522 | { |
523 | fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n", | |
524 | scm_newcell_count, i); | |
8ded62a3 MD |
525 | abort (); |
526 | } | |
527 | } | |
cf2d30f6 JB |
528 | |
/* When nonzero, every cell allocation validates the freelists and
 * forces a GC.  Toggled from Scheme via gc-set-debug-check-freelist!. */
static int scm_debug_check_freelist = 0;
25748c78 | 530 | |
SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
	    (SCM flag),
	    "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
	    "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
	    "compile-time flag was selected.\n")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* Copy the boolean FLAG into the C-level switch, raising a
   * wrong-type-arg error if it is not a boolean. */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
25748c78 GB |
542 | |
543 | ||
4a4c9785 MD |
/* Debugging replacement for the SCM_NEWCELL macro: counts allocations
 * and optionally validates the freelist (and forces a GC) before
 * allocating a fresh 1-span cell.  The allocation path itself must stay
 * in lock-step with SCM_NEWCELL. */
SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist))
    new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
      SCM_SET_FREE_CELL_TYPE (new, scm_tc16_allocated);
    }

  return new;
}
569 | ||
/* Debugging replacement for the SCM_NEWCELL2 macro: identical to
 * scm_debug_newcell but allocates from the 2-span freelist. */
SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist2))
    new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
      SCM_SET_FREE_CELL_TYPE (new, scm_tc16_allocated);
    }

  return new;
}
595 | ||
fca7547b | 596 | #endif /* GUILE_DEBUG_FREELIST */ |
cf2d30f6 JB |
597 | |
598 | \f | |
0f2d19dd | 599 | |
b37fe1c5 MD |
600 | static unsigned long |
601 | master_cells_allocated (scm_freelist_t *master) | |
602 | { | |
603 | int objects = master->clusters_allocated * (master->cluster_size - 1); | |
604 | if (SCM_NULLP (master->clusters)) | |
605 | objects -= master->left_to_collect; | |
606 | return master->span * objects; | |
607 | } | |
608 | ||
609 | static unsigned long | |
610 | freelist_length (SCM freelist) | |
611 | { | |
612 | int n; | |
3f5d82cd | 613 | for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist)) |
b37fe1c5 MD |
614 | ++n; |
615 | return n; | |
616 | } | |
617 | ||
618 | static unsigned long | |
619 | compute_cells_allocated () | |
620 | { | |
621 | return (scm_cells_allocated | |
622 | + master_cells_allocated (&scm_master_freelist) | |
623 | + master_cells_allocated (&scm_master_freelist2) | |
624 | - scm_master_freelist.span * freelist_length (scm_freelist) | |
625 | - scm_master_freelist2.span * freelist_length (scm_freelist2)); | |
626 | } | |
b37fe1c5 | 627 | |
0f2d19dd JB |
628 | /* {Scheme Interface to GC} |
629 | */ | |
630 | ||
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
	    (),
	    "Returns an association list of statistics about Guile's current use of storage. ")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;
  SCM heap_segs;
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  SCM answer;

  SCM_DEFER_INTS;

  /* Block GC while building the segment list: the conses below could
   * otherwise trigger a collection that changes the heap table. */
  ++scm_block_gc;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  /* Build (upper . lower) bound pairs for every heap segment.  If the
   * consing grew the heap (scm_n_heap_segs changed), start over so the
   * snapshot is consistent. */
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
				    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
			  heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
			scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
			scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
			scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
			scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
			scm_cons (sym_heap_segments, heap_segs),
			SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
0f2d19dd JB |
682 | |
683 | ||
/* Record the start of a garbage collection: take a run-time stamp,
 * reset the per-collection counters, and snapshot the allocation total
 * into scm_gc_yield (saving the previous value for yield prediction).
 * WHAT names the reason for the collection; it is not examined here. */
void
scm_gc_start (const char *what)
{
  scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
  scm_gc_cells_collected = 0;
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
		  + master_cells_allocated (&scm_master_freelist)
		  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}
696 | ||
939794ce | 697 | |
a00c95d9 | 698 | void |
0f2d19dd | 699 | scm_gc_end () |
0f2d19dd JB |
700 | { |
701 | scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt; | |
c209c88e | 702 | scm_gc_time_taken += scm_gc_rt; |
0f2d19dd JB |
703 | } |
704 | ||
705 | ||
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
	    (SCM obj),
	    "Return an integer that for the lifetime of @var{obj} is uniquely\n"
	    "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  /* The raw SCM word (for heap objects, the cell address) serves as the
   * unique integer. */
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME
0f2d19dd JB |
715 | |
716 | ||
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
	    (),
	    "Scans all of SCM objects and reclaims for further use those that are\n"
	    "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  /* Defer interrupts so the collector cannot be re-entered from a
   * signal handler while it runs. */
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
0f2d19dd JB |
729 | |
730 | ||
731 | \f | |
732 | /* {C Interface For When GC is Triggered} | |
733 | */ | |
734 | ||
/* Raise FREELIST's min_yield after a collection when the total yield
 * fell short of the configured fraction of the heap size. */
static void
adjust_min_yield (scm_freelist_t *freelist)
{
  /* min yield is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   *  by computing an average over multiple GC:s.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick largest of last two yields. */
      int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
		   - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %d, delta = %d\n",
	       scm_cells_allocated,
	       delta);
#endif
      if (delta > 0)
	freelist->min_yield += delta;
    }
}
766 | ||
b6efc951 | 767 | |
/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

/* Refill *FREELIST from MASTER and return one freshly allocated cell.
 * Pops the next cluster off MASTER->clusters, growing the heap and/or
 * running GC as needed until a non-empty cluster is obtained.  Called
 * with interrupts enabled; interrupts are disabled for the duration. */
SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
	{
	  if (master->grow_heap_p || scm_block_gc)
	    {
	      /* In order to reduce gc frequency, try to allocate a new heap
	       * segment first, even if gc might find some free cells.  If we
	       * can't obtain a new heap segment, we will try gc later.
	       */
	      master->grow_heap_p = 0;
	      alloc_some_heap (master, return_on_error);
	    }
	  if (SCM_NULLP (master->clusters))
	    {
	      /* The heap was not grown, either because it wasn't scheduled to
	       * grow, or because there was not enough memory available.  In
	       * both cases we have to try gc to get some free cells.
	       */
#ifdef DEBUGINFO
	      fprintf (stderr, "allocated = %d, ",
		       scm_cells_allocated
		       + master_cells_allocated (&scm_master_freelist)
		       + master_cells_allocated (&scm_master_freelist2));
#endif
	      scm_igc ("cells");
	      adjust_min_yield (master);
	      if (SCM_NULLP (master->clusters))
		{
		  /* gc could not free any cells.  Now, we _must_ allocate a
		   * new heap segment, because there is no other possibility
		   * to provide a new cell for the caller.
		   */
		  alloc_some_heap (master, abort_on_error);
		}
	    }
	}
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));
  --scm_ints_disabled;
  /* The cluster's spine cell becomes the returned cell; the rest of the
   * cluster becomes the caller's new freelist. */
  *freelist = SCM_FREE_CELL_CDR (cell);
  SCM_SET_FREE_CELL_TYPE (cell, scm_tc16_allocated);
  return cell;
}
824 | ||
b6efc951 | 825 | |
4c48ba06 MD |
826 | #if 0 |
827 | /* This is a support routine which can be used to reserve a cluster | |
828 | * for some special use, such as debugging. It won't be useful until | |
829 | * free cells are preserved between garbage collections. | |
830 | */ | |
831 | ||
832 | void | |
833 | scm_alloc_cluster (scm_freelist_t *master) | |
834 | { | |
835 | SCM freelist, cell; | |
836 | cell = scm_gc_for_newcell (master, &freelist); | |
837 | SCM_SETCDR (cell, freelist); | |
838 | return cell; | |
839 | } | |
840 | #endif | |
841 | ||
801cb5e7 MD |
842 | |
843 | scm_c_hook_t scm_before_gc_c_hook; | |
844 | scm_c_hook_t scm_before_mark_c_hook; | |
845 | scm_c_hook_t scm_before_sweep_c_hook; | |
846 | scm_c_hook_t scm_after_sweep_c_hook; | |
847 | scm_c_hook_t scm_after_gc_c_hook; | |
848 | ||
b6efc951 | 849 | |
0f2d19dd | 850 | void |
1bbd0b84 | 851 | scm_igc (const char *what) |
0f2d19dd JB |
852 | { |
853 | int j; | |
854 | ||
406c7d90 | 855 | ++scm_gc_running_p; |
801cb5e7 | 856 | scm_c_hook_run (&scm_before_gc_c_hook, 0); |
4c48ba06 MD |
857 | #ifdef DEBUGINFO |
858 | fprintf (stderr, | |
859 | SCM_NULLP (scm_freelist) | |
860 | ? "*" | |
861 | : (SCM_NULLP (scm_freelist2) ? "o" : "m")); | |
862 | #endif | |
42db06f0 MD |
863 | #ifdef USE_THREADS |
864 | /* During the critical section, only the current thread may run. */ | |
865 | SCM_THREAD_CRITICAL_SECTION_START; | |
866 | #endif | |
867 | ||
e242dfd2 | 868 | /* fprintf (stderr, "gc: %s\n", what); */ |
c68296f8 | 869 | |
ab4bef85 JB |
870 | scm_gc_start (what); |
871 | ||
872 | if (!scm_stack_base || scm_block_gc) | |
873 | { | |
874 | scm_gc_end (); | |
406c7d90 | 875 | --scm_gc_running_p; |
ab4bef85 JB |
876 | return; |
877 | } | |
878 | ||
a5c314c8 JB |
879 | if (scm_mallocated < 0) |
880 | /* The byte count of allocated objects has underflowed. This is | |
881 | probably because you forgot to report the sizes of objects you | |
882 | have allocated, by calling scm_done_malloc or some such. When | |
883 | the GC freed them, it subtracted their size from | |
884 | scm_mallocated, which underflowed. */ | |
885 | abort (); | |
c45acc34 | 886 | |
ab4bef85 JB |
887 | if (scm_gc_heap_lock) |
888 | /* We've invoked the collector while a GC is already in progress. | |
889 | That should never happen. */ | |
890 | abort (); | |
0f2d19dd JB |
891 | |
892 | ++scm_gc_heap_lock; | |
ab4bef85 | 893 | |
0f2d19dd JB |
894 | /* flush dead entries from the continuation stack */ |
895 | { | |
896 | int x; | |
897 | int bound; | |
898 | SCM * elts; | |
899 | elts = SCM_VELTS (scm_continuation_stack); | |
900 | bound = SCM_LENGTH (scm_continuation_stack); | |
901 | x = SCM_INUM (scm_continuation_stack_ptr); | |
902 | while (x < bound) | |
903 | { | |
904 | elts[x] = SCM_BOOL_F; | |
905 | ++x; | |
906 | } | |
907 | } | |
908 | ||
801cb5e7 MD |
909 | scm_c_hook_run (&scm_before_mark_c_hook, 0); |
910 | ||
42db06f0 | 911 | #ifndef USE_THREADS |
a00c95d9 | 912 | |
0f2d19dd JB |
913 | /* Protect from the C stack. This must be the first marking |
914 | * done because it provides information about what objects | |
915 | * are "in-use" by the C code. "in-use" objects are those | |
916 | * for which the values from SCM_LENGTH and SCM_CHARS must remain | |
917 | * usable. This requirement is stricter than a liveness | |
918 | * requirement -- in particular, it constrains the implementation | |
919 | * of scm_vector_set_length_x. | |
920 | */ | |
921 | SCM_FLUSH_REGISTER_WINDOWS; | |
922 | /* This assumes that all registers are saved into the jmp_buf */ | |
923 | setjmp (scm_save_regs_gc_mark); | |
924 | scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark, | |
ce4a361d JB |
925 | ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 + |
926 | sizeof scm_save_regs_gc_mark) | |
927 | / sizeof (SCM_STACKITEM))); | |
0f2d19dd JB |
928 | |
929 | { | |
6ba93e5e | 930 | scm_sizet stack_len = scm_stack_size (scm_stack_base); |
0f2d19dd | 931 | #ifdef SCM_STACK_GROWS_UP |
6ba93e5e | 932 | scm_mark_locations (scm_stack_base, stack_len); |
0f2d19dd | 933 | #else |
6ba93e5e | 934 | scm_mark_locations (scm_stack_base - stack_len, stack_len); |
0f2d19dd JB |
935 | #endif |
936 | } | |
937 | ||
42db06f0 MD |
938 | #else /* USE_THREADS */ |
939 | ||
940 | /* Mark every thread's stack and registers */ | |
945fec60 | 941 | scm_threads_mark_stacks (); |
42db06f0 MD |
942 | |
943 | #endif /* USE_THREADS */ | |
0f2d19dd JB |
944 | |
945 | /* FIXME: insert a phase to un-protect string-data preserved | |
946 | * in scm_vector_set_length_x. | |
947 | */ | |
948 | ||
949 | j = SCM_NUM_PROTECTS; | |
950 | while (j--) | |
951 | scm_gc_mark (scm_sys_protects[j]); | |
952 | ||
9de33deb MD |
953 | /* FIXME: we should have a means to register C functions to be run |
954 | * in different phases of GC | |
a00c95d9 | 955 | */ |
9de33deb | 956 | scm_mark_subr_table (); |
a00c95d9 | 957 | |
42db06f0 MD |
958 | #ifndef USE_THREADS |
959 | scm_gc_mark (scm_root->handle); | |
960 | #endif | |
a00c95d9 | 961 | |
801cb5e7 | 962 | scm_c_hook_run (&scm_before_sweep_c_hook, 0); |
0493cd89 | 963 | |
0f2d19dd JB |
964 | scm_gc_sweep (); |
965 | ||
801cb5e7 MD |
966 | scm_c_hook_run (&scm_after_sweep_c_hook, 0); |
967 | ||
0f2d19dd JB |
968 | --scm_gc_heap_lock; |
969 | scm_gc_end (); | |
42db06f0 MD |
970 | |
971 | #ifdef USE_THREADS | |
972 | SCM_THREAD_CRITICAL_SECTION_END; | |
973 | #endif | |
801cb5e7 | 974 | scm_c_hook_run (&scm_after_gc_c_hook, 0); |
406c7d90 | 975 | --scm_gc_running_p; |
0f2d19dd JB |
976 | } |
977 | ||
978 | \f | |
939794ce | 979 | |
a00c95d9 | 980 | /* {Mark/Sweep} |
0f2d19dd JB |
981 | */ |
982 | ||
983 | ||
984 | ||
985 | /* Mark an object precisely. | |
986 | */ | |
a00c95d9 | 987 | void |
1bbd0b84 | 988 | scm_gc_mark (SCM p) |
acf4331f | 989 | #define FUNC_NAME "scm_gc_mark" |
0f2d19dd JB |
990 | { |
991 | register long i; | |
992 | register SCM ptr; | |
993 | ||
994 | ptr = p; | |
995 | ||
996 | gc_mark_loop: | |
997 | if (SCM_IMP (ptr)) | |
998 | return; | |
999 | ||
1000 | gc_mark_nimp: | |
3f5d82cd | 1001 | if (!SCM_CELLP (ptr)) |
acf4331f | 1002 | SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL); |
0f2d19dd JB |
1003 | |
1004 | switch (SCM_TYP7 (ptr)) | |
1005 | { | |
1006 | case scm_tcs_cons_nimcar: | |
1007 | if (SCM_GCMARKP (ptr)) | |
1008 | break; | |
1009 | SCM_SETGCMARK (ptr); | |
1010 | if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */ | |
1011 | { | |
1012 | ptr = SCM_CAR (ptr); | |
1013 | goto gc_mark_nimp; | |
1014 | } | |
1015 | scm_gc_mark (SCM_CAR (ptr)); | |
1016 | ptr = SCM_GCCDR (ptr); | |
1017 | goto gc_mark_nimp; | |
1018 | case scm_tcs_cons_imcar: | |
acb0a19c MD |
1019 | if (SCM_GCMARKP (ptr)) |
1020 | break; | |
1021 | SCM_SETGCMARK (ptr); | |
1022 | ptr = SCM_GCCDR (ptr); | |
1023 | goto gc_mark_loop; | |
e641afaf | 1024 | case scm_tc7_pws: |
0f2d19dd JB |
1025 | if (SCM_GCMARKP (ptr)) |
1026 | break; | |
1027 | SCM_SETGCMARK (ptr); | |
54778cd3 | 1028 | scm_gc_mark (SCM_CELL_OBJECT_2 (ptr)); |
0f2d19dd JB |
1029 | ptr = SCM_GCCDR (ptr); |
1030 | goto gc_mark_loop; | |
1031 | case scm_tcs_cons_gloc: | |
1032 | if (SCM_GCMARKP (ptr)) | |
1033 | break; | |
1034 | SCM_SETGCMARK (ptr); | |
1035 | { | |
c8045e8d DH |
1036 | /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct |
1037 | * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer | |
1038 | * to a heap cell. If it is a struct, the cell word #0 of ptr is a | |
1039 | * pointer to a struct vtable data region. The fact that these are | |
1040 | * accessed in the same way restricts the possibilites to change the | |
9d47a1e6 | 1041 | * data layout of structs or heap cells. |
c8045e8d DH |
1042 | */ |
1043 | scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc; | |
1044 | scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */ | |
7445e0e8 | 1045 | if (vtable_data [scm_vtable_index_vcell] != 0) |
0f2d19dd | 1046 | { |
7445e0e8 MD |
1047 | /* ptr is a gloc */ |
1048 | SCM gloc_car = SCM_PACK (word0); | |
1049 | scm_gc_mark (gloc_car); | |
1050 | ptr = SCM_GCCDR (ptr); | |
1051 | goto gc_mark_loop; | |
1052 | } | |
1053 | else | |
1054 | { | |
1055 | /* ptr is a struct */ | |
1056 | SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]); | |
1057 | int len = SCM_LENGTH (layout); | |
1058 | char * fields_desc = SCM_CHARS (layout); | |
1059 | /* We're using SCM_GCCDR here like STRUCT_DATA, except | |
1060 | that it removes the mark */ | |
1061 | scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr)); | |
1062 | ||
1063 | if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY) | |
1064 | { | |
1065 | scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure])); | |
1066 | scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter])); | |
1067 | } | |
1068 | if (len) | |
1069 | { | |
1070 | int x; | |
c8045e8d | 1071 | |
7445e0e8 | 1072 | for (x = 0; x < len - 2; x += 2, ++struct_data) |
ad75306c | 1073 | if (fields_desc[x] == 'p') |
7445e0e8 MD |
1074 | scm_gc_mark (SCM_PACK (*struct_data)); |
1075 | if (fields_desc[x] == 'p') | |
1076 | { | |
1077 | if (SCM_LAYOUT_TAILP (fields_desc[x + 1])) | |
1078 | for (x = *struct_data; x; --x) | |
1079 | scm_gc_mark (SCM_PACK (*++struct_data)); | |
1080 | else | |
1081 | scm_gc_mark (SCM_PACK (*struct_data)); | |
1082 | } | |
1083 | } | |
1084 | /* mark vtable */ | |
1085 | ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]); | |
1086 | goto gc_mark_loop; | |
0f2d19dd JB |
1087 | } |
1088 | } | |
1089 | break; | |
1090 | case scm_tcs_closures: | |
1091 | if (SCM_GCMARKP (ptr)) | |
1092 | break; | |
1093 | SCM_SETGCMARK (ptr); | |
1094 | if (SCM_IMP (SCM_CDR (ptr))) | |
1095 | { | |
1096 | ptr = SCM_CLOSCAR (ptr); | |
1097 | goto gc_mark_nimp; | |
1098 | } | |
1099 | scm_gc_mark (SCM_CLOSCAR (ptr)); | |
1100 | ptr = SCM_GCCDR (ptr); | |
1101 | goto gc_mark_nimp; | |
1102 | case scm_tc7_vector: | |
1103 | case scm_tc7_lvector: | |
1104 | #ifdef CCLO | |
1105 | case scm_tc7_cclo: | |
1106 | #endif | |
1107 | if (SCM_GC8MARKP (ptr)) | |
1108 | break; | |
1109 | SCM_SETGC8MARK (ptr); | |
1110 | i = SCM_LENGTH (ptr); | |
1111 | if (i == 0) | |
1112 | break; | |
1113 | while (--i > 0) | |
1114 | if (SCM_NIMP (SCM_VELTS (ptr)[i])) | |
1115 | scm_gc_mark (SCM_VELTS (ptr)[i]); | |
1116 | ptr = SCM_VELTS (ptr)[0]; | |
1117 | goto gc_mark_loop; | |
1118 | case scm_tc7_contin: | |
1119 | if SCM_GC8MARKP | |
1120 | (ptr) break; | |
1121 | SCM_SETGC8MARK (ptr); | |
c68296f8 | 1122 | if (SCM_VELTS (ptr)) |
41b0806d | 1123 | scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr), |
c68296f8 MV |
1124 | (scm_sizet) |
1125 | (SCM_LENGTH (ptr) + | |
1126 | (sizeof (SCM_STACKITEM) + -1 + | |
1127 | sizeof (scm_contregs)) / | |
1128 | sizeof (SCM_STACKITEM))); | |
0f2d19dd | 1129 | break; |
afe5177e | 1130 | #ifdef HAVE_ARRAYS |
0f2d19dd JB |
1131 | case scm_tc7_bvect: |
1132 | case scm_tc7_byvect: | |
1133 | case scm_tc7_ivect: | |
1134 | case scm_tc7_uvect: | |
1135 | case scm_tc7_fvect: | |
1136 | case scm_tc7_dvect: | |
1137 | case scm_tc7_cvect: | |
1138 | case scm_tc7_svect: | |
5c11cc9d | 1139 | #ifdef HAVE_LONG_LONGS |
0f2d19dd JB |
1140 | case scm_tc7_llvect: |
1141 | #endif | |
afe5177e | 1142 | #endif |
0f2d19dd | 1143 | case scm_tc7_string: |
0f2d19dd JB |
1144 | SCM_SETGC8MARK (ptr); |
1145 | break; | |
1146 | ||
1147 | case scm_tc7_substring: | |
0f2d19dd JB |
1148 | if (SCM_GC8MARKP(ptr)) |
1149 | break; | |
1150 | SCM_SETGC8MARK (ptr); | |
1151 | ptr = SCM_CDR (ptr); | |
1152 | goto gc_mark_loop; | |
1153 | ||
1154 | case scm_tc7_wvect: | |
1155 | if (SCM_GC8MARKP(ptr)) | |
1156 | break; | |
ab4bef85 JB |
1157 | SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors; |
1158 | scm_weak_vectors = ptr; | |
0f2d19dd JB |
1159 | SCM_SETGC8MARK (ptr); |
1160 | if (SCM_IS_WHVEC_ANY (ptr)) | |
1161 | { | |
1162 | int x; | |
1163 | int len; | |
1164 | int weak_keys; | |
1165 | int weak_values; | |
1166 | ||
1167 | len = SCM_LENGTH (ptr); | |
1168 | weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr); | |
1169 | weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr); | |
a00c95d9 | 1170 | |
0f2d19dd JB |
1171 | for (x = 0; x < len; ++x) |
1172 | { | |
1173 | SCM alist; | |
1174 | alist = SCM_VELTS (ptr)[x]; | |
46408039 JB |
1175 | |
1176 | /* mark everything on the alist except the keys or | |
1177 | * values, according to weak_values and weak_keys. */ | |
0b5f3f34 | 1178 | while ( SCM_CONSP (alist) |
0f2d19dd | 1179 | && !SCM_GCMARKP (alist) |
0f2d19dd JB |
1180 | && SCM_CONSP (SCM_CAR (alist))) |
1181 | { | |
1182 | SCM kvpair; | |
1183 | SCM next_alist; | |
1184 | ||
1185 | kvpair = SCM_CAR (alist); | |
1186 | next_alist = SCM_CDR (alist); | |
a00c95d9 | 1187 | /* |
0f2d19dd JB |
1188 | * Do not do this: |
1189 | * SCM_SETGCMARK (alist); | |
1190 | * SCM_SETGCMARK (kvpair); | |
1191 | * | |
1192 | * It may be that either the key or value is protected by | |
1193 | * an escaped reference to part of the spine of this alist. | |
1194 | * If we mark the spine here, and only mark one or neither of the | |
1195 | * key and value, they may never be properly marked. | |
1196 | * This leads to a horrible situation in which an alist containing | |
1197 | * freelist cells is exported. | |
1198 | * | |
1199 | * So only mark the spines of these arrays last of all marking. | |
1200 | * If somebody confuses us by constructing a weak vector | |
1201 | * with a circular alist then we are hosed, but at least we | |
1202 | * won't prematurely drop table entries. | |
1203 | */ | |
1204 | if (!weak_keys) | |
1205 | scm_gc_mark (SCM_CAR (kvpair)); | |
1206 | if (!weak_values) | |
1207 | scm_gc_mark (SCM_GCCDR (kvpair)); | |
1208 | alist = next_alist; | |
1209 | } | |
1210 | if (SCM_NIMP (alist)) | |
1211 | scm_gc_mark (alist); | |
1212 | } | |
1213 | } | |
1214 | break; | |
1215 | ||
1216 | case scm_tc7_msymbol: | |
1217 | if (SCM_GC8MARKP(ptr)) | |
1218 | break; | |
1219 | SCM_SETGC8MARK (ptr); | |
1220 | scm_gc_mark (SCM_SYMBOL_FUNC (ptr)); | |
1221 | ptr = SCM_SYMBOL_PROPS (ptr); | |
1222 | goto gc_mark_loop; | |
1223 | case scm_tc7_ssymbol: | |
1224 | if (SCM_GC8MARKP(ptr)) | |
1225 | break; | |
1226 | SCM_SETGC8MARK (ptr); | |
1227 | break; | |
1228 | case scm_tcs_subrs: | |
9de33deb | 1229 | break; |
0f2d19dd JB |
1230 | case scm_tc7_port: |
1231 | i = SCM_PTOBNUM (ptr); | |
1232 | if (!(i < scm_numptob)) | |
1233 | goto def; | |
1234 | if (SCM_GC8MARKP (ptr)) | |
1235 | break; | |
dc53f026 | 1236 | SCM_SETGC8MARK (ptr); |
ebf7394e GH |
1237 | if (SCM_PTAB_ENTRY(ptr)) |
1238 | scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name); | |
dc53f026 JB |
1239 | if (scm_ptobs[i].mark) |
1240 | { | |
1241 | ptr = (scm_ptobs[i].mark) (ptr); | |
1242 | goto gc_mark_loop; | |
1243 | } | |
1244 | else | |
1245 | return; | |
0f2d19dd JB |
1246 | break; |
1247 | case scm_tc7_smob: | |
1248 | if (SCM_GC8MARKP (ptr)) | |
1249 | break; | |
dc53f026 | 1250 | SCM_SETGC8MARK (ptr); |
acb0a19c | 1251 | switch (SCM_GCTYP16 (ptr)) |
0f2d19dd JB |
1252 | { /* should be faster than going through scm_smobs */ |
1253 | case scm_tc_free_cell: | |
1254 | /* printf("found free_cell %X ", ptr); fflush(stdout); */ | |
1bbd0b84 | 1255 | case scm_tc16_allocated: |
acb0a19c MD |
1256 | case scm_tc16_big: |
1257 | case scm_tc16_real: | |
1258 | case scm_tc16_complex: | |
0f2d19dd JB |
1259 | break; |
1260 | default: | |
1261 | i = SCM_SMOBNUM (ptr); | |
1262 | if (!(i < scm_numsmob)) | |
1263 | goto def; | |
dc53f026 JB |
1264 | if (scm_smobs[i].mark) |
1265 | { | |
1266 | ptr = (scm_smobs[i].mark) (ptr); | |
1267 | goto gc_mark_loop; | |
1268 | } | |
1269 | else | |
1270 | return; | |
0f2d19dd JB |
1271 | } |
1272 | break; | |
1273 | default: | |
acf4331f DH |
1274 | def: |
1275 | SCM_MISC_ERROR ("unknown type", SCM_EOL); | |
0f2d19dd JB |
1276 | } |
1277 | } | |
acf4331f | 1278 | #undef FUNC_NAME |
0f2d19dd JB |
1279 | |
1280 | ||
1281 | /* Mark a Region Conservatively | |
1282 | */ | |
1283 | ||
a00c95d9 | 1284 | void |
6e8d25a6 | 1285 | scm_mark_locations (SCM_STACKITEM x[], scm_sizet n) |
0f2d19dd | 1286 | { |
c4da09e2 | 1287 | unsigned long m; |
0f2d19dd | 1288 | |
c4da09e2 DH |
1289 | for (m = 0; m < n; ++m) |
1290 | { | |
1291 | SCM obj = * (SCM *) &x[m]; | |
1292 | if (SCM_CELLP (obj)) | |
1293 | { | |
1294 | SCM_CELLPTR ptr = SCM2PTR (obj); | |
1295 | int i = 0; | |
1296 | int j = scm_n_heap_segs - 1; | |
1297 | if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr) | |
1298 | && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr)) | |
1299 | { | |
1300 | while (i <= j) | |
1301 | { | |
1302 | int seg_id; | |
1303 | seg_id = -1; | |
1304 | if ((i == j) | |
1305 | || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)) | |
1306 | seg_id = i; | |
1307 | else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr)) | |
1308 | seg_id = j; | |
1309 | else | |
1310 | { | |
1311 | int k; | |
1312 | k = (i + j) / 2; | |
1313 | if (k == i) | |
1314 | break; | |
1315 | if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) | |
1316 | { | |
1317 | j = k; | |
1318 | ++i; | |
1319 | if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)) | |
1320 | continue; | |
1321 | else | |
1322 | break; | |
1323 | } | |
1324 | else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) | |
1325 | { | |
1326 | i = k; | |
1327 | --j; | |
1328 | if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr)) | |
1329 | continue; | |
1330 | else | |
1331 | break; | |
1332 | } | |
1333 | } | |
1334 | if (scm_heap_table[seg_id].span == 1 | |
1335 | || SCM_DOUBLE_CELLP (obj)) | |
406c7d90 DH |
1336 | { |
1337 | if (!SCM_FREE_CELL_P (obj)) | |
1338 | scm_gc_mark (obj); | |
1339 | } | |
c4da09e2 DH |
1340 | break; |
1341 | } | |
1342 | } | |
1343 | } | |
1344 | } | |
0f2d19dd JB |
1345 | } |
1346 | ||
1347 | ||
1a548472 DH |
1348 | /* The function scm_cellp determines whether an SCM value can be regarded as a |
1349 | * pointer to a cell on the heap. Binary search is used in order to determine | |
1350 | * the heap segment that contains the cell. | |
1351 | */ | |
2e11a577 | 1352 | int |
6e8d25a6 | 1353 | scm_cellp (SCM value) |
2e11a577 | 1354 | { |
1a548472 DH |
1355 | if (SCM_CELLP (value)) { |
1356 | scm_cell * ptr = SCM2PTR (value); | |
1357 | unsigned int i = 0; | |
1358 | unsigned int j = scm_n_heap_segs - 1; | |
1359 | ||
1360 | while (i < j) { | |
1361 | int k = (i + j) / 2; | |
1362 | if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) { | |
1363 | j = k; | |
1364 | } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) { | |
1365 | i = k + 1; | |
1366 | } | |
1367 | } | |
2e11a577 | 1368 | |
9d47a1e6 | 1369 | if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr) |
1a548472 | 1370 | && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr) |
1a548472 DH |
1371 | && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) { |
1372 | return 1; | |
1373 | } else { | |
1374 | return 0; | |
2e11a577 | 1375 | } |
1a548472 DH |
1376 | } else { |
1377 | return 0; | |
1378 | } | |
2e11a577 MD |
1379 | } |
1380 | ||
1381 | ||
4c48ba06 MD |
1382 | static void |
1383 | gc_sweep_freelist_start (scm_freelist_t *freelist) | |
1384 | { | |
1385 | freelist->cells = SCM_EOL; | |
1386 | freelist->left_to_collect = freelist->cluster_size; | |
b37fe1c5 | 1387 | freelist->clusters_allocated = 0; |
4c48ba06 MD |
1388 | freelist->clusters = SCM_EOL; |
1389 | freelist->clustertail = &freelist->clusters; | |
1811ebce | 1390 | freelist->collected_1 = freelist->collected; |
4c48ba06 MD |
1391 | freelist->collected = 0; |
1392 | } | |
1393 | ||
1394 | static void | |
1395 | gc_sweep_freelist_finish (scm_freelist_t *freelist) | |
1396 | { | |
1811ebce | 1397 | int collected; |
4c48ba06 | 1398 | *freelist->clustertail = freelist->cells; |
3f5d82cd | 1399 | if (!SCM_NULLP (freelist->cells)) |
4c48ba06 MD |
1400 | { |
1401 | SCM c = freelist->cells; | |
1402 | SCM_SETCAR (c, SCM_CDR (c)); | |
1403 | SCM_SETCDR (c, SCM_EOL); | |
1404 | freelist->collected += | |
1405 | freelist->span * (freelist->cluster_size - freelist->left_to_collect); | |
1406 | } | |
b37fe1c5 | 1407 | scm_gc_cells_collected += freelist->collected; |
a00c95d9 | 1408 | |
8fef55a8 | 1409 | /* Although freelist->min_yield is used to test freelist->collected |
7dbff8b1 | 1410 | * (which is the local GC yield for freelist), it is adjusted so |
8fef55a8 | 1411 | * that *total* yield is freelist->min_yield_fraction of total heap |
7dbff8b1 MD |
1412 | * size. This means that a too low yield is compensated by more |
1413 | * heap on the list which is currently doing most work, which is | |
1414 | * just what we want. | |
1415 | */ | |
1811ebce | 1416 | collected = SCM_MAX (freelist->collected_1, freelist->collected); |
8fef55a8 | 1417 | freelist->grow_heap_p = (collected < freelist->min_yield); |
4c48ba06 | 1418 | } |
0f2d19dd | 1419 | |
a00c95d9 | 1420 | void |
0f2d19dd | 1421 | scm_gc_sweep () |
acf4331f | 1422 | #define FUNC_NAME "scm_gc_sweep" |
0f2d19dd JB |
1423 | { |
1424 | register SCM_CELLPTR ptr; | |
0f2d19dd | 1425 | register SCM nfreelist; |
4c48ba06 | 1426 | register scm_freelist_t *freelist; |
0f2d19dd | 1427 | register long m; |
0f2d19dd | 1428 | register int span; |
15e9d186 | 1429 | long i; |
0f2d19dd JB |
1430 | scm_sizet seg_size; |
1431 | ||
0f2d19dd | 1432 | m = 0; |
0f2d19dd | 1433 | |
4c48ba06 MD |
1434 | gc_sweep_freelist_start (&scm_master_freelist); |
1435 | gc_sweep_freelist_start (&scm_master_freelist2); | |
a00c95d9 | 1436 | |
cf2d30f6 | 1437 | for (i = 0; i < scm_n_heap_segs; i++) |
0f2d19dd | 1438 | { |
4c48ba06 | 1439 | register unsigned int left_to_collect; |
4c48ba06 | 1440 | register scm_sizet j; |
15e9d186 | 1441 | |
cf2d30f6 JB |
1442 | /* Unmarked cells go onto the front of the freelist this heap |
1443 | segment points to. Rather than updating the real freelist | |
1444 | pointer as we go along, we accumulate the new head in | |
1445 | nfreelist. Then, if it turns out that the entire segment is | |
1446 | free, we free (i.e., malloc's free) the whole segment, and | |
1447 | simply don't assign nfreelist back into the real freelist. */ | |
4c48ba06 MD |
1448 | freelist = scm_heap_table[i].freelist; |
1449 | nfreelist = freelist->cells; | |
4c48ba06 | 1450 | left_to_collect = freelist->left_to_collect; |
945fec60 | 1451 | span = scm_heap_table[i].span; |
cf2d30f6 | 1452 | |
a00c95d9 ML |
1453 | ptr = CELL_UP (scm_heap_table[i].bounds[0], span); |
1454 | seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr; | |
0f2d19dd JB |
1455 | for (j = seg_size + span; j -= span; ptr += span) |
1456 | { | |
96f6f4ae DH |
1457 | SCM scmptr = PTR2SCM (ptr); |
1458 | ||
0f2d19dd JB |
1459 | switch SCM_TYP7 (scmptr) |
1460 | { | |
1461 | case scm_tcs_cons_gloc: | |
0f2d19dd | 1462 | { |
c8045e8d DH |
1463 | /* Dirk:FIXME:: Again, super ugly code: scmptr may be a |
1464 | * struct or a gloc. See the corresponding comment in | |
1465 | * scm_gc_mark. | |
1466 | */ | |
7445e0e8 MD |
1467 | scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr) |
1468 | - scm_tc3_cons_gloc); | |
1469 | /* access as struct */ | |
1470 | scm_bits_t * vtable_data = (scm_bits_t *) word0; | |
c8045e8d | 1471 | if (SCM_GCMARKP (scmptr)) |
7445e0e8 MD |
1472 | goto cmrkcontinue; |
1473 | else if (vtable_data[scm_vtable_index_vcell] == 0) | |
0f2d19dd | 1474 | { |
7445e0e8 MD |
1475 | /* Structs need to be freed in a special order. |
1476 | * This is handled by GC C hooks in struct.c. | |
1477 | */ | |
1478 | SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free); | |
1479 | scm_structs_to_free = scmptr; | |
c8045e8d DH |
1480 | goto cmrkcontinue; |
1481 | } | |
7445e0e8 | 1482 | /* fall through so that scmptr gets collected */ |
0f2d19dd JB |
1483 | } |
1484 | break; | |
1485 | case scm_tcs_cons_imcar: | |
1486 | case scm_tcs_cons_nimcar: | |
1487 | case scm_tcs_closures: | |
e641afaf | 1488 | case scm_tc7_pws: |
0f2d19dd JB |
1489 | if (SCM_GCMARKP (scmptr)) |
1490 | goto cmrkcontinue; | |
1491 | break; | |
1492 | case scm_tc7_wvect: | |
1493 | if (SCM_GC8MARKP (scmptr)) | |
1494 | { | |
1495 | goto c8mrkcontinue; | |
1496 | } | |
1497 | else | |
1498 | { | |
ab4bef85 JB |
1499 | m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM); |
1500 | scm_must_free ((char *)(SCM_VELTS (scmptr) - 2)); | |
0f2d19dd JB |
1501 | break; |
1502 | } | |
1503 | ||
1504 | case scm_tc7_vector: | |
1505 | case scm_tc7_lvector: | |
1506 | #ifdef CCLO | |
1507 | case scm_tc7_cclo: | |
1508 | #endif | |
1509 | if (SCM_GC8MARKP (scmptr)) | |
1510 | goto c8mrkcontinue; | |
1511 | ||
1512 | m += (SCM_LENGTH (scmptr) * sizeof (SCM)); | |
1513 | freechars: | |
1514 | scm_must_free (SCM_CHARS (scmptr)); | |
1515 | /* SCM_SETCHARS(scmptr, 0);*/ | |
1516 | break; | |
afe5177e | 1517 | #ifdef HAVE_ARRAYS |
0f2d19dd JB |
1518 | case scm_tc7_bvect: |
1519 | if SCM_GC8MARKP (scmptr) | |
1520 | goto c8mrkcontinue; | |
1521 | m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT); | |
1522 | goto freechars; | |
1523 | case scm_tc7_byvect: | |
1524 | if SCM_GC8MARKP (scmptr) | |
1525 | goto c8mrkcontinue; | |
1526 | m += SCM_HUGE_LENGTH (scmptr) * sizeof (char); | |
1527 | goto freechars; | |
1528 | case scm_tc7_ivect: | |
1529 | case scm_tc7_uvect: | |
1530 | if SCM_GC8MARKP (scmptr) | |
1531 | goto c8mrkcontinue; | |
1532 | m += SCM_HUGE_LENGTH (scmptr) * sizeof (long); | |
1533 | goto freechars; | |
1534 | case scm_tc7_svect: | |
1535 | if SCM_GC8MARKP (scmptr) | |
1536 | goto c8mrkcontinue; | |
1537 | m += SCM_HUGE_LENGTH (scmptr) * sizeof (short); | |
1538 | goto freechars; | |
5c11cc9d | 1539 | #ifdef HAVE_LONG_LONGS |
0f2d19dd JB |
1540 | case scm_tc7_llvect: |
1541 | if SCM_GC8MARKP (scmptr) | |
1542 | goto c8mrkcontinue; | |
1543 | m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long); | |
1544 | goto freechars; | |
1545 | #endif | |
1546 | case scm_tc7_fvect: | |
1547 | if SCM_GC8MARKP (scmptr) | |
1548 | goto c8mrkcontinue; | |
1549 | m += SCM_HUGE_LENGTH (scmptr) * sizeof (float); | |
1550 | goto freechars; | |
1551 | case scm_tc7_dvect: | |
1552 | if SCM_GC8MARKP (scmptr) | |
1553 | goto c8mrkcontinue; | |
1554 | m += SCM_HUGE_LENGTH (scmptr) * sizeof (double); | |
1555 | goto freechars; | |
1556 | case scm_tc7_cvect: | |
1557 | if SCM_GC8MARKP (scmptr) | |
1558 | goto c8mrkcontinue; | |
1559 | m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double); | |
1560 | goto freechars; | |
afe5177e | 1561 | #endif |
0f2d19dd | 1562 | case scm_tc7_substring: |
0f2d19dd JB |
1563 | if (SCM_GC8MARKP (scmptr)) |
1564 | goto c8mrkcontinue; | |
1565 | break; | |
1566 | case scm_tc7_string: | |
0f2d19dd JB |
1567 | if (SCM_GC8MARKP (scmptr)) |
1568 | goto c8mrkcontinue; | |
1569 | m += SCM_HUGE_LENGTH (scmptr) + 1; | |
1570 | goto freechars; | |
1571 | case scm_tc7_msymbol: | |
1572 | if (SCM_GC8MARKP (scmptr)) | |
1573 | goto c8mrkcontinue; | |
cf551a2b DH |
1574 | m += (SCM_LENGTH (scmptr) + 1 |
1575 | + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr))); | |
0f2d19dd JB |
1576 | scm_must_free ((char *)SCM_SLOTS (scmptr)); |
1577 | break; | |
1578 | case scm_tc7_contin: | |
1579 | if SCM_GC8MARKP (scmptr) | |
1580 | goto c8mrkcontinue; | |
0db18cf4 | 1581 | m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs); |
c68296f8 MV |
1582 | if (SCM_VELTS (scmptr)) |
1583 | goto freechars; | |
0f2d19dd JB |
1584 | case scm_tc7_ssymbol: |
1585 | if SCM_GC8MARKP(scmptr) | |
1586 | goto c8mrkcontinue; | |
1587 | break; | |
1588 | case scm_tcs_subrs: | |
1589 | continue; | |
1590 | case scm_tc7_port: | |
1591 | if SCM_GC8MARKP (scmptr) | |
1592 | goto c8mrkcontinue; | |
1593 | if SCM_OPENP (scmptr) | |
1594 | { | |
1595 | int k = SCM_PTOBNUM (scmptr); | |
1596 | if (!(k < scm_numptob)) | |
1597 | goto sweeperr; | |
1598 | /* Keep "revealed" ports alive. */ | |
945fec60 | 1599 | if (scm_revealed_count (scmptr) > 0) |
0f2d19dd JB |
1600 | continue; |
1601 | /* Yes, I really do mean scm_ptobs[k].free */ | |
1602 | /* rather than ftobs[k].close. .close */ | |
1603 | /* is for explicit CLOSE-PORT by user */ | |
84af0382 | 1604 | m += (scm_ptobs[k].free) (scmptr); |
0f2d19dd JB |
1605 | SCM_SETSTREAM (scmptr, 0); |
1606 | scm_remove_from_port_table (scmptr); | |
1607 | scm_gc_ports_collected++; | |
24e68a57 | 1608 | SCM_SETAND_CAR (scmptr, ~SCM_OPN); |
0f2d19dd JB |
1609 | } |
1610 | break; | |
1611 | case scm_tc7_smob: | |
1612 | switch SCM_GCTYP16 (scmptr) | |
1613 | { | |
1614 | case scm_tc_free_cell: | |
acb0a19c | 1615 | case scm_tc16_real: |
0f2d19dd JB |
1616 | if SCM_GC8MARKP (scmptr) |
1617 | goto c8mrkcontinue; | |
1618 | break; | |
1619 | #ifdef SCM_BIGDIG | |
acb0a19c | 1620 | case scm_tc16_big: |
0f2d19dd JB |
1621 | if SCM_GC8MARKP (scmptr) |
1622 | goto c8mrkcontinue; | |
1623 | m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT); | |
1624 | goto freechars; | |
1625 | #endif /* def SCM_BIGDIG */ | |
acb0a19c | 1626 | case scm_tc16_complex: |
0f2d19dd JB |
1627 | if SCM_GC8MARKP (scmptr) |
1628 | goto c8mrkcontinue; | |
acb0a19c MD |
1629 | m += 2 * sizeof (double); |
1630 | goto freechars; | |
0f2d19dd JB |
1631 | default: |
1632 | if SCM_GC8MARKP (scmptr) | |
1633 | goto c8mrkcontinue; | |
1634 | ||
1635 | { | |
1636 | int k; | |
1637 | k = SCM_SMOBNUM (scmptr); | |
1638 | if (!(k < scm_numsmob)) | |
1639 | goto sweeperr; | |
c8045e8d | 1640 | m += (scm_smobs[k].free) (scmptr); |
0f2d19dd JB |
1641 | break; |
1642 | } | |
1643 | } | |
1644 | break; | |
1645 | default: | |
acf4331f DH |
1646 | sweeperr: |
1647 | SCM_MISC_ERROR ("unknown type", SCM_EOL); | |
0f2d19dd | 1648 | } |
0f2d19dd | 1649 | #if 0 |
3f5d82cd | 1650 | if (SCM_FREE_CELL_P (scmptr)) |
0f2d19dd JB |
1651 | exit (2); |
1652 | #endif | |
4c48ba06 | 1653 | if (!--left_to_collect) |
4a4c9785 MD |
1654 | { |
1655 | SCM_SETCAR (scmptr, nfreelist); | |
4c48ba06 MD |
1656 | *freelist->clustertail = scmptr; |
1657 | freelist->clustertail = SCM_CDRLOC (scmptr); | |
a00c95d9 | 1658 | |
4a4c9785 | 1659 | nfreelist = SCM_EOL; |
4c48ba06 MD |
1660 | freelist->collected += span * freelist->cluster_size; |
1661 | left_to_collect = freelist->cluster_size; | |
4a4c9785 MD |
1662 | } |
1663 | else | |
4a4c9785 MD |
1664 | { |
1665 | /* Stick the new cell on the front of nfreelist. It's | |
1666 | critical that we mark this cell as freed; otherwise, the | |
1667 | conservative collector might trace it as some other type | |
1668 | of object. */ | |
54778cd3 | 1669 | SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell); |
3f5d82cd | 1670 | SCM_SET_FREE_CELL_CDR (scmptr, nfreelist); |
4a4c9785 MD |
1671 | nfreelist = scmptr; |
1672 | } | |
a00c95d9 | 1673 | |
0f2d19dd JB |
1674 | continue; |
1675 | c8mrkcontinue: | |
1676 | SCM_CLRGC8MARK (scmptr); | |
1677 | continue; | |
1678 | cmrkcontinue: | |
1679 | SCM_CLRGCMARK (scmptr); | |
1680 | } | |
1681 | #ifdef GC_FREE_SEGMENTS | |
1682 | if (n == seg_size) | |
1683 | { | |
15e9d186 JB |
1684 | register long j; |
1685 | ||
4c48ba06 | 1686 | freelist->heap_size -= seg_size; |
cf2d30f6 JB |
1687 | free ((char *) scm_heap_table[i].bounds[0]); |
1688 | scm_heap_table[i].bounds[0] = 0; | |
1689 | for (j = i + 1; j < scm_n_heap_segs; j++) | |
0f2d19dd JB |
1690 | scm_heap_table[j - 1] = scm_heap_table[j]; |
1691 | scm_n_heap_segs -= 1; | |
cf2d30f6 | 1692 | i--; /* We need to scan the segment just moved. */ |
0f2d19dd JB |
1693 | } |
1694 | else | |
1695 | #endif /* ifdef GC_FREE_SEGMENTS */ | |
4a4c9785 MD |
1696 | { |
1697 | /* Update the real freelist pointer to point to the head of | |
1698 | the list of free cells we've built for this segment. */ | |
4c48ba06 | 1699 | freelist->cells = nfreelist; |
4c48ba06 | 1700 | freelist->left_to_collect = left_to_collect; |
4a4c9785 MD |
1701 | } |
1702 | ||
fca7547b | 1703 | #ifdef GUILE_DEBUG_FREELIST |
4c48ba06 | 1704 | scm_check_freelist (freelist == &scm_master_freelist |
8ded62a3 MD |
1705 | ? scm_freelist |
1706 | : scm_freelist2); | |
cf2d30f6 JB |
1707 | scm_map_free_list (); |
1708 | #endif | |
4a4c9785 | 1709 | } |
a00c95d9 | 1710 | |
4c48ba06 MD |
1711 | gc_sweep_freelist_finish (&scm_master_freelist); |
1712 | gc_sweep_freelist_finish (&scm_master_freelist2); | |
a00c95d9 | 1713 | |
8ded62a3 MD |
1714 | /* When we move to POSIX threads private freelists should probably |
1715 | be GC-protected instead. */ | |
1716 | scm_freelist = SCM_EOL; | |
1717 | scm_freelist2 = SCM_EOL; | |
a00c95d9 | 1718 | |
b37fe1c5 | 1719 | scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected); |
8b0d194f | 1720 | scm_gc_yield -= scm_cells_allocated; |
0f2d19dd JB |
1721 | scm_mallocated -= m; |
1722 | scm_gc_malloc_collected = m; | |
1723 | } | |
acf4331f | 1724 | #undef FUNC_NAME |
0f2d19dd JB |
1725 | |
1726 | ||
1727 | \f | |
1728 | ||
1729 | /* {Front end to malloc} | |
1730 | * | |
9d47a1e6 ML |
1731 | * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc, |
1732 | * scm_done_free | |
0f2d19dd JB |
1733 | * |
1734 | * These functions provide services comperable to malloc, realloc, and | |
1735 | * free. They are for allocating malloced parts of scheme objects. | |
9d47a1e6 | 1736 | * The primary purpose of the front end is to impose calls to gc. */ |
0f2d19dd | 1737 | |
bc9d9bb2 | 1738 | |
0f2d19dd JB |
1739 | /* scm_must_malloc |
1740 | * Return newly malloced storage or throw an error. | |
1741 | * | |
1742 | * The parameter WHAT is a string for error reporting. | |
a00c95d9 | 1743 | * If the threshold scm_mtrigger will be passed by this |
0f2d19dd JB |
1744 | * allocation, or if the first call to malloc fails, |
1745 | * garbage collect -- on the presumption that some objects | |
1746 | * using malloced storage may be collected. | |
1747 | * | |
1748 | * The limit scm_mtrigger may be raised by this allocation. | |
1749 | */ | |
07806695 | 1750 | void * |
e4ef2330 | 1751 | scm_must_malloc (scm_sizet size, const char *what) |
0f2d19dd | 1752 | { |
07806695 | 1753 | void *ptr; |
15e9d186 | 1754 | unsigned long nm = scm_mallocated + size; |
e4ef2330 MD |
1755 | |
1756 | if (nm <= scm_mtrigger) | |
0f2d19dd | 1757 | { |
07806695 | 1758 | SCM_SYSCALL (ptr = malloc (size)); |
0f2d19dd JB |
1759 | if (NULL != ptr) |
1760 | { | |
1761 | scm_mallocated = nm; | |
bc9d9bb2 MD |
1762 | #ifdef GUILE_DEBUG_MALLOC |
1763 | scm_malloc_register (ptr, what); | |
1764 | #endif | |
0f2d19dd JB |
1765 | return ptr; |
1766 | } | |
1767 | } | |
6064dcc6 | 1768 | |
0f2d19dd | 1769 | scm_igc (what); |
e4ef2330 | 1770 | |
0f2d19dd | 1771 | nm = scm_mallocated + size; |
07806695 | 1772 | SCM_SYSCALL (ptr = malloc (size)); |
0f2d19dd JB |
1773 | if (NULL != ptr) |
1774 | { | |
1775 | scm_mallocated = nm; | |
6064dcc6 MV |
1776 | if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) { |
1777 | if (nm > scm_mtrigger) | |
1778 | scm_mtrigger = nm + nm / 2; | |
1779 | else | |
1780 | scm_mtrigger += scm_mtrigger / 2; | |
1781 | } | |
bc9d9bb2 MD |
1782 | #ifdef GUILE_DEBUG_MALLOC |
1783 | scm_malloc_register (ptr, what); | |
1784 | #endif | |
1785 | ||
0f2d19dd JB |
1786 | return ptr; |
1787 | } | |
e4ef2330 | 1788 | |
acf4331f | 1789 | scm_memory_error (what); |
0f2d19dd JB |
1790 | } |
1791 | ||
1792 | ||
1793 | /* scm_must_realloc | |
1794 | * is similar to scm_must_malloc. | |
1795 | */ | |
07806695 JB |
1796 | void * |
1797 | scm_must_realloc (void *where, | |
e4ef2330 MD |
1798 | scm_sizet old_size, |
1799 | scm_sizet size, | |
3eeba8d4 | 1800 | const char *what) |
0f2d19dd | 1801 | { |
07806695 | 1802 | void *ptr; |
e4ef2330 MD |
1803 | scm_sizet nm = scm_mallocated + size - old_size; |
1804 | ||
1805 | if (nm <= scm_mtrigger) | |
0f2d19dd | 1806 | { |
07806695 | 1807 | SCM_SYSCALL (ptr = realloc (where, size)); |
0f2d19dd JB |
1808 | if (NULL != ptr) |
1809 | { | |
1810 | scm_mallocated = nm; | |
bc9d9bb2 MD |
1811 | #ifdef GUILE_DEBUG_MALLOC |
1812 | scm_malloc_reregister (where, ptr, what); | |
1813 | #endif | |
0f2d19dd JB |
1814 | return ptr; |
1815 | } | |
1816 | } | |
e4ef2330 | 1817 | |
0f2d19dd | 1818 | scm_igc (what); |
e4ef2330 MD |
1819 | |
1820 | nm = scm_mallocated + size - old_size; | |
07806695 | 1821 | SCM_SYSCALL (ptr = realloc (where, size)); |
0f2d19dd JB |
1822 | if (NULL != ptr) |
1823 | { | |
1824 | scm_mallocated = nm; | |
6064dcc6 MV |
1825 | if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) { |
1826 | if (nm > scm_mtrigger) | |
1827 | scm_mtrigger = nm + nm / 2; | |
1828 | else | |
1829 | scm_mtrigger += scm_mtrigger / 2; | |
1830 | } | |
bc9d9bb2 MD |
1831 | #ifdef GUILE_DEBUG_MALLOC |
1832 | scm_malloc_reregister (where, ptr, what); | |
1833 | #endif | |
0f2d19dd JB |
1834 | return ptr; |
1835 | } | |
e4ef2330 | 1836 | |
acf4331f | 1837 | scm_memory_error (what); |
0f2d19dd JB |
1838 | } |
1839 | ||
acf4331f | 1840 | |
a00c95d9 | 1841 | void |
07806695 | 1842 | scm_must_free (void *obj) |
acf4331f | 1843 | #define FUNC_NAME "scm_must_free" |
0f2d19dd | 1844 | { |
bc9d9bb2 MD |
1845 | #ifdef GUILE_DEBUG_MALLOC |
1846 | scm_malloc_unregister (obj); | |
1847 | #endif | |
0f2d19dd JB |
1848 | if (obj) |
1849 | free (obj); | |
1850 | else | |
acf4331f | 1851 | SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL); |
0f2d19dd | 1852 | } |
acf4331f DH |
1853 | #undef FUNC_NAME |
1854 | ||
0f2d19dd | 1855 | |
c68296f8 MV |
1856 | /* Announce that there has been some malloc done that will be freed |
1857 | * during gc. A typical use is for a smob that uses some malloced | |
1858 | * memory but can not get it from scm_must_malloc (for whatever | |
1859 | * reason). When a new object of this smob is created you call | |
1860 | * scm_done_malloc with the size of the object. When your smob free | |
1861 | * function is called, be sure to include this size in the return | |
9d47a1e6 ML |
1862 | * value. |
1863 | * | |
1864 | * If you can't actually free the memory in the smob free function, | |
1865 | * for whatever reason (like reference counting), you still can (and | |
1866 | * should) report the amount of memory freed when you actually free it. | |
1867 | * Do it by calling scm_done_malloc with the _negated_ size. Clever, | |
1868 | * eh? Or even better, call scm_done_free. */ | |
0f2d19dd | 1869 | |
c68296f8 | 1870 | void |
6e8d25a6 | 1871 | scm_done_malloc (long size) |
c68296f8 MV |
1872 | { |
1873 | scm_mallocated += size; | |
1874 | ||
1875 | if (scm_mallocated > scm_mtrigger) | |
1876 | { | |
1877 | scm_igc ("foreign mallocs"); | |
1878 | if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) | |
1879 | { | |
1880 | if (scm_mallocated > scm_mtrigger) | |
1881 | scm_mtrigger = scm_mallocated + scm_mallocated / 2; | |
1882 | else | |
1883 | scm_mtrigger += scm_mtrigger / 2; | |
1884 | } | |
1885 | } | |
1886 | } | |
1887 | ||
9d47a1e6 ML |
/* Report that SIZE bytes previously announced with scm_done_malloc
 * have actually been released; simply shrinks the malloc total used
 * for GC triggering.  */
void
scm_done_free (long size)
{
  scm_mallocated -= size;
}
1893 | ||
c68296f8 MV |
1894 | |
1895 | \f | |
0f2d19dd JB |
1896 | |
/* {Heap Segments}
 *
 * Each heap segment is an array of objects of a particular size.
 * Every segment has an associated (possibly shared) freelist.
 * A table of segment records is kept that records the upper and
 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
 * into valid segments at reasonable offsets).  */

/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

/* Upper bound (in bytes) on the size of any single heap segment;
   configured in scm_init_storage.  */
scm_sizet scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

/* Table of segment records, kept sorted by base address (see
   init_heap_seg); scm_n_heap_segs entries are in use out of
   heap_segment_table_size allocated slots.  */
scm_heap_seg_data_t * scm_heap_table = 0;
static unsigned int heap_segment_table_size = 0;
int scm_n_heap_segs = 0;
1923 | ||
0f2d19dd JB |
/* init_heap_seg
 * initializes a new heap segment and returns the number of bytes it
 * contains (0 if SEG_ORG is NULL).
 *
 * The segment origin and segment size in bytes are input parameters;
 * the span of objects in cells comes from FREELIST.  The freelist is
 * both input and output.
 *
 * This function presumes that the scm_heap_table has already been
 * expanded to accommodate a new segment record.
 */
static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  /* Round the origin up to the first properly aligned cell.  */
  ptr = CELL_UP (seg_org, span);

  /* Compute the ceiling on valid object pointers w/in this segment.  */
  seg_end = CELL_DN ((char *) seg_org + size, span);

  /* Find the right place (table kept sorted by base address) and
   * insert the segment record.  */
  for (new_seg_index = 0;
       ( (new_seg_index < scm_n_heap_segs)
         && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    int i;
    /* Shift later records up one slot to make room.  */
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  /* Compute the least valid object pointer w/in this segment.  */
  ptr = CELL_UP (ptr, span);

  /* Number of cells between the first and last valid pointer.  */
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters.  Each cluster is
   * one spine cell followed by cluster_size free cells, chained
   * through their CDRs.  */
  {
    SCM clusters;
    SCM *clusterp = &clusters;
    int n_cluster_cells = span * freelist->cluster_size;

    while (n_new_cells > span) /* at least one spine + one freecell */
      {
        /* Determine end of cluster.  */
        if (n_new_cells >= n_cluster_cells)
          {
            seg_end = ptr + n_cluster_cells;
            n_new_cells -= n_cluster_cells;
          }
        else
          /* The segment size is expected to divide cleanly by the
           * cluster size (round_to_cluster_size guarantees this);
           * anything else is a bug.  */
          abort ();

        /* Allocate cluster spine.  */
        *clusterp = PTR2SCM (ptr);
        SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
        clusterp = SCM_CDRLOC (*clusterp);
        ptr += span;

        /* Thread the remaining cells of the cluster into a freelist.
         * Marking each cell as a free cell is critical: otherwise the
         * conservative collector might trace it as another type.  */
        while (ptr < seg_end)
          {
            SCM scmptr = PTR2SCM (ptr);

            SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
            SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (ptr + span));
            ptr += span;
          }

        /* Terminate this cluster's free-cell chain.  */
        SCM_SET_FREE_CELL_CDR (PTR2SCM (ptr - span), SCM_EOL);
      }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.  */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}
2037 | ||
a00c95d9 ML |
2038 | static scm_sizet |
2039 | round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len) | |
2040 | { | |
2041 | scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist); | |
2042 | ||
2043 | return | |
2044 | (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes | |
2045 | + ALIGNMENT_SLACK (freelist); | |
2046 | } | |
2047 | ||
a00c95d9 | 2048 | static void |
b6efc951 | 2049 | alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy) |
acf4331f | 2050 | #define FUNC_NAME "alloc_some_heap" |
0f2d19dd | 2051 | { |
0f2d19dd | 2052 | SCM_CELLPTR ptr; |
b37fe1c5 | 2053 | long len; |
a00c95d9 | 2054 | |
9d47a1e6 | 2055 | if (scm_gc_heap_lock) |
b6efc951 DH |
2056 | { |
2057 | /* Critical code sections (such as the garbage collector) aren't | |
2058 | * supposed to add heap segments. | |
2059 | */ | |
2060 | fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n"); | |
2061 | abort (); | |
2062 | } | |
0f2d19dd | 2063 | |
9d47a1e6 | 2064 | if (scm_n_heap_segs == heap_segment_table_size) |
b6efc951 DH |
2065 | { |
2066 | /* We have to expand the heap segment table to have room for the new | |
2067 | * segment. Do not yet increment scm_n_heap_segs -- that is done by | |
2068 | * init_heap_seg only if the allocation of the segment itself succeeds. | |
2069 | */ | |
2070 | unsigned int new_table_size = scm_n_heap_segs + 1; | |
2071 | size_t size = new_table_size * sizeof (scm_heap_seg_data_t); | |
2072 | scm_heap_seg_data_t * new_heap_table; | |
2073 | ||
2074 | SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *) | |
2075 | realloc ((char *)scm_heap_table, size))); | |
2076 | if (!new_heap_table) | |
2077 | { | |
2078 | if (error_policy == abort_on_error) | |
2079 | { | |
2080 | fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n"); | |
2081 | abort (); | |
2082 | } | |
2083 | else | |
2084 | { | |
2085 | return; | |
2086 | } | |
2087 | } | |
2088 | else | |
2089 | { | |
2090 | scm_heap_table = new_heap_table; | |
2091 | heap_segment_table_size = new_table_size; | |
2092 | } | |
2093 | } | |
0f2d19dd JB |
2094 | |
2095 | ||
2096 | /* Pick a size for the new heap segment. | |
a00c95d9 | 2097 | * The rule for picking the size of a segment is explained in |
0f2d19dd JB |
2098 | * gc.h |
2099 | */ | |
4c48ba06 | 2100 | { |
1811ebce MD |
2101 | /* Assure that the new segment is predicted to be large enough. |
2102 | * | |
2103 | * New yield should at least equal GC fraction of new heap size, i.e. | |
2104 | * | |
2105 | * y + dh > f * (h + dh) | |
2106 | * | |
2107 | * y : yield | |
8fef55a8 | 2108 | * f : min yield fraction |
1811ebce MD |
2109 | * h : heap size |
2110 | * dh : size of new heap segment | |
2111 | * | |
2112 | * This gives dh > (f * h - y) / (1 - f) | |
bda1446c | 2113 | */ |
8fef55a8 | 2114 | int f = freelist->min_yield_fraction; |
1811ebce MD |
2115 | long h = SCM_HEAP_SIZE; |
2116 | long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f); | |
4c48ba06 MD |
2117 | len = SCM_EXPHEAP (freelist->heap_size); |
2118 | #ifdef DEBUGINFO | |
2119 | fprintf (stderr, "(%d < %d)", len, min_cells); | |
2120 | #endif | |
2121 | if (len < min_cells) | |
1811ebce | 2122 | len = min_cells + freelist->cluster_size; |
4c48ba06 | 2123 | len *= sizeof (scm_cell); |
1811ebce MD |
2124 | /* force new sampling */ |
2125 | freelist->collected = LONG_MAX; | |
4c48ba06 | 2126 | } |
a00c95d9 | 2127 | |
4c48ba06 MD |
2128 | if (len > scm_max_segment_size) |
2129 | len = scm_max_segment_size; | |
0f2d19dd JB |
2130 | |
2131 | { | |
2132 | scm_sizet smallest; | |
2133 | ||
a00c95d9 | 2134 | smallest = CLUSTER_SIZE_IN_BYTES (freelist); |
a00c95d9 | 2135 | |
0f2d19dd | 2136 | if (len < smallest) |
a00c95d9 | 2137 | len = smallest; |
0f2d19dd JB |
2138 | |
2139 | /* Allocate with decaying ambition. */ | |
2140 | while ((len >= SCM_MIN_HEAP_SEG_SIZE) | |
2141 | && (len >= smallest)) | |
2142 | { | |
1811ebce | 2143 | scm_sizet rounded_len = round_to_cluster_size (freelist, len); |
a00c95d9 | 2144 | SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len)); |
0f2d19dd JB |
2145 | if (ptr) |
2146 | { | |
a00c95d9 | 2147 | init_heap_seg (ptr, rounded_len, freelist); |
0f2d19dd JB |
2148 | return; |
2149 | } | |
2150 | len /= 2; | |
2151 | } | |
2152 | } | |
2153 | ||
b6efc951 DH |
2154 | if (error_policy == abort_on_error) |
2155 | { | |
2156 | fprintf (stderr, "alloc_some_heap: Could not grow heap.\n"); | |
2157 | abort (); | |
2158 | } | |
0f2d19dd | 2159 | } |
acf4331f | 2160 | #undef FUNC_NAME |
0f2d19dd JB |
2161 | |
2162 | ||
/* Walk every cell of every heap segment and, for each gloc whose name
 * matches NAME (or for all glocs when NAME is #t), reset the cell's
 * word 0 to the symbol NAME — i.e. "unhash" the memoized global
 * variable reference so it will be looked up again.  Returns NAME.  */
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
            "")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1,name);
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
        {
          SCM cell = PTR2SCM (p);
          if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
            {
              /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
               * struct cell.  See the corresponding comment in scm_gc_mark.
               */
              scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
              SCM gloc_car = SCM_PACK (word0); /* access as gloc */
              SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
              /* vcell values 0 and 1 mark struct cells, not real
                 vcells — presumably; skip those.  TODO confirm.  */
              if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
                  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
                {
                  SCM_SET_CELL_OBJECT_0 (cell, name);
                }
            }
          ++p;
        }
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME
0f2d19dd JB |
2203 | |
2204 | ||
2205 | \f | |
2206 | /* {GC Protection Helper Functions} | |
2207 | */ | |
2208 | ||
2209 | ||
/* Keep the object pointed to by PTR reachable from the C stack up to
 * this call so the conservative collector does not reclaim it; the
 * function body is intentionally empty.  */
void
scm_remember (SCM *ptr)
{ /* empty */ }
0f2d19dd | 2213 | |
1cc91f1b | 2214 | |
/*
  These crazy functions prevent garbage collection
  of arguments after the first argument by
  ensuring they remain live throughout the
  function because they are used in the last
  line of the code block.
  It'd be better to have a nice compiler hint to
  aid the conservative stack-scanning GC. --03/09/00 gjb */
SCM
scm_return_first (SCM elt, ...)
{
  /* The variadic arguments are never read; their presence on the
     stack is the whole point.  */
  return elt;
}
/* Integer variant of scm_return_first: returns I unchanged while
 * keeping the trailing arguments live on the stack for the
 * conservative collector.  */
int
scm_return_first_int (int i, ...)
{
  int first = i;
  return first;
}
0f2d19dd | 2235 | |
0f2d19dd | 2236 | SCM |
6e8d25a6 | 2237 | scm_permanent_object (SCM obj) |
0f2d19dd JB |
2238 | { |
2239 | SCM_REDEFER_INTS; | |
2240 | scm_permobjs = scm_cons (obj, scm_permobjs); | |
2241 | SCM_REALLOW_INTS; | |
2242 | return obj; | |
2243 | } | |
2244 | ||
2245 | ||
7bd4fbe2 MD |
2246 | /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all |
2247 | other references are dropped, until the object is unprotected by calling | |
2248 | scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest, | |
2249 | i. e. it is possible to protect the same object several times, but it is | |
2250 | necessary to unprotect the object the same number of times to actually get | |
2251 | the object unprotected. It is an error to unprotect an object more often | |
2252 | than it has been protected before. The function scm_protect_object returns | |
2253 | OBJ. | |
2254 | */ | |
2255 | ||
2256 | /* Implementation note: For every object X, there is a counter which | |
2257 | scm_protect_object(X) increments and scm_unprotect_object(X) decrements. | |
2258 | */ | |
686765af | 2259 | |
ef290276 | 2260 | SCM |
6e8d25a6 | 2261 | scm_protect_object (SCM obj) |
ef290276 | 2262 | { |
686765af | 2263 | SCM handle; |
9d47a1e6 | 2264 | |
686765af | 2265 | /* This critical section barrier will be replaced by a mutex. */ |
2dd6a83a | 2266 | SCM_REDEFER_INTS; |
9d47a1e6 | 2267 | |
0f0f0899 MD |
2268 | handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0)); |
2269 | SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1)); | |
9d47a1e6 | 2270 | |
2dd6a83a | 2271 | SCM_REALLOW_INTS; |
9d47a1e6 | 2272 | |
ef290276 JB |
2273 | return obj; |
2274 | } | |
2275 | ||
2276 | ||
2277 | /* Remove any protection for OBJ established by a prior call to | |
dab7f566 | 2278 | scm_protect_object. This function returns OBJ. |
ef290276 | 2279 | |
dab7f566 | 2280 | See scm_protect_object for more information. */ |
ef290276 | 2281 | SCM |
6e8d25a6 | 2282 | scm_unprotect_object (SCM obj) |
ef290276 | 2283 | { |
686765af | 2284 | SCM handle; |
9d47a1e6 | 2285 | |
686765af | 2286 | /* This critical section barrier will be replaced by a mutex. */ |
2dd6a83a | 2287 | SCM_REDEFER_INTS; |
9d47a1e6 | 2288 | |
686765af | 2289 | handle = scm_hashq_get_handle (scm_protects, obj); |
9d47a1e6 | 2290 | |
0f0f0899 | 2291 | if (SCM_IMP (handle)) |
686765af | 2292 | { |
0f0f0899 MD |
2293 | fprintf (stderr, "scm_unprotect_object called on unprotected object\n"); |
2294 | abort (); | |
686765af | 2295 | } |
6a199940 DH |
2296 | else |
2297 | { | |
2298 | unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1; | |
2299 | if (count == 0) | |
2300 | scm_hashq_remove_x (scm_protects, obj); | |
2301 | else | |
2302 | SCM_SETCDR (handle, SCM_MAKINUM (count)); | |
2303 | } | |
686765af | 2304 | |
2dd6a83a | 2305 | SCM_REALLOW_INTS; |
ef290276 JB |
2306 | |
2307 | return obj; | |
2308 | } | |
2309 | ||
/* Set while the process is shutting down; other modules can test this
   to skip work that is pointless (or unsafe) during termination.  */
int terminating;

/* called on process termination.  The signature depends on which exit
   hook mechanism the platform provides (atexit vs. on_exit).  */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  /* Flush buffered output so nothing is lost at exit.  */
  scm_flush_all_ports ();
}
ef290276 | 2330 | |
0f2d19dd | 2331 | \f |
acb0a19c | 2332 | static int |
4c48ba06 | 2333 | make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist) |
acb0a19c | 2334 | { |
a00c95d9 ML |
2335 | scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size); |
2336 | if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size), | |
2337 | rounded_size, | |
4c48ba06 | 2338 | freelist)) |
acb0a19c | 2339 | { |
a00c95d9 ML |
2340 | rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE); |
2341 | if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size), | |
2342 | rounded_size, | |
4c48ba06 | 2343 | freelist)) |
acb0a19c MD |
2344 | return 1; |
2345 | } | |
2346 | else | |
2347 | scm_expmem = 1; | |
2348 | ||
8fef55a8 MD |
2349 | if (freelist->min_yield_fraction) |
2350 | freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction | |
b37fe1c5 | 2351 | / 100); |
8fef55a8 | 2352 | freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield); |
a00c95d9 | 2353 | |
acb0a19c MD |
2354 | return 0; |
2355 | } | |
2356 | ||
2357 | \f | |
4c48ba06 MD |
2358 | static void |
2359 | init_freelist (scm_freelist_t *freelist, | |
2360 | int span, | |
2361 | int cluster_size, | |
8fef55a8 | 2362 | int min_yield) |
4c48ba06 MD |
2363 | { |
2364 | freelist->clusters = SCM_EOL; | |
2365 | freelist->cluster_size = cluster_size + 1; | |
b37fe1c5 MD |
2366 | freelist->left_to_collect = 0; |
2367 | freelist->clusters_allocated = 0; | |
8fef55a8 MD |
2368 | freelist->min_yield = 0; |
2369 | freelist->min_yield_fraction = min_yield; | |
4c48ba06 MD |
2370 | freelist->span = span; |
2371 | freelist->collected = 0; | |
1811ebce | 2372 | freelist->collected_1 = 0; |
4c48ba06 MD |
2373 | freelist->heap_size = 0; |
2374 | } | |
2375 | ||
/* Bootstrap the storage subsystem: initialize the two master
 * freelists (span 1 and span 2), allocate the initial heap segments,
 * set up the GC hooks and the port table, register the exit cleanup
 * handler, and create the fundamental protected objects.  Zero
 * arguments select the compiled-in defaults.  Returns 0 on success,
 * 1 on allocation failure.  NOTE: the order of these steps matters —
 * e.g. segments must exist before scm_cons can be used.  */
int
scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
                  scm_sizet init_heap_size_2, int gc_trigger_2,
                  scm_sizet max_segment_size)
{
  scm_sizet j;

  /* Fall back to the built-in defaults for unspecified sizes.  */
  if (!init_heap_size_1)
    init_heap_size_1 = scm_default_init_heap_size_1;
  if (!init_heap_size_2)
    init_heap_size_2 = scm_default_init_heap_size_2;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;             /* GC stays blocked until boot completes */

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  init_freelist (&scm_master_freelist,
                 1, SCM_CLUSTER_SIZE_1,
                 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
  init_freelist (&scm_master_freelist2,
                 2, SCM_CLUSTER_SIZE_2,
                 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
  scm_max_segment_size
    = max_segment_size ? max_segment_size : scm_default_max_segment_size;

  scm_expmem = 0;

  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  /* Room for two segment records: one per master freelist.  */
  scm_heap_table = ((scm_heap_seg_data_t *)
                    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
  heap_segment_table_size = 2;

  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change.  do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports.  */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  /* scm_undefineds is a circular list of SCM_UNDEFINED.  */
  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
  scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
  scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
  scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
  scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
#ifdef SCM_BIGDIG
  scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
#endif
  return 0;
}
939794ce | 2458 | |
0f2d19dd JB |
2459 | \f |
2460 | ||
/* Scheme-visible hook run after each garbage collection (via
   gc_async_thunk below).  */
SCM scm_after_gc_hook;

#if (SCM_DEBUG_DEPRECATED == 0)
static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
#endif /* SCM_DEBUG_DEPRECATED == 0 */
/* System async marked at the end of GC; its thunk runs the
   after-gc hook once the evaluator handles pending asyncs.  */
static SCM gc_async;
2467 | ||
2468 | ||
2469 | /* The function gc_async_thunk causes the execution of the after-gc-hook. It | |
2470 | * is run after the gc, as soon as the asynchronous events are handled by the | |
2471 | * evaluator. | |
2472 | */ | |
2473 | static SCM | |
2474 | gc_async_thunk (void) | |
2475 | { | |
2476 | scm_c_run_hook (scm_after_gc_hook, SCM_EOL); | |
2477 | ||
2478 | #if (SCM_DEBUG_DEPRECATED == 0) | |
2479 | ||
2480 | /* The following code will be removed in Guile 1.5. */ | |
2481 | if (SCM_NFALSEP (scm_gc_vcell)) | |
2482 | { | |
2483 | SCM proc = SCM_CDR (scm_gc_vcell); | |
2484 | ||
2485 | if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc)) | |
2486 | scm_apply (proc, SCM_EOL, SCM_EOL); | |
2487 | } | |
2488 | ||
2489 | #endif /* SCM_DEBUG_DEPRECATED == 0 */ | |
2490 | ||
2491 | return SCM_UNSPECIFIED; | |
2492 | } | |
2493 | ||
2494 | ||
/* The function mark_gc_async is run by the scm_after_gc_c_hook at the
 * end of the garbage collection.  The only purpose of this function
 * is to mark the gc_async (which will eventually lead to the
 * execution of the gc_async_thunk).  The unused parameters match the
 * generic C-hook callback signature.  */
static void *
mark_gc_async (void * hook_data, void *func_data, void *data)
{
  scm_system_async_mark (gc_async);
  return NULL;
}
2506 | ||
2507 | ||
/* Initialize the GC's Scheme-level machinery: create the
 * after-gc-hook, the async whose thunk runs it, and register
 * mark_gc_async on the C-level after-gc hook.  The trailing .x
 * include registers the primitives defined in this file.  */
void
scm_init_gc ()
{
  SCM after_gc_thunk;

  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);

#if (SCM_DEBUG_DEPRECATED == 0)
  scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
#endif /* SCM_DEBUG_DEPRECATED == 0 */
  /* Dirk:FIXME:: We don't really want a binding here. */
  after_gc_thunk = scm_make_gsubr ("%gc-thunk", 0, 0, 0, gc_async_thunk);
  gc_async = scm_system_async (after_gc_thunk);

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}
89e00824 ML |
2526 | |
2527 | /* | |
2528 | Local Variables: | |
2529 | c-file-style: "gnu" | |
2530 | End: | |
2531 | */ |