Commit | Line | Data |
---|---|---|
d9377076 | 1 | /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003, 2006, 2008, 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc. |
a00c95d9 | 2 | * |
73be1d9e | 3 | * This library is free software; you can redistribute it and/or |
53befeb7 NJ |
4 | * modify it under the terms of the GNU Lesser General Public License |
5 | * as published by the Free Software Foundation; either version 3 of | |
6 | * the License, or (at your option) any later version. | |
a00c95d9 | 7 | * |
53befeb7 NJ |
8 | * This library is distributed in the hope that it will be useful, but |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
73be1d9e MV |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
11 | * Lesser General Public License for more details. | |
a00c95d9 | 12 | * |
73be1d9e MV |
13 | * You should have received a copy of the GNU Lesser General Public |
14 | * License along with this library; if not, write to the Free Software | |
53befeb7 NJ |
15 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
16 | * 02110-1301 USA | |
73be1d9e | 17 | */ |
1bbd0b84 | 18 | |
37ddcaf6 MD |
19 | /* #define DEBUGINFO */ |
20 | ||
dbb605f5 | 21 | #ifdef HAVE_CONFIG_H |
aa54a9b0 RB |
22 | # include <config.h> |
23 | #endif | |
56495472 | 24 | |
e7bca227 LC |
25 | #include "libguile/gen-scmconfig.h" |
26 | ||
0f2d19dd | 27 | #include <stdio.h> |
e6e2e95a | 28 | #include <errno.h> |
783e7774 | 29 | #include <string.h> |
34cf38c3 | 30 | #include <stdlib.h> |
6360beb2 | 31 | #include <math.h> |
e6e2e95a | 32 | |
3ec17f28 LC |
33 | #ifdef __ia64__ |
34 | #include <ucontext.h> | |
35 | extern unsigned long * __libc_ia64_register_backing_store_base; | |
36 | #endif | |
37 | ||
a0599745 | 38 | #include "libguile/_scm.h" |
0a7a7445 | 39 | #include "libguile/eval.h" |
a0599745 MD |
40 | #include "libguile/stime.h" |
41 | #include "libguile/stackchk.h" | |
42 | #include "libguile/struct.h" | |
a0599745 | 43 | #include "libguile/smob.h" |
2fa901a5 | 44 | #include "libguile/arrays.h" |
a0599745 MD |
45 | #include "libguile/async.h" |
46 | #include "libguile/ports.h" | |
47 | #include "libguile/root.h" | |
48 | #include "libguile/strings.h" | |
49 | #include "libguile/vectors.h" | |
686765af | 50 | #include "libguile/hashtab.h" |
ecf470a2 | 51 | #include "libguile/tags.h" |
a0599745 | 52 | |
c8a1bdc4 | 53 | #include "libguile/private-gc.h" |
a0599745 | 54 | #include "libguile/validate.h" |
1be6b49c | 55 | #include "libguile/deprecation.h" |
a0599745 | 56 | #include "libguile/gc.h" |
9de87eea | 57 | #include "libguile/dynwind.h" |
fce59c93 | 58 | |
1c44468d | 59 | #include "libguile/bdw-gc.h" |
a82e7953 | 60 | |
cc3546b0 AW |
61 | /* For GC_set_start_callback. */ |
62 | #include <gc/gc_mark.h> | |
63 | ||
bc9d9bb2 | 64 | #ifdef GUILE_DEBUG_MALLOC |
a0599745 | 65 | #include "libguile/debug-malloc.h" |
bc9d9bb2 MD |
66 | #endif |
67 | ||
0f2d19dd | 68 | #ifdef HAVE_UNISTD_H |
95b88819 | 69 | #include <unistd.h> |
0f2d19dd JB |
70 | #endif |
71 | ||
eae33935 | 72 | /* Set this to != 0 if every cell that is accessed shall be checked: |
61045190 | 73 | */ |
eab1b259 HWN |
74 | int scm_debug_cell_accesses_p = 0; |
75 | int scm_expensive_debug_cell_accesses_p = 0; | |
406c7d90 | 76 | |
e81d98ec DH |
77 | /* Set this to 0 if no additional gc's shall be performed, otherwise set it to |
78 | * the number of cell accesses after which a gc shall be called. | |
79 | */ | |
eab1b259 | 80 | int scm_debug_cells_gc_interval = 0; |
e81d98ec | 81 | |
acbccb0c | 82 | /* Hash table that keeps a reference to objects the user wants to protect from |
fbe1cb7f AW |
83 | garbage collection. */ |
84 | static SCM scm_protects; | |
e7efe8e7 AW |
85 | |
86 | ||
eab1b259 HWN |
87 | #if (SCM_DEBUG_CELL_ACCESSES == 1) |
88 | ||
89 | ||
90 | /* | |
91 | ||
92 | Assert that the given object is a valid reference to a valid cell. This | |
93 | test involves determining whether the object is a cell pointer, whether |
94 | this pointer actually points into a heap segment and whether the cell | |
95 | pointed to is not a free cell. Further, additional garbage collections may | |
96 | get executed after a user defined number of cell accesses. This helps to | |
97 | find places in the C code where references are dropped for extremely short | |
98 | periods. | |
99 | ||
100 | */ | |
406c7d90 | 101 | void |
eab1b259 | 102 | scm_i_expensive_validation_check (SCM cell) |
406c7d90 | 103 | { |
eab1b259 HWN |
104 | /* If desired, perform additional garbage collections after a user |
105 | * defined number of cell accesses. | |
106 | */ | |
107 | if (scm_debug_cells_gc_interval) | |
108 | { | |
109 | static unsigned int counter = 0; | |
61045190 | 110 | |
eab1b259 HWN |
111 | if (counter != 0) |
112 | { | |
113 | --counter; | |
114 | } | |
115 | else | |
116 | { | |
117 | counter = scm_debug_cells_gc_interval; | |
b17e0ac3 | 118 | scm_gc (); |
eab1b259 HWN |
119 | } |
120 | } | |
121 | } | |
122 | ||
8c93b597 LC |
123 | /* Whether cell validation is already running. */ |
124 | static int scm_i_cell_validation_already_running = 0; | |
125 | ||
eab1b259 HWN |
/* Entry point for per-access cell validation: when enabled, sanity-check
   CELL on every access.  A static flag guards against re-entry, because
   the validation (and any GC it triggers) performs cell accesses of its
   own.  */
void
scm_assert_cell_valid (SCM cell)
{
  if (!scm_i_cell_validation_already_running && scm_debug_cell_accesses_p)
    {
      scm_i_cell_validation_already_running = 1;  /* set to avoid recursion */

      /*
	During GC, no user-code should be run, and the guile core
	should use non-protected accessors.
      */
      if (scm_gc_running_p)
	return;
      /* NOTE(review): the early return above leaves
	 scm_i_cell_validation_already_running set to 1, which silently
	 disables all subsequent validation -- confirm this is intended.  */

      /*
	Only scm_in_heap_p and rescanning the heap is wildly
	expensive.
      */
      if (scm_expensive_debug_cell_accesses_p)
	scm_i_expensive_validation_check (cell);

      scm_i_cell_validation_already_running = 0;  /* re-enable */
    }
}
150 | ||
151 | ||
eab1b259 | 152 | |
406c7d90 DH |
SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
	    (SCM flag),
	    "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
	    "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
	    "but no additional calls to garbage collection are issued.\n"
	    "If @var{flag} is a number, strict cell access checking is enabled,\n"
	    "with an additional garbage collection after the given\n"
	    "number of cell accesses.\n"
	    "This procedure only exists when the compile-time flag\n"
	    "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (scm_is_false (flag))
    {
      /* #f: disable all cell-access checking.  */
      scm_debug_cell_accesses_p = 0;
    }
  else if (scm_is_eq (flag, SCM_BOOL_T))
    {
      /* #t: cheap checking only, no forced collections.  */
      scm_debug_cells_gc_interval = 0;
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 0;
    }
  else
    {
      /* A number: expensive checking with a forced GC every FLAG
	 accesses.  Range-checked to a non-negative int.  */
      scm_debug_cells_gc_interval = scm_to_signed_integer (flag, 0, INT_MAX);
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 1;
    }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
0f2d19dd | 184 | |
ecf470a2 | 185 | |
c8a1bdc4 | 186 | #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */ |
0f2d19dd JB |
187 | |
188 | \f | |
14294ce0 | 189 | |
26224b3f LC |
190 | /* Hooks. */ |
191 | scm_t_c_hook scm_before_gc_c_hook; | |
192 | scm_t_c_hook scm_before_mark_c_hook; | |
193 | scm_t_c_hook scm_before_sweep_c_hook; | |
194 | scm_t_c_hook scm_after_sweep_c_hook; | |
195 | scm_t_c_hook scm_after_gc_c_hook; | |
945fec60 | 196 | |
0f2d19dd | 197 | |
0fbdbe6c AW |
/* Run the C-level before-GC hook.  Presumably registered with libgc via
   GC_set_start_callback (the gc_mark.h include above exists for it) --
   confirm at the registration site.  */
static void
run_before_gc_c_hook (void)
{
  if (!SCM_I_CURRENT_THREAD)
    /* GC while a thread is spinning up; punt. */
    return;

  scm_c_hook_run (&scm_before_gc_c_hook, NULL);
}
207 | ||
208 | ||
0f2d19dd JB |
209 | /* GC Statistics Keeping |
210 | */ | |
b74e86cf | 211 | unsigned long scm_gc_ports_collected = 0; |
00b6ef23 AW |
212 | static long gc_time_taken = 0; |
213 | static long gc_start_time = 0; | |
214 | ||
6360beb2 AW |
215 | static unsigned long free_space_divisor; |
216 | static unsigned long minimum_free_space_divisor; | |
217 | static double target_free_space_divisor; | |
b74e86cf | 218 | |
915b3f9f | 219 | static unsigned long protected_obj_count = 0; |
c2cbcc57 | 220 | |
0f2d19dd | 221 | |
17ab1dc3 | 222 | SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken"); |
915b3f9f LC |
223 | SCM_SYMBOL (sym_heap_size, "heap-size"); |
224 | SCM_SYMBOL (sym_heap_free_size, "heap-free-size"); | |
225 | SCM_SYMBOL (sym_heap_total_allocated, "heap-total-allocated"); | |
17ab1dc3 | 226 | SCM_SYMBOL (sym_heap_allocated_since_gc, "heap-allocated-since-gc"); |
7eec4c37 | 227 | SCM_SYMBOL (sym_protected_objects, "protected-objects"); |
17ab1dc3 | 228 | SCM_SYMBOL (sym_times, "gc-times"); |
cf2d30f6 | 229 | |
d3dd80ab | 230 | |
0f2d19dd JB |
231 | /* {Scheme Interface to GC} |
232 | */ | |
1367aa5e HWN |
233 | static SCM |
234 | tag_table_to_type_alist (void *closure, SCM key, SCM val, SCM acc) | |
235 | { | |
8fecbb19 | 236 | if (scm_is_integer (key)) |
8a00ba71 | 237 | { |
3e2073bd | 238 | int c_tag = scm_to_int (key); |
8fecbb19 HWN |
239 | |
240 | char const * name = scm_i_tag_name (c_tag); | |
241 | if (name != NULL) | |
242 | { | |
243 | key = scm_from_locale_string (name); | |
244 | } | |
245 | else | |
246 | { | |
247 | char s[100]; | |
248 | sprintf (s, "tag %d", c_tag); | |
249 | key = scm_from_locale_string (s); | |
250 | } | |
8a00ba71 | 251 | } |
8fecbb19 | 252 | |
1367aa5e HWN |
253 | return scm_cons (scm_cons (key, val), acc); |
254 | } | |
255 | ||
SCM_DEFINE (scm_gc_live_object_stats, "gc-live-object-stats", 0, 0, 0,
	    (),
	    "Return an alist of statistics of the current live objects. ")
#define FUNC_NAME s_scm_gc_live_object_stats
{
  /* NOTE(review): TAB is created fresh and never populated, so the fold
     below visits no entries and the result is always the empty list.
     This looks vestigial -- confirm whether it should be populated or
     the procedure deprecated.  */
  SCM tab = scm_make_hash_table (scm_from_int (57));
  SCM alist;

  alist
    = scm_internal_hash_fold (&tag_table_to_type_alist, NULL, SCM_EOL, tab);

  return alist;
}
#undef FUNC_NAME
270 | ||
c2cbcc57 | 271 | extern int scm_gc_malloc_yield_percentage; |
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
	    (),
	    "Return an association list of statistics about Guile's current\n"
	    "use of storage.\n")
#define FUNC_NAME s_scm_gc_stats
{
  SCM answer;
  GC_word heap_size, free_bytes, unmapped_bytes, bytes_since_gc, total_bytes;
  size_t gc_times;

  /* Snapshot libgc's heap-usage counters and its collection count.
     unmapped_bytes is fetched but not reported below.  */
  GC_get_heap_usage_safe (&heap_size, &free_bytes, &unmapped_bytes,
			  &bytes_since_gc, &total_bytes);
  gc_times = GC_gc_no;

  /* Assemble the (symbol . value) alist returned to Scheme.
     gc_time_taken is maintained by the start/accumulate timer hooks.  */
  answer =
    scm_list_n (scm_cons (sym_gc_time_taken, scm_from_long (gc_time_taken)),
		scm_cons (sym_heap_size, scm_from_size_t (heap_size)),
		scm_cons (sym_heap_free_size, scm_from_size_t (free_bytes)),
		scm_cons (sym_heap_total_allocated,
			  scm_from_size_t (total_bytes)),
		scm_cons (sym_heap_allocated_since_gc,
			  scm_from_size_t (bytes_since_gc)),
		scm_cons (sym_protected_objects,
			  scm_from_ulong (protected_obj_count)),
		scm_cons (sym_times, scm_from_size_t (gc_times)),
		SCM_UNDEFINED);

  return answer;
}
#undef FUNC_NAME
0f2d19dd | 302 | |
539b08a4 | 303 | |
7f9ec18a LC |
SCM_DEFINE (scm_gc_dump, "gc-dump", 0, 0, 0,
	    (void),
	    "Dump information about the garbage collector's internal data "
	    "structures and memory usage to the standard output.")
#define FUNC_NAME s_scm_gc_dump
{
  /* Delegates entirely to libgc's debugging dump.  */
  GC_dump ();

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
315 | ||
acf4331f | 316 | |
c8a1bdc4 HWN |
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
	    (SCM obj),
	    "Return an integer that for the lifetime of @var{obj} is uniquely\n"
	    "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  /* The SCM bit pattern itself serves as the unique integer; for heap
     objects this is the object's address.  */
  return scm_from_ulong (SCM_UNPACK (obj));
}
#undef FUNC_NAME
c68296f8 | 326 | |
1be6b49c | 327 | |
915b3f9f LC |
SCM_DEFINE (scm_gc_disable, "gc-disable", 0, 0, 0,
	    (),
	    "Disables the garbage collector. Nested calls are permitted. "
	    "GC is re-enabled once @code{gc-enable} has been called the "
	    "same number of times @code{gc-disable} was called.")
#define FUNC_NAME s_scm_gc_disable
{
  /* libgc tracks the disable nesting count internally.  */
  GC_disable ();
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
339 | ||
SCM_DEFINE (scm_gc_enable, "gc-enable", 0, 0, 0,
	    (),
	    "Enables the garbage collector.")
#define FUNC_NAME s_scm_gc_enable
{
  /* Counterpart to gc-disable; balances one prior GC_disable ().  */
  GC_enable ();
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
349 | ||
350 | ||
c8a1bdc4 HWN |
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
	    (),
	    "Scans all of SCM objects and reclaims for further use those that are\n"
	    "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  /* "call" is a human-readable reason string passed to scm_i_gc.  */
  scm_i_gc ("call");
  /* If you're calling scm_gc(), you probably want synchronous
     finalization. */
  GC_invoke_finalizers ();
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
9d47a1e6 | 364 | |
/* Trigger a full collection.  WHAT is a human-readable reason string
   (e.g. "call"); it is unused here since libgc does all the work.  */
void
scm_i_gc (const char *what)
{
  GC_gcollect ();
}
0f2d19dd | 370 | |
4c7016dc | 371 | |
0f2d19dd JB |
372 | \f |
373 | /* {GC Protection Helper Functions} | |
374 | */ | |
375 | ||
376 | ||
5d2b97cd DH |
377 | /* |
378 | * If within a function you need to protect one or more scheme objects from | |
379 | * garbage collection, pass them as parameters to one of the | |
380 | * scm_remember_upto_here* functions below. These functions don't do | |
381 | * anything, but since the compiler does not know that they are actually | |
382 | * no-ops, it will generate code that calls these functions with the given | |
383 | * parameters. Therefore, you can be sure that the compiler will keep those | |
384 | * scheme values alive (on the stack or in a register) up to the point where | |
385 | * scm_remember_upto_here* is called. In other words, place the call to | |
592996c9 | 386 | * scm_remember_upto_here* _behind_ the last code in your function that |
5d2b97cd DH |
387 | * depends on the scheme object to exist. |
388 | * | |
8c494e99 DH |
389 | * Example: We want to make sure that the string object str does not get |
390 | * garbage collected during the execution of 'some_function' in the code | |
391 | * below, because otherwise the characters belonging to str would be freed and | |
5d2b97cd DH |
392 | * 'some_function' might access freed memory. To make sure that the compiler |
393 | * keeps str alive on the stack or in a register such that it is visible to | |
394 | * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the | |
395 | * call to 'some_function'. Note that this would not be necessary if str was | |
396 | * used anyway after the call to 'some_function'. | |
eb01cb64 | 397 | * char *chars = scm_i_string_chars (str); |
5d2b97cd DH |
398 | * some_function (chars); |
399 | * scm_remember_upto_here_1 (str); // str will be alive up to this point. | |
400 | */ | |
401 | ||
9e1569bd KR |
402 | /* Remove any macro versions of these while defining the functions. |
403 | Functions are always included in the library, for upward binary | |
404 | compatibility and in case combinations of GCC and non-GCC are used. */ | |
405 | #undef scm_remember_upto_here_1 | |
406 | #undef scm_remember_upto_here_2 | |
407 | ||
/* Deliberately empty: because the compiler cannot see across the
   translation-unit boundary that this is a no-op, calling it keeps OBJ
   visibly live (in a register or on the stack) up to the call site.  */
void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty. Protects a single object from garbage collection. */
}

/* Same as scm_remember_upto_here_1, for two objects.  */
void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty. Protects two objects from garbage collection. */
}

/* Variadic form of the above.  */
void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty. Protects any number of objects from garbage collection. */
}
425 | ||
c209c88e | 426 | /* |
41b0806d GB |
427 | These crazy functions prevent garbage collection |
428 | of arguments after the first argument by | |
429 | ensuring they remain live throughout the | |
430 | function because they are used in the last | |
431 | line of the code block. | |
432 | It'd be better to have a nice compiler hint to | |
433 | aid the conservative stack-scanning GC. --03/09/00 gjb */ | |
0f2d19dd JB |
/* Return ELT unchanged.  The trailing varargs exist only so callers can
   mention additional objects, keeping them live at the call site (see
   the comment above).  */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}
439 | ||
41b0806d GB |
/* Integer flavor of scm_return_first: yield the first argument and
   ignore the rest; the extra arguments merely keep SCM values visibly
   live at the call site.  */
int
scm_return_first_int (int i, ...)
{
  int first = i;
  return first;
}
445 | ||
0f2d19dd | 446 | |
/* Permanently protect OBJ from collection: take one protection
   reference that is never dropped.  Returns OBJ.  */
SCM
scm_permanent_object (SCM obj)
{
  return (scm_gc_protect_object (obj));
}
452 | ||
453 | ||
7bd4fbe2 MD |
454 | /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all |
455 | other references are dropped, until the object is unprotected by calling | |
6b1b030e | 456 | scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest, |
7bd4fbe2 MD |
457 | i. e. it is possible to protect the same object several times, but it is |
458 | necessary to unprotect the object the same number of times to actually get | |
459 | the object unprotected. It is an error to unprotect an object more often | |
460 | than it has been protected before. The function scm_protect_object returns | |
461 | OBJ. | |
462 | */ | |
463 | ||
464 | /* Implementation note: For every object X, there is a counter which | |
1f584400 | 465 | scm_gc_protect_object (X) increments and scm_gc_unprotect_object (X) decrements. |
7bd4fbe2 | 466 | */ |
686765af | 467 | |
7eec4c37 HWN |
468 | |
469 | ||
/* Take one protection reference on OBJ: create (at count 0) or find its
   entry in the `scm_protects' table, then increment the count.  The
   table entry keeps OBJ reachable.  Returns OBJ.  */
SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: Indeed; if my comment above is correct, there is the same
     critsec/mutex inconsistency here. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashq_create_handle_x (scm_protects, obj, scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  /* Statistic reported by gc-stats as `protected-objects'.  */
  protected_obj_count ++;

  SCM_CRITICAL_SECTION_END;

  return obj;
}
489 | ||
490 | ||
491 | /* Remove any protection for OBJ established by a prior call to | |
dab7f566 | 492 | scm_protect_object. This function returns OBJ. |
ef290276 | 493 | |
dab7f566 | 494 | See scm_protect_object for more information. */ |
/* Drop one protection reference on OBJ (see scm_gc_protect_object);
   when the count reaches zero the table entry is removed.  Aborts when
   called during GC or on an object that was never protected.  Returns
   OBJ.  */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  if (scm_gc_running_p)
    {
      fprintf (stderr, "scm_unprotect_object called during GC.\n");
      abort ();
    }

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (scm_is_false (handle))
    {
      /* Unbalanced unprotect: hard programming error, so abort.  */
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
	scm_hashq_remove_x (scm_protects, obj);
      else
	SCM_SETCDR (handle, count);
    }
  protected_obj_count --;

  SCM_CRITICAL_SECTION_END;

  return obj;
}
531 | ||
6b1b030e ML |
/* No-ops under the current (BDW) collector; the entry points remain for
   API compatibility with code written for the old GC.  */
void
scm_gc_register_root (SCM *p)
{
  /* Nothing. */
}

void
scm_gc_unregister_root (SCM *p)
{
  /* Nothing. */
}
543 | ||
544 | void | |
545 | scm_gc_register_roots (SCM *b, unsigned long n) | |
546 | { | |
547 | SCM *p = b; | |
548 | for (; p < b + n; ++p) | |
549 | scm_gc_register_root (p); | |
550 | } | |
551 | ||
552 | void | |
553 | scm_gc_unregister_roots (SCM *b, unsigned long n) | |
554 | { | |
555 | SCM *p = b; | |
556 | for (; p < b + n; ++p) | |
557 | scm_gc_unregister_root (p); | |
558 | } | |
559 | ||
0f2d19dd | 560 | \f |
a00c95d9 | 561 | |
4c48ba06 | 562 | |
c8a1bdc4 HWN |
563 | /* |
564 | MOVE THIS FUNCTION. IT DOES NOT HAVE ANYTHING TO DO WITH GC. |
565 | */ | |
85db4a2c DH |
566 | |
/* Fetch environment variable VAR and parse it as a base-10 integer.
   Returns DEF when VAR is unset or its value does not begin with a
   number; trailing non-numeric text after the digits is ignored.  */
int
scm_getenv_int (const char *var, int def)
{
  const char *text = getenv (var);
  char *tail;
  long parsed;

  if (text == NULL)
    return def;

  tail = NULL;
  parsed = strtol (text, &tail, 10);

  /* strtol consumed nothing: the value was not a number.  */
  return (tail == text) ? def : parsed;
}
581 | ||
c35738c1 MD |
/* One-time collector setup, run before any Scheme objects exist.
   Tuning knobs (interior pointers, free-space divisor, finalization
   mode) must be set before GC_INIT ().  */
void
scm_storage_prehistory ()
{
  /* Only pointers to object starts (plus registered displacements
     below) keep objects alive.  */
  GC_all_interior_pointers = 0;

  /* Heap-growth aggressiveness; overridable via the environment.  */
  free_space_divisor = scm_getenv_int ("GC_FREE_SPACE_DIVISOR", 3);
  minimum_free_space_divisor = free_space_divisor;
  target_free_space_divisor = free_space_divisor;
  GC_set_free_space_divisor (free_space_divisor);

  /* Finalizers run only when explicitly requested; see the
     GC_invoke_finalizers call in scm_gc.  */
  GC_set_finalize_on_demand (1);

  GC_INIT ();

#if (! ((defined GC_VERSION_MAJOR) && (GC_VERSION_MAJOR >= 7))) \
    && (defined SCM_I_GSC_USE_PTHREAD_THREADS)
  /* When using GC 6.8, this call is required to initialize thread-local
     freelists (shouldn't be necessary with GC 7.0). */
  GC_init ();
#endif

  GC_expand_hp (SCM_DEFAULT_INIT_HEAP_SIZE_2);

  /* We only need to register a displacement for those types for which the
     higher bits of the type tag are used to store a pointer (that is, a
     pointer to an 8-octet aligned region).  For `scm_tc3_struct', this is
     handled in `scm_alloc_struct ()'. */
  GC_REGISTER_DISPLACEMENT (scm_tc3_cons);
  /* GC_REGISTER_DISPLACEMENT (scm_tc3_unused); */

  /* Sanity check: the static `scm_protects' cell must be scanned by the
     collector, or protected objects could be reclaimed.  */
  if (!GC_is_visible (&scm_protects))
    abort ();

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
}
85db4a2c | 620 | |
9de87eea | 621 | scm_i_pthread_mutex_t scm_i_gc_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER; |
eb01cb64 | 622 | |
562cd1b8 AW |
/* Create `scm_protects', the hash table backing scm_gc_protect_object.
   The dead `#if 0' block below is ancient cleanup-handler code kept for
   reference.  */
void
scm_init_gc_protect_object ()
{
  scm_protects = scm_c_make_hash_table (31);

#if 0
  /* We can't have a cleanup handler since we have no thread to run it
     in. */

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

#endif
}
939794ce | 642 | |
0f2d19dd JB |
643 | \f |
644 | ||
939794ce DH |
645 | SCM scm_after_gc_hook; |
646 | ||
cc3546b0 | 647 | static SCM after_gc_async_cell; |
939794ce | 648 | |
cc3546b0 AW |
/* The function after_gc_async_thunk causes the execution of the
 * after-gc-hook.  It is run after the gc, as soon as the asynchronous
 * events are handled by the evaluator, i.e. once it is safe to run
 * user code again.  Scheduled by queue_after_gc_hook below.
 */
static SCM
after_gc_async_thunk (void)
{
  /* Fun, no?  Hook-run *and* run-hook?  First the C-level hook, then
     the Scheme-level one.  */
  scm_c_hook_run (&scm_after_gc_c_hook, NULL);
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}
661 | ||
662 | ||
cc3546b0 AW |
/* The function queue_after_gc_hook is run by the scm_before_gc_c_hook
 * at the end of the garbage collection.  The only purpose of this
 * function is to mark the after_gc_async (which will eventually lead to
 * the execution of the after_gc_async_thunk).
 */
static void *
queue_after_gc_hook (void * hook_data SCM_UNUSED,
                     void *fn_data SCM_UNUSED,
                     void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if the
   * after-gc-hook was performed with every gc, and if the gc was performed
   * after a very small number of cell accesses, then the number of cell
   * accesses during the execution of the after-gc-hook will suffice to cause
   * the execution of the next gc.  Then, guile would keep executing the
   * after-gc-hook over and over again, and would never come to do other
   * things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collector.  When running guile with cell access debugging and the
   * execution of the after-gc-hook is desired, then it is necessary to run
   * the hook explicitly from the user code.  This has the effect, that from
   * the scheme level point of view it seems that garbage collection is
   * performed with a much lower frequency than it actually is.  Obviously,
   * this will not work for code that depends on a fixed one to one
   * relationship between the execution counts of the C level garbage
   * collection hooks and the execution count of the scheme level
   * after-gc-hook.
   */

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  if (scm_debug_cells_gc_interval == 0)
#endif
    {
      scm_i_thread *t = SCM_I_CURRENT_THREAD;

      /* Enqueue the async at most once: a false CDR marks the cell as
	 not yet on this thread's active-asyncs list.  */
      if (scm_is_false (SCM_CDR (after_gc_async_cell)))
	{
	  SCM_SETCDR (after_gc_async_cell, t->active_asyncs);
	  t->active_asyncs = after_gc_async_cell;
	  t->pending_asyncs = 1;
	}
    }

  return NULL;
}
714 | ||
00b6ef23 AW |
715 | \f |
716 | ||
/* Hook callback: record when a collection started, unless a start time
   is already pending.  Presumably installed on the before-GC hook --
   confirm at the registration site.  */
static void *
start_gc_timer (void * hook_data SCM_UNUSED,
                void *fn_data SCM_UNUSED,
                void *data SCM_UNUSED)
{
  if (!gc_start_time)
    gc_start_time = scm_c_get_internal_run_time ();

  return NULL;
}
727 | ||
/* Counterpart to start_gc_timer: fold the run time elapsed since the
   recorded start into gc_time_taken (reported by gc-stats) and clear
   the start marker.  */
static void *
accumulate_gc_timer (void * hook_data SCM_UNUSED,
                     void *fn_data SCM_UNUSED,
                     void *data SCM_UNUSED)
{
  if (gc_start_time)
    {
      long now = scm_c_get_internal_run_time ();
      gc_time_taken += now - gc_start_time;
      gc_start_time = 0;
    }

  return NULL;
}
742 | ||
6360beb2 AW |
/* Return some idea of the memory footprint of the process, in bytes.
   Reads /proc/self/statm, so it only works on Linux-style systems;
   elsewhere, or on any error, it returns 0.  */
static size_t
get_image_size (void)
{
  unsigned long size, resident, share;
  size_t ret = 0;

  FILE *fp = fopen ("/proc/self/statm", "r");

  if (fp && fscanf (fp, "%lu %lu %lu", &size, &resident, &share) == 3)
    {
      /* statm reports counts of pages, not bytes: scale by the actual
	 page size rather than assuming 4 KiB, which is wrong on kernels
	 configured with 16K or 64K pages.  */
      long page_size = 4096;
#ifdef _SC_PAGESIZE
      page_size = sysconf (_SC_PAGESIZE);
      if (page_size <= 0)
	page_size = 4096;
#endif
      ret = resident * (size_t) page_size;
    }

  if (fp)
    fclose (fp);

  return ret;
}
761 | ||
fd51e661 AW |
762 | /* These are discussed later. */ |
763 | static size_t bytes_until_gc; | |
764 | static scm_i_pthread_mutex_t bytes_until_gc_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER; | |
765 | ||
6360beb2 AW |
766 | /* Make GC run more frequently when the process image size is growing, |
767 | measured against the number of bytes allocated through the GC. | |
768 | ||
769 | If Guile is allocating at a GC-managed heap size H, libgc will tend | |
770 | to limit the process image size to H*N. But if at the same time the | |
771 | user program is mallocating at a rate M bytes per GC-allocated byte, | |
772 | then the process stabilizes at H*N*M -- assuming that collecting data | |
773 | will result in malloc'd data being freed. It doesn't take a very | |
774 | large M for this to be a bad situation. To limit the image size, | |
775 | Guile should GC more often -- the bigger the M, the more often. | |
776 | ||
777 | Numeric functions that produce bigger and bigger integers are | |
778 | pessimal, because M is an increasing function of time. Here is an | |
779 | example of such a function: | |
780 | ||
781 | (define (factorial n) | |
782 | (define (fac n acc) | |
783 | (if (<= n 1) | |
784 | acc | |
785 | (fac (1- n) (* n acc)))) | |
786 | (fac n 1)) | |
787 | ||
788 | It is possible for a process to grow for reasons that will not be | |
789 | solved by faster GC. In that case M will be estimated as | |
790 | artificially high for a while, and so GC will happen more often on | |
791 | the Guile side. But when it stabilizes, Guile can ease back the GC | |
792 | frequency. | |
793 | ||
794 | The key is to measure process image growth, not mallocation rate. | |
795 | For maximum effectiveness, Guile reacts quickly to process growth, | |
796 | and exponentially backs down when the process stops growing. | |
797 | ||
798 | See http://thread.gmane.org/gmane.lisp.guile.devel/12552/focus=12936 | |
799 | for further discussion. | |
800 | */ | |
801 | static void * | |
802 | adjust_gc_frequency (void * hook_data SCM_UNUSED, | |
803 | void *fn_data SCM_UNUSED, | |
804 | void *data SCM_UNUSED) | |
805 | { | |
806 | static size_t prev_image_size = 0; | |
807 | static size_t prev_bytes_alloced = 0; | |
808 | size_t image_size; | |
809 | size_t bytes_alloced; | |
810 | ||
fd51e661 AW |
811 | scm_i_pthread_mutex_lock (&bytes_until_gc_lock); |
812 | bytes_until_gc = GC_get_heap_size (); | |
813 | scm_i_pthread_mutex_unlock (&bytes_until_gc_lock); | |
814 | ||
6360beb2 AW |
815 | image_size = get_image_size (); |
816 | bytes_alloced = GC_get_total_bytes (); | |
817 | ||
d1c03624 | 818 | #define HEURISTICS_DEBUG 0 |
6360beb2 AW |
819 | |
820 | #if HEURISTICS_DEBUG | |
821 | fprintf (stderr, "prev image / alloced: %lu / %lu\n", prev_image_size, prev_bytes_alloced); | |
822 | fprintf (stderr, " image / alloced: %lu / %lu\n", image_size, bytes_alloced); | |
823 | fprintf (stderr, "divisor %lu / %f\n", free_space_divisor, target_free_space_divisor); | |
824 | #endif | |
825 | ||
826 | if (prev_image_size && bytes_alloced != prev_bytes_alloced) | |
827 | { | |
828 | double growth_rate, new_target_free_space_divisor; | |
829 | double decay_factor = 0.5; | |
830 | double hysteresis = 0.1; | |
831 | ||
832 | growth_rate = ((double) image_size - prev_image_size) | |
833 | / ((double)bytes_alloced - prev_bytes_alloced); | |
834 | ||
835 | #if HEURISTICS_DEBUG | |
836 | fprintf (stderr, "growth rate %f\n", growth_rate); | |
837 | #endif | |
838 | ||
839 | new_target_free_space_divisor = minimum_free_space_divisor; | |
840 | ||
841 | if (growth_rate > 0) | |
842 | new_target_free_space_divisor *= 1.0 + growth_rate; | |
843 | ||
844 | #if HEURISTICS_DEBUG | |
845 | fprintf (stderr, "new divisor %f\n", new_target_free_space_divisor); | |
846 | #endif | |
847 | ||
848 | if (new_target_free_space_divisor < target_free_space_divisor) | |
849 | /* Decay down. */ | |
850 | target_free_space_divisor = | |
851 | (decay_factor * target_free_space_divisor | |
852 | + (1.0 - decay_factor) * new_target_free_space_divisor); | |
853 | else | |
854 | /* Jump up. */ | |
855 | target_free_space_divisor = new_target_free_space_divisor; | |
856 | ||
857 | #if HEURISTICS_DEBUG | |
858 | fprintf (stderr, "new target divisor %f\n", target_free_space_divisor); | |
859 | #endif | |
860 | ||
861 | if (free_space_divisor + 0.5 + hysteresis < target_free_space_divisor | |
862 | || free_space_divisor - 0.5 - hysteresis > target_free_space_divisor) | |
863 | { | |
864 | free_space_divisor = lround (target_free_space_divisor); | |
865 | #if HEURISTICS_DEBUG | |
866 | fprintf (stderr, "new divisor %lu\n", free_space_divisor); | |
867 | #endif | |
868 | GC_set_free_space_divisor (free_space_divisor); | |
869 | } | |
870 | } | |
871 | ||
872 | prev_image_size = image_size; | |
873 | prev_bytes_alloced = bytes_alloced; | |
874 | ||
875 | return NULL; | |
876 | } | |
877 | ||
fd51e661 AW |
878 | /* The adjust_gc_frequency routine handles transients in the process |
879 | image size. It can't handle instense non-GC-managed steady-state | |
880 | allocation though, as it decays the FSD at steady-state down to its | |
881 | minimum value. | |
882 | ||
883 | The only real way to handle continuous, high non-GC allocation is to | |
884 | let the GC know about it. This routine can handle non-GC allocation | |
885 | rates that are similar in size to the GC-managed heap size. | |
886 | */ | |
887 | ||
888 | void | |
889 | scm_gc_register_allocation (size_t size) | |
890 | { | |
891 | scm_i_pthread_mutex_lock (&bytes_until_gc_lock); | |
892 | if (bytes_until_gc - size > bytes_until_gc) | |
893 | { | |
894 | bytes_until_gc = GC_get_heap_size (); | |
895 | scm_i_pthread_mutex_unlock (&bytes_until_gc_lock); | |
896 | GC_gcollect (); | |
897 | } | |
898 | else | |
899 | { | |
900 | bytes_until_gc -= size; | |
901 | scm_i_pthread_mutex_unlock (&bytes_until_gc_lock); | |
902 | } | |
903 | } | |
904 | ||
00b6ef23 AW |
905 | |
906 | \f | |
907 | ||
26224b3f LC |
908 | char const * |
909 | scm_i_tag_name (scm_t_bits tag) | |
910 | { | |
74ec8d78 | 911 | switch (tag & 0x7f) /* 7 bits */ |
26224b3f LC |
912 | { |
913 | case scm_tcs_struct: | |
914 | return "struct"; | |
915 | case scm_tcs_cons_imcar: | |
916 | return "cons (immediate car)"; | |
917 | case scm_tcs_cons_nimcar: | |
918 | return "cons (non-immediate car)"; | |
5b46a8c2 | 919 | case scm_tc7_pointer: |
e2c2a699 | 920 | return "foreign"; |
c99de5aa AW |
921 | case scm_tc7_hashtable: |
922 | return "hashtable"; | |
26b26354 AW |
923 | case scm_tc7_weak_set: |
924 | return "weak-set"; | |
7005c60f AW |
925 | case scm_tc7_weak_table: |
926 | return "weak-table"; | |
9ea31741 AW |
927 | case scm_tc7_fluid: |
928 | return "fluid"; | |
929 | case scm_tc7_dynamic_state: | |
930 | return "dynamic state"; | |
6f3b0cc2 AW |
931 | case scm_tc7_frame: |
932 | return "frame"; | |
933 | case scm_tc7_objcode: | |
934 | return "objcode"; | |
935 | case scm_tc7_vm: | |
936 | return "vm"; | |
937 | case scm_tc7_vm_cont: | |
938 | return "vm continuation"; | |
26224b3f LC |
939 | case scm_tc7_wvect: |
940 | return "weak vector"; | |
941 | case scm_tc7_vector: | |
942 | return "vector"; | |
26224b3f LC |
943 | case scm_tc7_number: |
944 | switch (tag) | |
945 | { | |
946 | case scm_tc16_real: | |
947 | return "real"; | |
948 | break; | |
949 | case scm_tc16_big: | |
950 | return "bignum"; | |
951 | break; | |
952 | case scm_tc16_complex: | |
953 | return "complex number"; | |
954 | break; | |
955 | case scm_tc16_fraction: | |
956 | return "fraction"; | |
957 | break; | |
958 | } | |
959 | break; | |
960 | case scm_tc7_string: | |
961 | return "string"; | |
962 | break; | |
963 | case scm_tc7_stringbuf: | |
964 | return "string buffer"; | |
965 | break; | |
966 | case scm_tc7_symbol: | |
967 | return "symbol"; | |
968 | break; | |
969 | case scm_tc7_variable: | |
970 | return "variable"; | |
971 | break; | |
26224b3f LC |
972 | case scm_tc7_port: |
973 | return "port"; | |
974 | break; | |
975 | case scm_tc7_smob: | |
74ec8d78 AW |
976 | { |
977 | int k = 0xff & (tag >> 8); | |
978 | return (scm_smobs[k].name); | |
979 | } | |
26224b3f LC |
980 | break; |
981 | } | |
982 | ||
983 | return NULL; | |
984 | } | |
985 | ||
986 | ||
26224b3f LC |
987 | |
988 | \f | |
0f2d19dd JB |
/* Initialize Guile's GC interface: create the Scheme-visible
   `after-gc-hook', register the C hooks that drive the after-GC async,
   the GC timers, and the GC-frequency heuristic, and hook Guile into
   libgc's start callback.  Order matters here; do not reorder.  */
void
scm_init_gc ()
{
  /* `GC_INIT ()' was invoked in `scm_storage_prehistory ()'. */

  scm_after_gc_hook = scm_make_hook (SCM_INUM0);
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  /* When the async is to run, the cdr of the gc_async pair gets set to
     the asyncs queue of the current thread.  */
  after_gc_async_cell = scm_cons (scm_c_make_gsubr ("%after-gc-thunk", 0, 0, 0,
                                                    after_gc_async_thunk),
                                  SCM_BOOL_F);

  scm_c_hook_add (&scm_before_gc_c_hook, queue_after_gc_hook, NULL, 0);
  scm_c_hook_add (&scm_before_gc_c_hook, start_gc_timer, NULL, 0);
  scm_c_hook_add (&scm_after_gc_c_hook, accumulate_gc_timer, NULL, 0);

  /* GC_get_heap_usage does not take a lock, and so can run in the GC
     start hook. */
  scm_c_hook_add (&scm_before_gc_c_hook, adjust_gc_frequency, NULL, 0);

  GC_set_start_callback (run_before_gc_c_hook);

  /* Snarfed subr/symbol registrations generated from this file.  */
#include "libguile/gc.x"
}
89e00824 | 1015 | |
c8a1bdc4 HWN |
1016 | |
/* Deprecated stub.  With the BDW collector there is no Guile-side
   sweep phase, so this merely announces on stderr that it does
   nothing.  */
void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  /* FIXME */
  /* FUNC_NAME is a string literal, so adjacent-literal concatenation
     yields the same output the old fprintf ("%s: ...") produced.  */
  fputs (FUNC_NAME ": doing nothing\n", stderr);
}
#undef FUNC_NAME
1025 | ||
89e00824 ML |
1026 | /* |
1027 | Local Variables: | |
1028 | c-file-style: "gnu" | |
1029 | End: | |
1030 | */ |