/* libguile/gc.c */
/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003, 2006 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE

/* #define DEBUGINFO */

#if HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <assert.h>

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/private-gc.h"
#include "libguile/validate.h"
#include "libguile/deprecation.h"
#include "libguile/gc.h"
#include "libguile/dynwind.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

/* Lock this mutex before doing lazy sweeping.
 */
scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;

/* Set this to != 0 if every cell that is accessed shall be checked:
 */
int scm_debug_cell_accesses_p = 0;
int scm_expensive_debug_cell_accesses_p = 0;

/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
 * the number of cell accesses after which a gc shall be called.
 */
int scm_debug_cells_gc_interval = 0;

/*
  Global variable, so you can switch it off at runtime by setting
  scm_i_cell_validation_already_running.
 */
int scm_i_cell_validation_already_running;

#if (SCM_DEBUG_CELL_ACCESSES == 1)


/*

  Assert that the given object is a valid reference to a valid cell.  This
  test involves determining whether the object is a cell pointer, whether
  this pointer actually points into a heap segment, and whether the cell
  pointed to is not a free cell.  Further, additional garbage collections may
  be triggered after a user-defined number of cell accesses.  This helps to
  find places in the C code where references are dropped for extremely short
  periods.

*/
void
scm_i_expensive_validation_check (SCM cell)
{
  if (!scm_in_heap_p (cell))
    {
      fprintf (stderr, "scm_assert_cell_valid: this object does not live in the heap: %lux\n",
               (unsigned long) SCM_UNPACK (cell));
      abort ();
    }

  /* If desired, perform additional garbage collections after a user
   * defined number of cell accesses.
   */
  if (scm_debug_cells_gc_interval)
    {
      static unsigned int counter = 0;

      if (counter != 0)
        {
          --counter;
        }
      else
        {
          counter = scm_debug_cells_gc_interval;
          scm_gc ();
        }
    }
}

void
scm_assert_cell_valid (SCM cell)
{
  if (!scm_i_cell_validation_already_running && scm_debug_cell_accesses_p)
    {
      scm_i_cell_validation_already_running = 1;  /* set to avoid recursion */

      /*
        During GC, no user code should be run, and the guile core
        should use non-protected accessors.
      */
      if (scm_gc_running_p)
        return;

      /*
        scm_in_heap_p rescans the heap and is wildly expensive, so
        only run it on request.
      */
      if (scm_expensive_debug_cell_accesses_p)
        scm_i_expensive_validation_check (cell);

      if (!SCM_GC_MARK_P (cell))
        {
          fprintf (stderr,
                   "scm_assert_cell_valid: this object is unmarked. \n"
                   "It has been garbage-collected in the last GC run: "
                   "%lux\n",
                   (unsigned long) SCM_UNPACK (cell));
          abort ();
        }

      scm_i_cell_validation_already_running = 0;  /* re-enable */
    }
}


SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
            "but no additional calls to garbage collection are issued.\n"
            "If @var{flag} is a number, strict cell access checking is enabled,\n"
            "with an additional garbage collection after the given\n"
            "number of cell accesses.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (scm_is_false (flag))
    {
      scm_debug_cell_accesses_p = 0;
    }
  else if (scm_is_eq (flag, SCM_BOOL_T))
    {
      scm_debug_cells_gc_interval = 0;
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 0;
    }
  else
    {
      scm_debug_cells_gc_interval = scm_to_signed_integer (flag, 0, INT_MAX);
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 1;
    }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
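
/* Usage sketch (illustrative only), from the REPL of a Guile built with
   SCM_DEBUG_CELL_ACCESSES == 1:

     (set-debug-cell-accesses! #t)    ; cheap checking, no extra GCs
     (set-debug-cell-accesses! 5000)  ; strict checking, GC every 5000 accesses
     (set-debug-cell-accesses! #f)    ; disable checking again
*/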


#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

\f


/* scm_mtrigger
 * is the number of bytes of malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
unsigned long scm_last_cells_allocated = 0;
unsigned long scm_mallocated = 0;

/* Global GC sweep statistics since the last full GC.  */
static scm_t_sweep_statistics scm_i_gc_sweep_stats = { 0, 0 };
static scm_t_sweep_statistics scm_i_gc_sweep_stats_1 = { 0, 0 };

/* Total count of cells marked/swept.  */
static double scm_gc_cells_marked_acc = 0.;
static double scm_gc_cells_swept_acc = 0.;
static double scm_gc_cells_allocated_acc = 0.;

static unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;
static unsigned long scm_gc_mark_time_taken = 0;

static unsigned long scm_gc_times = 0;

static int scm_gc_cell_yield_percentage = 0;
static unsigned long protected_obj_count = 0;

/* The following are accessed from `gc-malloc.c' and `gc-card.c'.  */
int scm_gc_malloc_yield_percentage = 0;
unsigned long scm_gc_malloc_collected = 0;


SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");
SCM_SYMBOL (sym_malloc_yield, "malloc-yield");
SCM_SYMBOL (sym_cell_yield, "cell-yield");
SCM_SYMBOL (sym_protected_objects, "protected-objects");
SCM_SYMBOL (sym_total_cells_allocated, "total-cells-allocated");


/* Number of calls to SCM_NEWCELL since startup.  */
unsigned scm_newcell_count;
unsigned scm_newcell2_count;

/* {Scheme Interface to GC}
 */
static SCM
tag_table_to_type_alist (void *closure, SCM key, SCM val, SCM acc)
{
  if (scm_is_integer (key))
    {
      int c_tag = scm_to_int (key);

      char const *name = scm_i_tag_name (c_tag);
      if (name != NULL)
        {
          key = scm_from_locale_string (name);
        }
      else
        {
          char s[100];
          sprintf (s, "tag %d", c_tag);
          key = scm_from_locale_string (s);
        }
    }

  return scm_cons (scm_cons (key, val), acc);
}

SCM_DEFINE (scm_gc_live_object_stats, "gc-live-object-stats", 0, 0, 0,
            (),
            "Return an alist of statistics of the current live objects. ")
#define FUNC_NAME s_scm_gc_live_object_stats
{
  SCM tab = scm_make_hash_table (scm_from_int (57));
  SCM alist;

  scm_i_all_segments_statistics (tab);

  alist
    = scm_internal_hash_fold (&tag_table_to_type_alist, NULL, SCM_EOL, tab);

  return alist;
}
#undef FUNC_NAME

extern int scm_gc_malloc_yield_percentage;
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.\n")
#define FUNC_NAME s_scm_gc_stats
{
  long i = 0;
  SCM heap_segs = SCM_EOL;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  int local_scm_gc_cell_yield_percentage;
  int local_scm_gc_malloc_yield_percentage;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_protected_obj_count;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  double local_scm_total_cells_allocated;
  SCM answer;
  unsigned long *bounds = 0;
  int table_size = scm_i_heap_segment_table_size;
  SCM_CRITICAL_SECTION_START;

  /*
    temporarily store the numbers, so as not to cause GC.
   */

  bounds = malloc (sizeof (unsigned long) * table_size * 2);
  if (!bounds)
    abort ();
  for (i = table_size; i--; )
    {
      bounds[2*i] = (unsigned long) scm_i_heap_segment_table[i]->bounds[0];
      bounds[2*i+1] = (unsigned long) scm_i_heap_segment_table[i]->bounds[1];
    }


  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;

  local_scm_cells_allocated = scm_cells_allocated;

  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_malloc_yield_percentage = scm_gc_malloc_yield_percentage;
  local_scm_gc_cell_yield_percentage = scm_gc_cell_yield_percentage;
  local_protected_obj_count = protected_obj_count;
  local_scm_gc_cells_swept =
    (double) scm_gc_cells_swept_acc
    + (double) scm_i_gc_sweep_stats.swept;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc
    + (double) scm_i_gc_sweep_stats.swept
    - (double) scm_i_gc_sweep_stats.collected;

  local_scm_total_cells_allocated = scm_gc_cells_allocated_acc
    + (double) (scm_cells_allocated - scm_last_cells_allocated);

  for (i = table_size; i--;)
    {
      heap_segs = scm_cons (scm_cons (scm_from_ulong (bounds[2*i]),
                                      scm_from_ulong (bounds[2*i+1])),
                            heap_segs);
    }
  /* njrev: can any of these scm_cons's or scm_list_n signal a memory
     error?  If so we need a frame here. */
  answer =
    scm_list_n (scm_cons (sym_gc_time_taken,
                          scm_from_ulong (local_scm_gc_time_taken)),
                scm_cons (sym_cells_allocated,
                          scm_from_ulong (local_scm_cells_allocated)),
                scm_cons (sym_total_cells_allocated,
                          scm_from_double (local_scm_total_cells_allocated)),
                scm_cons (sym_heap_size,
                          scm_from_ulong (local_scm_heap_size)),
                scm_cons (sym_mallocated,
                          scm_from_ulong (local_scm_mallocated)),
                scm_cons (sym_mtrigger,
                          scm_from_ulong (local_scm_mtrigger)),
                scm_cons (sym_times,
                          scm_from_ulong (local_scm_gc_times)),
                scm_cons (sym_gc_mark_time_taken,
                          scm_from_ulong (local_scm_gc_mark_time_taken)),
                scm_cons (sym_cells_marked,
                          scm_from_double (local_scm_gc_cells_marked)),
                scm_cons (sym_cells_swept,
                          scm_from_double (local_scm_gc_cells_swept)),
                scm_cons (sym_malloc_yield,
                          scm_from_long (local_scm_gc_malloc_yield_percentage)),
                scm_cons (sym_cell_yield,
                          scm_from_long (local_scm_gc_cell_yield_percentage)),
                scm_cons (sym_protected_objects,
                          scm_from_ulong (local_protected_obj_count)),
                scm_cons (sym_heap_segments, heap_segs),
                SCM_UNDEFINED);
  SCM_CRITICAL_SECTION_END;

  free (bounds);
  return answer;
}
#undef FUNC_NAME
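
/* Usage sketch (illustrative only): from Scheme, individual entries of the
   returned alist can be looked up with `assq-ref', e.g.

     (assq-ref (gc-stats) 'cells-allocated)
     (assq-ref (gc-stats) 'gc-time-taken)
*/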

/* Update the global sweeping/collection statistics by adding SWEEP_STATS to
   SCM_I_GC_SWEEP_STATS and updating related variables.  */
static inline void
gc_update_stats (scm_t_sweep_statistics sweep_stats)
{
  /* CELLS SWEPT is another word for the number of cells that were examined
     during GC.  YIELD is the number that we cleaned out.  MARKED is the
     number that weren't cleaned.  */

  scm_gc_cell_yield_percentage = (sweep_stats.collected * 100) / SCM_HEAP_SIZE;

  scm_i_sweep_statistics_sum (&scm_i_gc_sweep_stats, sweep_stats);

  if ((scm_i_gc_sweep_stats.collected > scm_i_gc_sweep_stats.swept)
      || (scm_cells_allocated < sweep_stats.collected))
    {
      printf ("internal GC error, please report to `"
              PACKAGE_BUGREPORT "'\n");
      abort ();
    }

  scm_gc_cells_allocated_acc +=
    (double) (scm_cells_allocated - scm_last_cells_allocated);

  scm_cells_allocated -= sweep_stats.collected;
  scm_last_cells_allocated = scm_cells_allocated;
}

static void
gc_start_stats (const char *what SCM_UNUSED)
{
  t_before_gc = scm_c_get_internal_run_time ();

  scm_gc_malloc_collected = 0;
}

static void
gc_end_stats (scm_t_sweep_statistics sweep_stats)
{
  unsigned long t = scm_c_get_internal_run_time ();

  scm_gc_time_taken += (t - t_before_gc);

  /* Reset the number of cells swept/collected since the last full GC.  */
  scm_i_gc_sweep_stats_1 = scm_i_gc_sweep_stats;
  scm_i_gc_sweep_stats.collected = scm_i_gc_sweep_stats.swept = 0;

  gc_update_stats (sweep_stats);

  scm_gc_cells_marked_acc += (double) scm_i_gc_sweep_stats.swept
    - (double) scm_i_gc_sweep_stats.collected;
  scm_gc_cells_swept_acc += (double) scm_i_gc_sweep_stats.swept;

  ++scm_gc_times;
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that for the lifetime of @var{obj} is uniquely\n"
            "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  return scm_from_ulong (SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all of SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
  scm_gc_running_p = 1;
  scm_i_gc ("call");
  /* njrev: It looks as though other places, e.g. scm_realloc,
     can call scm_i_gc without acquiring the sweep mutex.  Does this
     matter?  Also scm_i_gc (or its descendants) touch the
     scm_sys_protects, which are protected in some cases
     (e.g. scm_permobjs above in scm_gc_stats) by a critical section,
     not by the sweep mutex.  Shouldn't all the GC-relevant objects be
     protected in the same way? */
  scm_gc_running_p = 0;
  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


\f

/* The master is global and common while the freelist will be
 * individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
{
  SCM cell;
  int did_gc = 0;
  scm_t_sweep_statistics sweep_stats;

  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
  scm_gc_running_p = 1;

  *free_cells = scm_i_sweep_some_segments (freelist, &sweep_stats);
  gc_update_stats (sweep_stats);

  if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
    {
      freelist->heap_segment_idx =
        scm_i_get_new_heap_segment (freelist,
                                    scm_i_gc_sweep_stats,
                                    abort_on_error);

      *free_cells = scm_i_sweep_some_segments (freelist, &sweep_stats);
      gc_update_stats (sweep_stats);
    }

  if (*free_cells == SCM_EOL)
    {
      /*
        with the advent of lazy sweep, GC yield is only known just
        before doing the GC.
      */
      scm_i_adjust_min_yield (freelist,
                              scm_i_gc_sweep_stats,
                              scm_i_gc_sweep_stats_1);

      /*
        out of fresh cells.  Try to get some new ones.
      */

      did_gc = 1;
      scm_i_gc ("cells");

      *free_cells = scm_i_sweep_some_segments (freelist, &sweep_stats);
      gc_update_stats (sweep_stats);
    }

  if (*free_cells == SCM_EOL)
    {
      /*
        failed getting new cells.  Get new juice or die.
      */
      freelist->heap_segment_idx =
        scm_i_get_new_heap_segment (freelist,
                                    scm_i_gc_sweep_stats,
                                    abort_on_error);

      *free_cells = scm_i_sweep_some_segments (freelist, &sweep_stats);
      gc_update_stats (sweep_stats);
    }

  if (*free_cells == SCM_EOL)
    abort ();

  cell = *free_cells;

  *free_cells = SCM_FREE_CELL_CDR (cell);

  scm_gc_running_p = 0;
  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);

  if (did_gc)
    scm_c_hook_run (&scm_after_gc_c_hook, 0);

  return cell;
}


scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;

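/* Usage sketch (illustrative only; `my_after_gc' is a hypothetical function):
   C code can attach itself to these hooks with scm_c_hook_add, in the same
   way mark_gc_async is registered in scm_init_gc below.

     static void *
     my_after_gc (void *hook_data, void *func_data, void *data)
     {
       fprintf (stderr, "GC finished\n");
       return NULL;
     }

     scm_c_hook_add (&scm_after_gc_c_hook, my_after_gc, NULL, 0);
*/
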
/* Must be called while holding scm_i_sweep_mutex.
 */

void
scm_i_gc (const char *what)
{
  scm_t_sweep_statistics sweep_stats;

  scm_i_thread_put_to_sleep ();

  scm_c_hook_run (&scm_before_gc_c_hook, 0);

#ifdef DEBUGINFO
  fprintf (stderr, "gc reason %s\n", what);

  fprintf (stderr,
           scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist))
           ? "*"
           : (scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist2)) ? "o" : "m"));
#endif

  gc_start_stats (what);

  /*
    Set freelists to NULL so scm_cons () always triggers gc, causing
    the assertion above to fail.
  */
  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  /*
    Let's finish the sweep.  The conservative GC might point into the
    garbage, and marking that would create a mess.
  */
  scm_i_sweep_all_segments ("GC", &sweep_stats);

  /* Invariant: the number of cells collected (i.e., freed) must always be
     lower than or equal to the number of cells "swept" (i.e., visited).  */
  assert (sweep_stats.collected <= sweep_stats.swept);

  if (scm_mallocated < scm_i_deprecated_memory_return)
    {
      /* The byte count of allocated objects has underflowed.  This is
         probably because you forgot to report the sizes of objects you
         have allocated, by calling scm_done_malloc or some such.  When
         the GC freed them, it subtracted their size from
         scm_mallocated, which underflowed. */
      fprintf (stderr,
               "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
               "This is probably because the GC hasn't been correctly informed\n"
               "about object sizes\n");
      abort ();
    }
  scm_mallocated -= scm_i_deprecated_memory_return;


  /* Mark */

  scm_c_hook_run (&scm_before_mark_c_hook, 0);
  scm_mark_all ();
  scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);

  /* Sweep

     TODO: the after_sweep hook should probably be moved to just before
     the mark, since that's where the sweep is finished in lazy
     sweeping.

     MDJ 030219 <djurfeldt@nada.kth.se>: No, probably not.  The
     original meaning implied at least two things: that it would be
     called when

       1. the freelist is re-initialized (no evaluation possible, though)

       and

       2. the heap is "fresh"
          (it is well-defined what data is used and what is not)

     Neither of these conditions would hold just before the mark phase.

     Of course, the lazy sweeping has muddled the distinction between
     scm_before_sweep_c_hook and scm_after_sweep_c_hook, but even if
     there were no difference, it would still be useful to have two
     distinct classes of hook functions since this can prevent some
     bad interference when several modules add gc hooks.
  */

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);
  scm_gc_sweep ();
  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  gc_end_stats (sweep_stats);

  scm_i_thread_wake_up ();

  /*
    For debugging purposes, you could do
    scm_i_sweep_all_segments ("debug"), but then the remains of the
    cell aren't left to analyse.
  */
}

\f
/* {GC Protection Helper Functions}
 */


/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below.  These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters.  Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called.  In other words, place the call to
 * scm_remember_upto_here* _after_ the last code in your function that
 * depends on the scheme object existing.
 *
 * Example: We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function' in the code
 * below, because otherwise the characters belonging to str would be freed and
 * 'some_function' might access freed memory.  To make sure that the compiler
 * keeps str alive on the stack or in a register such that it is visible to
 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
 * call to 'some_function'.  Note that this would not be necessary if str was
 * used anyway after the call to 'some_function'.
 *   char *chars = scm_i_string_chars (str);
 *   some_function (chars);
 *   scm_remember_upto_here_1 (str);  // str will be alive up to this point.
 */

/* Remove any macro versions of these while defining the functions.
   Functions are always included in the library, for upward binary
   compatibility and in case combinations of GCC and non-GCC are used.  */
#undef scm_remember_upto_here_1
#undef scm_remember_upto_here_2

void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty.  Protects a single object from garbage collection. */
}

void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty.  Protects two objects from garbage collection. */
}

void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty.  Protects any number of objects from garbage collection. */
}

/*
  These crazy functions prevent garbage collection
  of arguments after the first argument by
  ensuring they remain live throughout the
  function because they are used in the last
  line of the code block.
  It'd be better to have a nice compiler hint to
  aid the conservative stack-scanning GC. --03/09/00 gjb */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

int
scm_return_first_int (int i, ...)
{
  return i;
}


SCM
scm_permanent_object (SCM obj)
{
  SCM cell = scm_cons (obj, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  SCM_SETCDR (cell, scm_permobjs);
  scm_permobjs = cell;
  SCM_CRITICAL_SECTION_END;
  return obj;
}


/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_gc_unprotect_object (OBJ).  Calls to scm_gc_protect/unprotect_object nest,
   i.e. it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected.  It is an error to unprotect an object more often
   than it has been protected before.  The function scm_gc_protect_object
   returns OBJ.
*/

/* Implementation note: For every object X, there is a counter which
   scm_gc_protect_object (X) increments and scm_gc_unprotect_object (X) decrements.
*/



SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: Indeed; if my comment above is correct, there is the same
     critsec/mutex inconsistency here. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashq_create_handle_x (scm_protects, obj, scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  protected_obj_count ++;

  SCM_CRITICAL_SECTION_END;

  return obj;
}

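/* Usage sketch (illustrative only): protection calls nest, so an object
   protected twice stays alive until it has been unprotected twice.

     SCM obj = scm_cons (SCM_BOOL_T, SCM_BOOL_F);
     scm_gc_protect_object (obj);
     scm_gc_protect_object (obj);    // counter is now 2
     scm_gc_unprotect_object (obj);  // still protected
     scm_gc_unprotect_object (obj);  // eligible for collection again
*/
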

/* Remove any protection for OBJ established by a prior call to
   scm_gc_protect_object.  This function returns OBJ.

   See scm_gc_protect_object for more information. */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  if (scm_gc_running_p)
    {
      fprintf (stderr, "scm_unprotect_object called during GC.\n");
      abort ();
    }

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, count);
    }
  protected_obj_count --;

  SCM_CRITICAL_SECTION_END;

  return obj;
}

void
scm_gc_register_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key,
                                      scm_from_int (0));
  /* njrev: note also that the above can probably signal an error */
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  SCM_CRITICAL_SECTION_END;
}

void
scm_gc_unregister_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashv_get_handle (scm_gc_registered_roots, key);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
        scm_hashv_remove_x (scm_gc_registered_roots, key);
      else
        SCM_SETCDR (handle, count);
    }

  SCM_CRITICAL_SECTION_END;
}

void
scm_gc_register_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_register_root (p);
}

void
scm_gc_unregister_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_unregister_root (p);
}

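/* Usage sketch (illustrative only; `cached_obj' is a hypothetical variable):
   a global or heap-allocated SCM location that the conservative stack scan
   cannot see can be registered as an explicit root.

     static SCM cached_obj = SCM_BOOL_F;
     scm_gc_register_root (&cached_obj);
     cached_obj = scm_from_locale_string ("cached value");
     // ... later, once the location is no longer used:
     scm_gc_unregister_root (&cached_obj);
*/
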
int scm_i_terminating;

\f


/*
  MOVE THIS FUNCTION.  IT DOES NOT HAVE ANYTHING TO DO WITH GC.
 */

/* Get an integer from an environment variable.  */
int
scm_getenv_int (const char *var, int def)
{
  char *end = 0;
  char *val = getenv (var);
  long res = def;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  return res;
}

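/* Usage sketch (illustrative only; the variable name is hypothetical):

     int chunk_kb = scm_getenv_int ("MY_APP_CHUNK_KB", 256);

   yields 256 unless MY_APP_CHUNK_KB is set to a string that strtol can
   parse as a base-10 integer. */
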
void
scm_storage_prehistory ()
{
  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
}

scm_i_pthread_mutex_t scm_i_gc_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;

int
scm_init_storage ()
{
  size_t j;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;

  scm_gc_init_freelist ();
  scm_gc_init_malloc ();

  j = SCM_HEAP_SEG_SIZE;

#if 0
  /* We can't have a cleanup handler since we have no thread to run it
     in. */

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

#endif

  scm_stand_in_procs = scm_make_weak_key_hash_table (scm_from_int (257));
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);
  scm_gc_registered_roots = scm_c_make_hash_table (31);

  return 0;
}

\f

SCM scm_after_gc_hook;

static SCM gc_async;

/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}


/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void * hook_data SCM_UNUSED,
               void *func_data SCM_UNUSED,
               void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if the
   * after-gc-hook was performed with every gc, and if the gc was performed
   * after a very small number of cell accesses, then the number of cell
   * accesses during the execution of the after-gc-hook will suffice to cause
   * the execution of the next gc.  Then, guile would keep executing the
   * after-gc-hook over and over again, and would never come to do other
   * things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collector.  When running guile with cell access debugging and the
   * execution of the after-gc-hook is desired, then it is necessary to run
   * the hook explicitly from the user code.  This has the effect that, from
   * the scheme level point of view, it seems that garbage collection is
   * performed with a much lower frequency than it actually is.  Obviously,
   * this will not work for code that depends on a fixed one to one
   * relationship between the execution counts of the C level garbage
   * collection hooks and the execution count of the scheme level
   * after-gc-hook.
   */

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  if (scm_debug_cells_gc_interval == 0)
    scm_system_async_mark (gc_async);
#else
  scm_system_async_mark (gc_async);
#endif

  return NULL;
}

void
scm_init_gc ()
{
  scm_gc_init_mark ();

  scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  gc_async = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
                              gc_async_thunk);

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}

#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */

void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  scm_i_deprecated_memory_return = 0;

  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist);
  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist2);

  /*
    NOTHING HERE: LAZY SWEEPING !
  */
  scm_i_reset_segments ();

  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  /* Invalidate the freelists of other threads. */
  scm_i_thread_invalidate_freelists ();
}

#undef FUNC_NAME



/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/