Revert "(scm_shell_usage): Note need for subscription to bug-guile@gnu.org."
[bpt/guile.git] / libguile / gc.c
/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003, 2006, 2008 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* #define DEBUGINFO */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <assert.h>

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/private-gc.h"
#include "libguile/validate.h"
#include "libguile/deprecation.h"
#include "libguile/gc.h"
#include "libguile/dynwind.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

/* Lock this mutex before doing lazy sweeping.
 */
scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;

/* Set this to != 0 if every cell that is accessed shall be checked:
 */
int scm_debug_cell_accesses_p = 0;
int scm_expensive_debug_cell_accesses_p = 0;

/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
 * the number of cell accesses after which a gc shall be called.
 */
int scm_debug_cells_gc_interval = 0;

/*
  Global variable, so you can switch it off at runtime by setting
  scm_i_cell_validation_already_running.
 */
int scm_i_cell_validation_already_running;

#if (SCM_DEBUG_CELL_ACCESSES == 1)


/*

  Assert that the given object is a valid reference to a valid cell.  This
  test involves determining whether the object is a cell pointer, whether
  this pointer actually points into a heap segment and whether the cell
  pointed to is not a free cell.  Additional garbage collections may also
  be executed after a user-defined number of cell accesses.  This helps
  find places in the C code where references are dropped for extremely
  short periods.

 */
void
scm_i_expensive_validation_check (SCM cell)
{
  if (!scm_in_heap_p (cell))
    {
      fprintf (stderr,
               "scm_assert_cell_valid: this object does not live in the heap: %lx\n",
               (unsigned long) SCM_UNPACK (cell));
      abort ();
    }

  /* If desired, perform additional garbage collections after a user
   * defined number of cell accesses.
   */
  if (scm_debug_cells_gc_interval)
    {
      static unsigned int counter = 0;

      if (counter != 0)
        {
          --counter;
        }
      else
        {
          counter = scm_debug_cells_gc_interval;
          scm_gc ();
        }
    }
}

void
scm_assert_cell_valid (SCM cell)
{
  if (!scm_i_cell_validation_already_running && scm_debug_cell_accesses_p)
    {
      scm_i_cell_validation_already_running = 1;  /* set to avoid recursion */

      /*
        During GC, no user code should be run, and the Guile core
        should use non-protected accessors.
      */
      if (scm_gc_running_p)
        return;

      /*
        Only run the expensive check on request: scm_in_heap_p rescans
        the heap and is wildly expensive.
      */
      if (scm_expensive_debug_cell_accesses_p)
        scm_i_expensive_validation_check (cell);
#if (SCM_DEBUG_MARKING_API == 0)
      if (!SCM_GC_MARK_P (cell))
        {
          fprintf (stderr,
                   "scm_assert_cell_valid: this object is unmarked.\n"
                   "It has been garbage-collected in the last GC run: "
                   "%lx\n",
                   (unsigned long) SCM_UNPACK (cell));
          abort ();
        }
#endif /* SCM_DEBUG_MARKING_API */

      scm_i_cell_validation_already_running = 0;  /* re-enable */
    }
}



SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
            "but no additional calls to garbage collection are issued.\n"
            "If @var{flag} is a number, strict cell access checking is enabled,\n"
            "with an additional garbage collection after the given\n"
            "number of cell accesses.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (scm_is_false (flag))
    {
      scm_debug_cell_accesses_p = 0;
    }
  else if (scm_is_eq (flag, SCM_BOOL_T))
    {
      scm_debug_cells_gc_interval = 0;
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 0;
    }
  else
    {
      scm_debug_cells_gc_interval = scm_to_signed_integer (flag, 0, INT_MAX);
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 1;
    }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */
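
/* Illustrative sketch (not compiled): how the checking modes above could
   be driven from C in a build configured with SCM_DEBUG_CELL_ACCESSES
   defined to 1.  The same switches are available from Scheme as
   (set-debug-cell-accesses! #f), #t or an access count.  */
#if 0
static void
example_tune_cell_access_checking (void)
{
  /* Cheap checking only: validate mark bits on each access.  */
  scm_set_debug_cell_accesses_x (SCM_BOOL_T);

  /* Strict checking: also verify heap membership, and force a GC
     after every 5000 cell accesses.  */
  scm_set_debug_cell_accesses_x (scm_from_int (5000));

  /* Turn checking off again.  */
  scm_set_debug_cell_accesses_x (SCM_BOOL_F);
}
#endif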

\f


/* scm_mtrigger
 * is the number of bytes of malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
unsigned long scm_last_cells_allocated = 0;
unsigned long scm_mallocated = 0;
long int scm_i_find_heap_calls = 0;
/* Global GC sweep statistics since the last full GC.  */
scm_t_sweep_statistics scm_i_gc_sweep_stats = { 0, 0 };

/* Total count of cells marked/swept.  */
static double scm_gc_cells_marked_acc = 0.;
static double scm_gc_cells_marked_conservatively_acc = 0.;
static double scm_gc_cells_swept_acc = 0.;
static double scm_gc_cells_allocated_acc = 0.;

static unsigned long scm_gc_time_taken = 0;
static unsigned long scm_gc_mark_time_taken = 0;

static unsigned long scm_gc_times = 0;

static int scm_gc_cell_yield_percentage = 0;
static unsigned long protected_obj_count = 0;

/* The following are accessed from `gc-malloc.c' and `gc-card.c'.  */
int scm_gc_malloc_yield_percentage = 0;
unsigned long scm_gc_malloc_collected = 0;


SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_marked_conservatively, "cells-marked-conservatively");
SCM_SYMBOL (sym_cells_swept, "cells-swept");
SCM_SYMBOL (sym_malloc_yield, "malloc-yield");
SCM_SYMBOL (sym_cell_yield, "cell-yield");
SCM_SYMBOL (sym_protected_objects, "protected-objects");
SCM_SYMBOL (sym_total_cells_allocated, "total-cells-allocated");


/* Number of calls to SCM_NEWCELL since startup.  */
unsigned scm_newcell_count;
unsigned scm_newcell2_count;


/* {Scheme Interface to GC}
 */
static SCM
tag_table_to_type_alist (void *closure, SCM key, SCM val, SCM acc)
{
  if (scm_is_integer (key))
    {
      int c_tag = scm_to_int (key);

      char const *name = scm_i_tag_name (c_tag);
      if (name != NULL)
        {
          key = scm_from_locale_string (name);
        }
      else
        {
          char s[100];
          sprintf (s, "tag %d", c_tag);
          key = scm_from_locale_string (s);
        }
    }

  return scm_cons (scm_cons (key, val), acc);
}

SCM_DEFINE (scm_gc_live_object_stats, "gc-live-object-stats", 0, 0, 0,
            (),
            "Return an alist of statistics of the current live objects.")
#define FUNC_NAME s_scm_gc_live_object_stats
{
  SCM tab = scm_make_hash_table (scm_from_int (57));
  SCM alist;

  scm_i_all_segments_statistics (tab);

  alist
    = scm_internal_hash_fold (&tag_table_to_type_alist, NULL, SCM_EOL, tab);

  return alist;
}
#undef FUNC_NAME

extern int scm_gc_malloc_yield_percentage;
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.\n")
#define FUNC_NAME s_scm_gc_stats
{
  long i = 0;
  SCM heap_segs = SCM_EOL;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  int local_scm_gc_cell_yield_percentage;
  int local_scm_gc_malloc_yield_percentage;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_protected_obj_count;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  double local_scm_gc_cells_marked_conservatively;
  double local_scm_total_cells_allocated;
  SCM answer;
  unsigned long *bounds = 0;
  int table_size = 0;
  SCM_CRITICAL_SECTION_START;

  bounds = scm_i_segment_table_info (&table_size);

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size =
    (scm_i_master_freelist.heap_total_cells + scm_i_master_freelist2.heap_total_cells);

  local_scm_cells_allocated =
    scm_cells_allocated + scm_i_gc_sweep_stats.collected;

  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_malloc_yield_percentage = scm_gc_malloc_yield_percentage;
  local_scm_gc_cell_yield_percentage = scm_gc_cell_yield_percentage;
  local_protected_obj_count = protected_obj_count;
  local_scm_gc_cells_swept =
    (double) scm_gc_cells_swept_acc
    + (double) scm_i_gc_sweep_stats.swept;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc
    + (double) scm_i_gc_sweep_stats.swept
    - (double) scm_i_gc_sweep_stats.collected;
  local_scm_gc_cells_marked_conservatively
    = scm_gc_cells_marked_conservatively_acc;

  local_scm_total_cells_allocated = scm_gc_cells_allocated_acc
    + (double) scm_i_gc_sweep_stats.collected;

  for (i = table_size; i--;)
    {
      heap_segs = scm_cons (scm_cons (scm_from_ulong (bounds[2*i]),
                                      scm_from_ulong (bounds[2*i+1])),
                            heap_segs);
    }

  /* njrev: can any of these scm_cons's or scm_list_n signal a memory
     error?  If so we need a frame here. */
  answer =
    scm_list_n (scm_cons (sym_gc_time_taken,
                          scm_from_ulong (local_scm_gc_time_taken)),
                scm_cons (sym_cells_allocated,
                          scm_from_ulong (local_scm_cells_allocated)),
                scm_cons (sym_total_cells_allocated,
                          scm_from_double (local_scm_total_cells_allocated)),
                scm_cons (sym_heap_size,
                          scm_from_ulong (local_scm_heap_size)),
                scm_cons (sym_cells_marked_conservatively,
                          scm_from_ulong (local_scm_gc_cells_marked_conservatively)),
                scm_cons (sym_mallocated,
                          scm_from_ulong (local_scm_mallocated)),
                scm_cons (sym_mtrigger,
                          scm_from_ulong (local_scm_mtrigger)),
                scm_cons (sym_times,
                          scm_from_ulong (local_scm_gc_times)),
                scm_cons (sym_gc_mark_time_taken,
                          scm_from_ulong (local_scm_gc_mark_time_taken)),
                scm_cons (sym_cells_marked,
                          scm_from_double (local_scm_gc_cells_marked)),
                scm_cons (sym_cells_swept,
                          scm_from_double (local_scm_gc_cells_swept)),
                scm_cons (sym_malloc_yield,
                          scm_from_long (local_scm_gc_malloc_yield_percentage)),
                scm_cons (sym_cell_yield,
                          scm_from_long (local_scm_gc_cell_yield_percentage)),
                scm_cons (sym_protected_objects,
                          scm_from_ulong (local_protected_obj_count)),
                scm_cons (sym_heap_segments, heap_segs),
                SCM_UNDEFINED);
  SCM_CRITICAL_SECTION_END;

  free (bounds);
  return answer;
}
#undef FUNC_NAME
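
/* Illustrative sketch (not compiled): fetching one entry from the alist
   returned by scm_gc_stats above.  The key is one of the symbols defined
   earlier in this file, here "cell-heap-size".  */
#if 0
static unsigned long
example_current_heap_size (void)
{
  SCM stats = scm_gc_stats ();
  SCM pair = scm_assq (scm_from_locale_symbol ("cell-heap-size"), stats);

  if (scm_is_pair (pair))
    return scm_to_ulong (SCM_CDR (pair));
  return 0;
}
#endif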

/*
  Update nice-to-know statistics.
 */
static void
gc_end_stats ()
{
  /* CELLS SWEPT is another word for the number of cells that were examined
     during GC.  YIELD is the number that we cleaned out.  MARKED is the
     number that weren't cleaned.  */
  scm_gc_cell_yield_percentage = (scm_i_gc_sweep_stats.collected * 100) /
    (scm_i_master_freelist.heap_total_cells + scm_i_master_freelist2.heap_total_cells);

  scm_gc_cells_allocated_acc +=
    (double) scm_i_gc_sweep_stats.collected;
  scm_gc_cells_marked_acc += (double) scm_i_last_marked_cell_count;
  scm_gc_cells_marked_conservatively_acc += (double) scm_i_find_heap_calls;
  scm_gc_cells_swept_acc += (double) scm_i_gc_sweep_stats.swept;

  ++scm_gc_times;
}

SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that for the lifetime of @var{obj} is uniquely\n"
            "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  return scm_from_ulong (SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
  scm_gc_running_p = 1;
  scm_i_gc ("call");
  /* njrev: It looks as though other places, e.g. scm_realloc,
     can call scm_i_gc without acquiring the sweep mutex.  Does this
     matter?  Also scm_i_gc (or its descendants) touch the
     scm_sys_protects, which are protected in some cases
     (e.g. scm_permobjs above in scm_gc_stats) by a critical section,
     not by the sweep mutex.  Shouldn't all the GC-relevant objects be
     protected in the same way? */
  scm_gc_running_p = 0;
  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
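
/* Illustrative sketch (not compiled): forcing a full collection from C.
   scm_gc acquires the sweep mutex itself, so callers need no locking.  */
#if 0
static void
example_force_gc (void)
{
  scm_gc ();                    /* same as (gc) at the Scheme level */
}
#endif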


\f

/* The master is global and common while the freelist will be
 * individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
{
  SCM cell;
  int did_gc = 0;

  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
  scm_gc_running_p = 1;

  *free_cells = scm_i_sweep_for_freelist (freelist);
  if (*free_cells == SCM_EOL)
    {
      float delta = scm_i_gc_heap_size_delta (freelist);
      if (delta > 0.0)
        {
          size_t bytes = ((unsigned long) delta) * sizeof (scm_t_cell);
          freelist->heap_segment_idx =
            scm_i_get_new_heap_segment (freelist, bytes, abort_on_error);

          *free_cells = scm_i_sweep_for_freelist (freelist);
        }
    }

  if (*free_cells == SCM_EOL)
    {
      /*
        Out of fresh cells.  Try to get some new ones.
      */
      char reason[] = "0-cells";
      reason[0] += freelist->span;

      did_gc = 1;
      scm_i_gc (reason);

      *free_cells = scm_i_sweep_for_freelist (freelist);
    }

  if (*free_cells == SCM_EOL)
    {
      /*
        Failed getting new cells.  Get new juice or die.
      */
      float delta = scm_i_gc_heap_size_delta (freelist);
      assert (delta > 0.0);
      size_t bytes = ((unsigned long) delta) * sizeof (scm_t_cell);
      freelist->heap_segment_idx =
        scm_i_get_new_heap_segment (freelist, bytes, abort_on_error);

      *free_cells = scm_i_sweep_for_freelist (freelist);
    }

  if (*free_cells == SCM_EOL)
    abort ();

  cell = *free_cells;

  *free_cells = SCM_FREE_CELL_CDR (cell);

  scm_gc_running_p = 0;
  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);

  if (did_gc)
    scm_c_hook_run (&scm_after_gc_c_hook, 0);

  return cell;
}


scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;
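
/* Illustrative sketch (not compiled): hanging a C function on one of the
   hooks declared above, following the same pattern used for mark_gc_async
   later in this file.  */
#if 0
static void *
example_log_gc (void *hook_data SCM_UNUSED,
                void *fn_data SCM_UNUSED,
                void *data SCM_UNUSED)
{
  fprintf (stderr, "GC finished\n");
  return NULL;
}

/* Run once at startup, e.g. from an init function:
     scm_c_hook_add (&scm_after_gc_c_hook, example_log_gc, NULL, 0);  */
#endif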

static void
scm_check_deprecated_memory_return ()
{
  if (scm_mallocated < scm_i_deprecated_memory_return)
    {
      /* The byte count of allocated objects has underflowed.  This is
         probably because you forgot to report the sizes of objects you
         have allocated, by calling scm_done_malloc or some such.  When
         the GC freed them, it subtracted their size from
         scm_mallocated, which underflowed. */
      fprintf (stderr,
               "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
               "This is probably because the GC hasn't been correctly informed\n"
               "about object sizes\n");
      abort ();
    }
  scm_mallocated -= scm_i_deprecated_memory_return;
  scm_i_deprecated_memory_return = 0;
}

long int scm_i_last_marked_cell_count;

/* Must be called while holding scm_i_sweep_mutex.

   This function is fairly long, but it touches various global
   variables.  To avoid obscuring the side effects on global variables,
   it has not been split up.
 */
void
scm_i_gc (const char *what)
{
  unsigned long t_before_gc = 0;

  scm_i_thread_put_to_sleep ();

  scm_c_hook_run (&scm_before_gc_c_hook, 0);

#ifdef DEBUGINFO
  fprintf (stderr, "gc reason %s\n", what);
  fprintf (stderr,
           scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist))
           ? "*"
           : (scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist2)) ? "o" : "m"));
#endif

  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_malloc_collected = 0;

  /*
    Set freelists to NULL so scm_cons () always triggers gc, causing
    the assertion above to fail.
  */
  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  /*
    Let's finish the sweep.  The conservative GC might point into the
    garbage, and marking that would create a mess.
  */
  scm_i_sweep_all_segments ("GC", &scm_i_gc_sweep_stats);
  scm_check_deprecated_memory_return ();

#if (SCM_DEBUG_CELL_ACCESSES == 0 && SCM_SIZEOF_UNSIGNED_LONG == 4)
  /* Sanity check our numbers. */
  /* TODO(hanwen): figure out why the stats are off on x86_64. */
  /* If this was not true, someone touched mark bits outside of the
     mark phase. */
  if (scm_i_last_marked_cell_count != scm_i_marked_count ())
    {
      static char msg[] =
        "The number of marked objects changed since the last GC: %ld vs %ld.\n";
      /* At some point, we should probably use a deprecation warning. */
      fprintf (stderr, msg, scm_i_last_marked_cell_count,
               (long) scm_i_marked_count ());
    }
  assert (scm_i_gc_sweep_stats.swept
          == (scm_i_master_freelist.heap_total_cells
              + scm_i_master_freelist2.heap_total_cells));
  assert (scm_i_gc_sweep_stats.collected + scm_i_last_marked_cell_count
          == scm_i_gc_sweep_stats.swept);
#endif /* SCM_DEBUG_CELL_ACCESSES */

  /* Mark */
  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  scm_mark_all ();
  scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);

  scm_i_last_marked_cell_count = scm_cells_allocated = scm_i_marked_count ();

  /* Sweep

    TODO: the after_sweep hook should probably be moved to just before
    the mark, since that's where the sweep is finished in lazy
    sweeping.

    MDJ 030219 <djurfeldt@nada.kth.se>: No, probably not.  The
    original meaning implied at least two things: that it would be
    called when

      1. the freelist is re-initialized (no evaluation possible, though)

    and

      2. the heap is "fresh"
         (it is well-defined what data is used and what is not)

    Neither of these conditions would hold just before the mark phase.

    Of course, lazy sweeping has muddled the distinction between
    scm_before_sweep_c_hook and scm_after_sweep_c_hook, but even if
    there were no difference, it would still be useful to have two
    distinct classes of hook functions since this can prevent some
    bad interference when several modules add GC hooks.
  */
  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  /*
    Nothing here: lazy sweeping.
   */
  scm_i_reset_segments ();

  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  /* Invalidate the freelists of other threads. */
  scm_i_thread_invalidate_freelists ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  gc_end_stats ();

  scm_i_gc_sweep_stats.collected = scm_i_gc_sweep_stats.swept = 0;
  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist);
  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist2);

  /* Arguably, this statistic is fairly useless: marking will dominate
     the time taken.
   */
  scm_gc_time_taken += (scm_c_get_internal_run_time () - t_before_gc);

  scm_i_thread_wake_up ();
  /*
    For debugging purposes, you could do
    scm_i_sweep_all_segments ("debug"), but then the remains of the
    cell aren't left to analyse.
   */
}


\f
/* {GC Protection Helper Functions}
 */


/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below.  These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters.  Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called.  In other words, place the call to
 * scm_remember_upto_here* _after_ the last code in your function that
 * depends on the scheme object's existence.
 *
 * Example: We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function' in the code
 * below, because otherwise the characters belonging to str would be freed
 * and 'some_function' might access freed memory.  To make sure that the
 * compiler keeps str alive on the stack or in a register such that it is
 * visible to the conservative gc, we add the call to
 * scm_remember_upto_here_1 _after_ the call to 'some_function'.  Note that
 * this would not be necessary if str were used anyway after the call to
 * 'some_function'.
 *   char *chars = scm_i_string_chars (str);
 *   some_function (chars);
 *   scm_remember_upto_here_1 (str);  // str will be alive up to this point.
 */

/* Remove any macro versions of these while defining the functions.
   Functions are always included in the library, for upward binary
   compatibility and in case combinations of GCC and non-GCC are used.  */
#undef scm_remember_upto_here_1
#undef scm_remember_upto_here_2

void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty.  Protects a single object from garbage collection. */
}

void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty.  Protects two objects from garbage collection. */
}

void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty.  Protects any number of objects from garbage collection. */
}
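
/* Illustrative sketch (not compiled) of the pattern described above;
   `use_c_string' stands in for any C function that only sees the raw
   pointer, not the SCM object.  */
#if 0
static void
example_remember_upto_here (SCM str, void (*use_c_string) (const char *))
{
  const char *chars = scm_i_string_chars (str);

  use_c_string (chars);
  /* Keep STR visible to the conservative GC until this point, so that
     CHARS cannot be freed while use_c_string is still running.  */
  scm_remember_upto_here_1 (str);
}
#endif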

/*
  These crazy functions prevent garbage collection
  of arguments after the first argument by
  ensuring they remain live throughout the
  function because they are used in the last
  line of the code block.
  It'd be better to have a nice compiler hint to
  aid the conservative stack-scanning GC. --03/09/00 gjb */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

int
scm_return_first_int (int i, ...)
{
  return i;
}


SCM
scm_permanent_object (SCM obj)
{
  SCM cell = scm_cons (obj, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  SCM_SETCDR (cell, scm_permobjs);
  scm_permobjs = cell;
  SCM_CRITICAL_SECTION_END;
  return obj;
}


/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if
   all other references are dropped, until the object is unprotected by
   calling scm_gc_unprotect_object (OBJ).  Calls to
   scm_gc_protect/unprotect_object nest, i.e. it is possible to protect the
   same object several times, but it is necessary to unprotect the object
   the same number of times to actually get the object unprotected.  It is
   an error to unprotect an object more often than it has been protected
   before.  The function scm_protect_object returns OBJ.
*/

/* Implementation note: For every object X, there is a counter which
   scm_gc_protect_object (X) increments and scm_gc_unprotect_object (X)
   decrements.
*/



SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: Indeed; if my comment above is correct, there is the same
     critsec/mutex inconsistency here. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashq_create_handle_x (scm_protects, obj, scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  protected_obj_count++;

  SCM_CRITICAL_SECTION_END;

  return obj;
}


/* Remove any protection for OBJ established by a prior call to
   scm_protect_object.  This function returns OBJ.

   See scm_protect_object for more information.  */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  if (scm_gc_running_p)
    {
      fprintf (stderr, "scm_unprotect_object called during GC.\n");
      abort ();
    }

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, count);
    }
  protected_obj_count--;

  SCM_CRITICAL_SECTION_END;

  return obj;
}
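
/* Illustrative sketch (not compiled): the nesting behaviour described
   above.  An object must be unprotected as many times as it was
   protected before it becomes collectable again.  */
#if 0
static void
example_nested_protection (void)
{
  SCM obj = scm_cons (SCM_BOOL_T, SCM_EOL);

  scm_gc_protect_object (obj);     /* count: 1 */
  scm_gc_protect_object (obj);     /* count: 2 */

  scm_gc_unprotect_object (obj);   /* count: 1 -- still protected */
  scm_gc_unprotect_object (obj);   /* count: 0 -- collectable again */
}
#endif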

void
scm_gc_register_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key,
                                      scm_from_int (0));
  /* njrev: note also that the above can probably signal an error */
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  SCM_CRITICAL_SECTION_END;
}

void
scm_gc_unregister_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashv_get_handle (scm_gc_registered_roots, key);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
        scm_hashv_remove_x (scm_gc_registered_roots, key);
      else
        SCM_SETCDR (handle, count);
    }

  SCM_CRITICAL_SECTION_END;
}

void
scm_gc_register_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_register_root (p);
}

void
scm_gc_unregister_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_unregister_root (p);
}
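
/* Illustrative sketch (not compiled): registering a global SCM location
   as a root, so the object it holds is traced even though the location
   lives outside the stack that the conservative GC scans.  */
#if 0
static SCM example_global;

static void
example_register_global_root (void)
{
  example_global = SCM_BOOL_F;
  scm_gc_register_root (&example_global);
  example_global = scm_cons (SCM_BOOL_T, SCM_EOL);

  /* ... later, when the location is no longer used: */
  scm_gc_unregister_root (&example_global);
}
#endif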

int scm_i_terminating;

\f


/*
  MOVE THIS FUNCTION.  IT DOES NOT HAVE ANYTHING TO DO WITH GC.
 */

/* Get an integer from an environment variable.  */
int
scm_getenv_int (const char *var, int def)
{
  char *end = 0;
  char *val = getenv (var);
  long res = def;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  return res;
}
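
/* Illustrative sketch (not compiled): typical use of scm_getenv_int,
   falling back to the given default when the variable is unset or not
   a number.  GUILE_EXAMPLE_THRESHOLD is a made-up variable name.  */
#if 0
static void
example_getenv_int (void)
{
  int threshold = scm_getenv_int ("GUILE_EXAMPLE_THRESHOLD", 128);
  fprintf (stderr, "threshold: %d\n", threshold);
}
#endif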

void
scm_storage_prehistory ()
{
  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
}

scm_i_pthread_mutex_t scm_i_gc_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;

int
scm_init_storage ()
{
  size_t j;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;

  scm_gc_init_freelist ();
  scm_gc_init_malloc ();

#if 0
  /* We can't have a cleanup handler since we have no thread to run it
     in. */

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

#endif

  scm_stand_in_procs = scm_make_weak_key_hash_table (scm_from_int (257));
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);
  scm_gc_registered_roots = scm_c_make_hash_table (31);

  return 0;
}

\f

SCM scm_after_gc_hook;

static SCM gc_async;

/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}


/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void *hook_data SCM_UNUSED,
               void *fn_data SCM_UNUSED,
               void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if
   * the after-gc-hook was performed with every gc, and if the gc was
   * performed after a very small number of cell accesses, then the number
   * of cell accesses during the execution of the after-gc-hook will suffice
   * to cause the execution of the next gc.  Then, guile would keep
   * executing the after-gc-hook over and over again, and would never come
   * to do other things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collector.  When running guile with cell access debugging and
   * the execution of the after-gc-hook is desired, then it is necessary to
   * run the hook explicitly from the user code.  This has the effect that
   * from the scheme level point of view it seems that garbage collection is
   * performed with a much lower frequency than it actually is.  Obviously,
   * this will not work for code that depends on a fixed one-to-one
   * relationship between the execution counts of the C level garbage
   * collection hooks and the execution count of the scheme level
   * after-gc-hook.
   */

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  if (scm_debug_cells_gc_interval == 0)
    scm_system_async_mark (gc_async);
#else
  scm_system_async_mark (gc_async);
#endif

  return NULL;
}

void
scm_init_gc ()
{
  scm_gc_init_mark ();

  scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  gc_async = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
                              gc_async_thunk);

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}

#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */

void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  /* Nothing to do here: sweeping happens lazily, in
     scm_gc_for_newcell and scm_i_gc above. */
}

#undef FUNC_NAME



/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/