1 /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003, 2006, 2008 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 /* #define DEBUGINFO */
31 #include "libguile/_scm.h"
32 #include "libguile/eval.h"
33 #include "libguile/stime.h"
34 #include "libguile/stackchk.h"
35 #include "libguile/struct.h"
36 #include "libguile/smob.h"
37 #include "libguile/unif.h"
38 #include "libguile/async.h"
39 #include "libguile/ports.h"
40 #include "libguile/root.h"
41 #include "libguile/strings.h"
42 #include "libguile/vectors.h"
43 #include "libguile/weaks.h"
44 #include "libguile/hashtab.h"
45 #include "libguile/tags.h"
47 #include "libguile/private-gc.h"
48 #include "libguile/validate.h"
49 #include "libguile/deprecation.h"
50 #include "libguile/gc.h"
51 #include "libguile/dynwind.h"
53 #ifdef GUILE_DEBUG_MALLOC
54 #include "libguile/debug-malloc.h"
65 /* Lock this mutex before doing lazy sweeping.
67 scm_i_pthread_mutex_t scm_i_sweep_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
/* Cell-access debugging flags.  scm_debug_cell_accesses_p enables the
   cheap per-access check; scm_expensive_debug_cell_accesses_p
   additionally enables the heap-membership check (see
   scm_i_expensive_validation_check below). */
69 /* Set this to != 0 if every cell that is accessed shall be checked:
71 int scm_debug_cell_accesses_p
= 0;
72 int scm_expensive_debug_cell_accesses_p
= 0;
74 /* Set this to 0 if no additional gc's shall be performed, otherwise set it to
75 * the number of cell accesses after which a gc shall be called.
77 int scm_debug_cells_gc_interval
= 0;
/* Non-zero while cell validation is in progress; checked to avoid
   recursive validation, since validating a cell itself performs cell
   accesses. */
80 Global variable, so you can switch it off at runtime by setting
81 scm_i_cell_validation_already_running.
83 int scm_i_cell_validation_already_running
;
85 #if (SCM_DEBUG_CELL_ACCESSES == 1)
90 Assert that the given object is a valid reference to a valid cell. This
91 test involves determining whether the object is a cell pointer, whether
92 this pointer actually points into a heap segment and whether the cell
93 pointed to is not a free cell. Further, additional garbage collections may
94 get executed after a user defined number of cell accesses. This helps to
95 find places in the C code where references are dropped for extremely short
/* Expensive half of cell validation: verify CELL points into the heap,
   and optionally force a GC every scm_debug_cells_gc_interval accesses
   so prematurely dropped references are collected (and thus detected)
   quickly. */
100 scm_i_expensive_validation_check (SCM cell
)
102 if (!scm_in_heap_p (cell
))
/* Report the bogus pointer value to stderr. */
104 fprintf (stderr
, "scm_assert_cell_valid: this object does not live in the heap: %lux\n",
105 (unsigned long) SCM_UNPACK (cell
))
;
109 /* If desired, perform additional garbage collections after a user
110 * defined number of cell accesses.
112 if (scm_debug_cells_gc_interval
)
/* Counts down across calls; presumably reset to the interval after
   each forced GC — the decrement/GC lines are not visible here. */
114 static unsigned int counter
= 0;
122 counter
= scm_debug_cells_gc_interval
;
/* Public entry point of cell-access checking: performs the cheap mark
   check on every access, plus the expensive heap check when
   scm_expensive_debug_cell_accesses_p is set.  Guarded against
   recursion via scm_i_cell_validation_already_running. */
129 scm_assert_cell_valid (SCM cell
)
131 if (!scm_i_cell_validation_already_running
&& scm_debug_cell_accesses_p
)
133 scm_i_cell_validation_already_running
= 1; /* set to avoid recursion */
136 During GC, no user-code should be run, and the guile core
137 should use non-protected accessors.
139 if (scm_gc_running_p
)
143 Only scm_in_heap_p and rescanning the heap is wildly
146 if (scm_expensive_debug_cell_accesses_p
)
147 scm_i_expensive_validation_check (cell
);
/* A cell that survived the last GC must carry the mark bit; an
   unmarked cell here means the object was collected while still
   referenced from C code. */
149 if (!SCM_GC_MARK_P (cell
))
152 "scm_assert_cell_valid: this object is unmarked. \n"
153 "It has been garbage-collected in the last GC run: "
155 (unsigned long) SCM_UNPACK (cell
))
;
159 scm_i_cell_validation_already_running
= 0; /* re-enable */
/* Scheme primitive set-debug-cell-accesses!: configure the three
   cell-access debugging globals from a single FLAG argument
   (#f = off, #t = cheap checks, integer = expensive checks plus a GC
   every FLAG accesses). */
165 SCM_DEFINE (scm_set_debug_cell_accesses_x
, "set-debug-cell-accesses!", 1, 0, 0,
167 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
168 "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
169 "but no additional calls to garbage collection are issued.\n"
170 "If @var{flag} is a number, strict cell access checking is enabled,\n"
171 "with an additional garbage collection after the given\n"
172 "number of cell accesses.\n"
173 "This procedure only exists when the compile-time flag\n"
174 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
175 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
/* #f: disable all checking. */
177 if (scm_is_false (flag
))
179 scm_debug_cell_accesses_p
= 0;
/* #t: cheap checks only, no interval-driven GCs. */
181 else if (scm_is_eq (flag
, SCM_BOOL_T
))
183 scm_debug_cells_gc_interval
= 0;
184 scm_debug_cell_accesses_p
= 1;
185 scm_expensive_debug_cell_accesses_p
= 0;
/* A number: expensive checks with a forced GC every FLAG accesses.
   scm_to_signed_integer range-checks FLAG into [0, INT_MAX]. */
189 scm_debug_cells_gc_interval
= scm_to_signed_integer (flag
, 0, INT_MAX
);
190 scm_debug_cell_accesses_p
= 1;
191 scm_expensive_debug_cell_accesses_p
= 1;
193 return SCM_UNSPECIFIED
;
198 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
204 * is the number of bytes of malloc allocation needed to trigger gc.
206 unsigned long scm_mtrigger
;
208 /* GC Statistics Keeping
210 unsigned long scm_cells_allocated
= 0;
211 unsigned long scm_last_cells_allocated
= 0;
212 unsigned long scm_mallocated
= 0;
214 /* Global GC sweep statistics since the last full GC. */
215 scm_t_sweep_statistics scm_i_gc_sweep_stats
= { 0, 0 };
217 /* Total count of cells marked/swept. */
218 static double scm_gc_cells_marked_acc
= 0.;
219 static double scm_gc_cells_swept_acc
= 0.;
220 static double scm_gc_cells_allocated_acc
= 0.;
222 static unsigned long scm_gc_time_taken
= 0;
223 static unsigned long scm_gc_mark_time_taken
= 0;
225 static unsigned long scm_gc_times
= 0;
227 static int scm_gc_cell_yield_percentage
= 0;
228 static unsigned long protected_obj_count
= 0;
230 /* The following are accessed from `gc-malloc.c' and `gc-card.c'. */
231 int scm_gc_malloc_yield_percentage
= 0;
232 unsigned long scm_gc_malloc_collected
= 0;
235 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
236 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
237 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
238 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
239 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
240 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
241 SCM_SYMBOL (sym_gc_mark_time_taken
, "gc-mark-time-taken");
242 SCM_SYMBOL (sym_times
, "gc-times");
243 SCM_SYMBOL (sym_cells_marked
, "cells-marked");
244 SCM_SYMBOL (sym_cells_swept
, "cells-swept");
245 SCM_SYMBOL (sym_malloc_yield
, "malloc-yield");
246 SCM_SYMBOL (sym_cell_yield
, "cell-yield");
247 SCM_SYMBOL (sym_protected_objects
, "protected-objects");
248 SCM_SYMBOL (sym_total_cells_allocated
, "total-cells-allocated");
251 /* Number of calls to SCM_NEWCELL since startup. */
252 unsigned scm_newcell_count
;
253 unsigned scm_newcell2_count
;
256 /* {Scheme Interface to GC}
/* Hash-fold callback: turn one (tag . count) entry of the live-object
   table into a (name . count) pair consed onto the accumulator ACC.
   CLOSURE is unused. */
259 tag_table_to_type_alist (void *closure
, SCM key
, SCM val
, SCM acc
)
261 if (scm_is_integer (key
))
263 int c_tag
= scm_to_int (key
);
265 char const * name
= scm_i_tag_name (c_tag
);
/* Known tag: use its human-readable name as the alist key. */
268 key
= scm_from_locale_string (name
);
/* Unknown tag: fall back to a "tag <n>" label. */
273 sprintf (s
, "tag %d", c_tag
);
274 key
= scm_from_locale_string (s
);
278 return scm_cons (scm_cons (key
, val
), acc
);
281 SCM_DEFINE (scm_gc_live_object_stats
, "gc-live-object-stats", 0, 0, 0,
283 "Return an alist of statistics of the current live objects. ")
284 #define FUNC_NAME s_scm_gc_live_object_stats
286 SCM tab
= scm_make_hash_table (scm_from_int (57));
289 scm_i_all_segments_statistics (tab
);
292 = scm_internal_hash_fold (&tag_table_to_type_alist
, NULL
, SCM_EOL
, tab
);
298 extern int scm_gc_malloc_yield_percentage
;
299 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
301 "Return an association list of statistics about Guile's current\n"
303 #define FUNC_NAME s_scm_gc_stats
306 SCM heap_segs
= SCM_EOL
;
307 unsigned long int local_scm_mtrigger
;
308 unsigned long int local_scm_mallocated
;
309 unsigned long int local_scm_heap_size
;
310 int local_scm_gc_cell_yield_percentage
;
311 int local_scm_gc_malloc_yield_percentage
;
312 unsigned long int local_scm_cells_allocated
;
313 unsigned long int local_scm_gc_time_taken
;
314 unsigned long int local_scm_gc_times
;
315 unsigned long int local_scm_gc_mark_time_taken
;
316 unsigned long int local_protected_obj_count
;
317 double local_scm_gc_cells_swept
;
318 double local_scm_gc_cells_marked
;
319 double local_scm_total_cells_allocated
;
321 unsigned long *bounds
= 0;
323 SCM_CRITICAL_SECTION_START
;
325 bounds
= scm_i_segment_table_info (&table_size
);
327 /* Below, we cons to produce the resulting list. We want a snapshot of
328 * the heap situation before consing.
330 local_scm_mtrigger
= scm_mtrigger
;
331 local_scm_mallocated
= scm_mallocated
;
332 local_scm_heap_size
=
333 (scm_i_master_freelist
.heap_total_cells
+ scm_i_master_freelist2
.heap_total_cells
);
335 local_scm_cells_allocated
=
336 scm_cells_allocated
+ scm_i_gc_sweep_stats
.collected
;
338 local_scm_gc_time_taken
= scm_gc_time_taken
;
339 local_scm_gc_mark_time_taken
= scm_gc_mark_time_taken
;
340 local_scm_gc_times
= scm_gc_times
;
341 local_scm_gc_malloc_yield_percentage
= scm_gc_malloc_yield_percentage
;
342 local_scm_gc_cell_yield_percentage
= scm_gc_cell_yield_percentage
;
343 local_protected_obj_count
= protected_obj_count
;
344 local_scm_gc_cells_swept
=
345 (double) scm_gc_cells_swept_acc
346 + (double) scm_i_gc_sweep_stats
.swept
;
347 local_scm_gc_cells_marked
= scm_gc_cells_marked_acc
348 +(double) scm_i_gc_sweep_stats
.swept
349 -(double) scm_i_gc_sweep_stats
.collected
;
351 local_scm_total_cells_allocated
= scm_gc_cells_allocated_acc
352 + (double) scm_i_gc_sweep_stats
.collected
;
354 for (i
= table_size
; i
--;)
356 heap_segs
= scm_cons (scm_cons (scm_from_ulong (bounds
[2*i
]),
357 scm_from_ulong (bounds
[2*i
+1])),
361 /* njrev: can any of these scm_cons's or scm_list_n signal a memory
362 error? If so we need a frame here. */
364 scm_list_n (scm_cons (sym_gc_time_taken
,
365 scm_from_ulong (local_scm_gc_time_taken
)),
366 scm_cons (sym_cells_allocated
,
367 scm_from_ulong (local_scm_cells_allocated
)),
368 scm_cons (sym_total_cells_allocated
,
369 scm_from_double (local_scm_total_cells_allocated
)),
370 scm_cons (sym_heap_size
,
371 scm_from_ulong (local_scm_heap_size
)),
372 scm_cons (sym_mallocated
,
373 scm_from_ulong (local_scm_mallocated
)),
374 scm_cons (sym_mtrigger
,
375 scm_from_ulong (local_scm_mtrigger
)),
377 scm_from_ulong (local_scm_gc_times
)),
378 scm_cons (sym_gc_mark_time_taken
,
379 scm_from_ulong (local_scm_gc_mark_time_taken
)),
380 scm_cons (sym_cells_marked
,
381 scm_from_double (local_scm_gc_cells_marked
)),
382 scm_cons (sym_cells_swept
,
383 scm_from_double (local_scm_gc_cells_swept
)),
384 scm_cons (sym_malloc_yield
,
385 scm_from_long(local_scm_gc_malloc_yield_percentage
)),
386 scm_cons (sym_cell_yield
,
387 scm_from_long (local_scm_gc_cell_yield_percentage
)),
388 scm_cons (sym_protected_objects
,
389 scm_from_ulong (local_protected_obj_count
)),
390 scm_cons (sym_heap_segments
, heap_segs
),
392 SCM_CRITICAL_SECTION_END
;
400 Update nice-to-know-statistics.
405 /* CELLS SWEPT is another word for the number of cells that were examined
406 during GC. YIELD is the number that we cleaned out. MARKED is the number
407 that weren't cleaned. */
408 scm_gc_cell_yield_percentage
= (scm_i_gc_sweep_stats
.collected
* 100) /
409 (scm_i_master_freelist
.heap_total_cells
+ scm_i_master_freelist2
.heap_total_cells
);
411 scm_gc_cells_allocated_acc
+=
412 (double) scm_i_gc_sweep_stats
.collected
;
413 scm_gc_cells_marked_acc
+= (double) scm_cells_allocated
;
414 scm_gc_cells_swept_acc
+= (double) scm_i_gc_sweep_stats
.swept
;
/* Scheme primitive object-address: expose the raw SCM word of OBJ as
   an unsigned integer.  Stable only for the lifetime of OBJ, since the
   GC does not move objects but may reuse addresses after collection. */
419 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
421 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
422 "returned by this function for @var{obj}")
423 #define FUNC_NAME s_scm_object_address
425 return scm_from_ulong (SCM_UNPACK (obj
));
/* Scheme primitive gc: run a full collection.  Takes the sweep mutex
   and sets scm_gc_running_p around the collection, then fires the
   after-gc C hook outside the lock. */
430 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
432 "Scans all of SCM objects and reclaims for further use those that are\n"
433 "no longer accessible.")
434 #define FUNC_NAME s_scm_gc
436 scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex
);
437 scm_gc_running_p
= 1;
439 /* njrev: It looks as though other places, e.g. scm_realloc,
440 can call scm_i_gc without acquiring the sweep mutex. Does this
441 matter? Also scm_i_gc (or its descendants) touch the
442 scm_sys_protects, which are protected in some cases
443 (e.g. scm_permobjs above in scm_gc_stats) by a critical section,
444 not by the sweep mutex. Shouldn't all the GC-relevant objects be
445 protected in the same way? */
446 scm_gc_running_p
= 0;
447 scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex
);
/* Run after-gc hooks only after releasing the sweep mutex. */
448 scm_c_hook_run (&scm_after_gc_c_hook
, 0);
449 return SCM_UNSPECIFIED
;
456 /* The master is global and common while the freelist will be
457 * individual for each thread.
461 scm_gc_for_newcell (scm_t_cell_type_statistics
*freelist
, SCM
*free_cells
)
466 scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex
);
467 scm_gc_running_p
= 1;
469 *free_cells
= scm_i_sweep_for_freelist (freelist
);
470 if (*free_cells
== SCM_EOL
)
472 float delta
= scm_i_gc_heap_size_delta (freelist
);
475 size_t bytes
= ((unsigned long) delta
) * sizeof (scm_t_cell
);
476 freelist
->heap_segment_idx
=
477 scm_i_get_new_heap_segment (freelist
, bytes
, abort_on_error
);
479 *free_cells
= scm_i_sweep_for_freelist (freelist
);
483 if (*free_cells
== SCM_EOL
)
486 out of fresh cells. Try to get some new ones.
488 char reason
[] = "0-cells";
489 reason
[0] += freelist
->span
;
494 *free_cells
= scm_i_sweep_for_freelist (freelist
);
497 if (*free_cells
== SCM_EOL
)
500 failed getting new cells. Get new juice or die.
502 float delta
= scm_i_gc_heap_size_delta (freelist
);
503 assert (delta
> 0.0);
504 size_t bytes
= ((unsigned long) delta
) * sizeof (scm_t_cell
);
505 freelist
->heap_segment_idx
=
506 scm_i_get_new_heap_segment (freelist
, bytes
, abort_on_error
);
508 *free_cells
= scm_i_sweep_for_freelist (freelist
);
511 if (*free_cells
== SCM_EOL
)
516 *free_cells
= SCM_FREE_CELL_CDR (cell
);
518 scm_gc_running_p
= 0;
519 scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex
);
522 scm_c_hook_run (&scm_after_gc_c_hook
, 0);
/* C-level GC hooks, fired at the corresponding phases of a collection.
   Initialized in scm_storage_prehistory below. */
528 scm_t_c_hook scm_before_gc_c_hook
;
529 scm_t_c_hook scm_before_mark_c_hook
;
530 scm_t_c_hook scm_before_sweep_c_hook
;
531 scm_t_c_hook scm_after_sweep_c_hook
;
532 scm_t_c_hook scm_after_gc_c_hook
;
/* Fold the deprecated scm_done_free-style byte returns into
   scm_mallocated, warning if the subtraction would underflow the
   allocated-bytes counter. */
535 scm_check_deprecated_memory_return()
537 if (scm_mallocated
< scm_i_deprecated_memory_return
)
539 /* The byte count of allocated objects has underflowed. This is
540 probably because you forgot to report the sizes of objects you
541 have allocated, by calling scm_done_malloc or some such. When
542 the GC freed them, it subtracted their size from
543 scm_mallocated, which underflowed. */
545 "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
546 "This is probably because the GC hasn't been correctly informed\n"
547 "about object sizes\n");
550 scm_mallocated
-= scm_i_deprecated_memory_return
;
/* Consume the pending return so it is not subtracted twice. */
551 scm_i_deprecated_memory_return
= 0;
554 /* Must be called while holding scm_i_sweep_mutex.
556 This function is fairly long, but it touches various global
557 variables. To not obscure the side effects on global variables,
558 this function has not been split up.
561 scm_i_gc (const char *what
)
563 unsigned long t_before_gc
= 0;
565 scm_i_thread_put_to_sleep ();
567 scm_c_hook_run (&scm_before_gc_c_hook
, 0);
570 fprintf (stderr
,"gc reason %s\n", what
);
572 scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist
))
574 : (scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist2
)) ? "o" : "m"));
577 t_before_gc
= scm_c_get_internal_run_time ();
578 scm_gc_malloc_collected
= 0;
581 Set freelists to NULL so scm_cons() always triggers gc, causing
582 the assertion above to fail.
584 *SCM_FREELIST_LOC (scm_i_freelist
) = SCM_EOL
;
585 *SCM_FREELIST_LOC (scm_i_freelist2
) = SCM_EOL
;
588 Let's finish the sweep. The conservative GC might point into the
589 garbage, and marking that would create a mess.
591 scm_i_sweep_all_segments ("GC", &scm_i_gc_sweep_stats
);
592 scm_check_deprecated_memory_return();
594 /* Sanity check our numbers. */
595 assert (scm_cells_allocated
== scm_i_marked_count ());
596 assert (scm_i_gc_sweep_stats
.swept
597 == (scm_i_master_freelist
.heap_total_cells
598 + scm_i_master_freelist2
.heap_total_cells
));
599 assert (scm_i_gc_sweep_stats
.collected
+ scm_cells_allocated
600 == scm_i_gc_sweep_stats
.swept
);
603 scm_c_hook_run (&scm_before_mark_c_hook
, 0);
606 scm_gc_mark_time_taken
+= (scm_c_get_internal_run_time () - t_before_gc
);
608 scm_cells_allocated
= scm_i_marked_count ();
612 TODO: the after_sweep hook should probably be moved to just before
613 the mark, since that's where the sweep is finished in lazy
616 MDJ 030219 <djurfeldt@nada.kth.se>: No, probably not. The
617 original meaning implied at least two things: that it would be
620 1. the freelist is re-initialized (no evaluation possible, though)
624 2. the heap is "fresh"
625 (it is well-defined what data is used and what is not)
627 Neither of these conditions would hold just before the mark phase.
629 Of course, the lazy sweeping has muddled the distinction between
630 scm_before_sweep_c_hook and scm_after_sweep_c_hook, but even if
631 there were no difference, it would still be useful to have two
632 distinct classes of hook functions since this can prevent some
633 bad interference when several modules adds gc hooks.
635 scm_c_hook_run (&scm_before_sweep_c_hook
, 0);
638 Nothing here: lazy sweeping.
640 scm_i_reset_segments ();
642 *SCM_FREELIST_LOC (scm_i_freelist
) = SCM_EOL
;
643 *SCM_FREELIST_LOC (scm_i_freelist2
) = SCM_EOL
;
645 /* Invalidate the freelists of other threads. */
646 scm_i_thread_invalidate_freelists ();
647 assert(scm_cells_allocated
== scm_i_marked_count ());
649 scm_c_hook_run (&scm_after_sweep_c_hook
, 0);
652 assert(scm_cells_allocated
== scm_i_marked_count ());
654 scm_i_gc_sweep_stats
.collected
= scm_i_gc_sweep_stats
.swept
= 0;
655 scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist
);
656 scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist2
);
658 /* Arguably, this statistic is fairly useless: marking will dominate
661 scm_gc_time_taken
+= (scm_c_get_internal_run_time () - t_before_gc
);
662 assert(scm_cells_allocated
== scm_i_marked_count ());
663 scm_i_thread_wake_up ();
665 For debugging purposes, you could do
666 scm_i_sweep_all_segments("debug"), but then the remains of the
667 cell aren't left to analyse.
673 /* {GC Protection Helper Functions}
678 * If within a function you need to protect one or more scheme objects from
679 * garbage collection, pass them as parameters to one of the
680 * scm_remember_upto_here* functions below. These functions don't do
681 * anything, but since the compiler does not know that they are actually
682 * no-ops, it will generate code that calls these functions with the given
683 * parameters. Therefore, you can be sure that the compiler will keep those
684 * scheme values alive (on the stack or in a register) up to the point where
685 * scm_remember_upto_here* is called. In other words, place the call to
686 * scm_remember_upto_here* _after_ the last code in your function that
687 * depends on the scheme object existing.
689 * Example: We want to make sure that the string object str does not get
690 * garbage collected during the execution of 'some_function' in the code
691 * below, because otherwise the characters belonging to str would be freed and
692 * 'some_function' might access freed memory. To make sure that the compiler
693 * keeps str alive on the stack or in a register such that it is visible to
694 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
695 * call to 'some_function'. Note that this would not be necessary if str was
696 * used anyway after the call to 'some_function'.
697 * char *chars = scm_i_string_chars (str);
698 * some_function (chars);
699 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
702 /* Remove any macro versions of these while defining the functions.
703 Functions are always included in the library, for upward binary
704 compatibility and in case combinations of GCC and non-GCC are used. */
705 #undef scm_remember_upto_here_1
706 #undef scm_remember_upto_here_2
/* Deliberately empty: the out-of-line call forces the compiler to keep
   OBJ live (in a register or on the stack) up to this point, where the
   conservative GC can see it. */
709 scm_remember_upto_here_1 (SCM obj SCM_UNUSED
)
711 /* Empty. Protects a single object from garbage collection. */
/* Same as above, for two objects. */
715 scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED
, SCM obj2 SCM_UNUSED
)
717 /* Empty. Protects two objects from garbage collection. */
/* Variadic form: protects any number of objects. */
721 scm_remember_upto_here (SCM obj SCM_UNUSED
, ...)
723 /* Empty. Protects any number of objects from garbage collection. */
727 These crazy functions prevent garbage collection
728 of arguments after the first argument by
729 ensuring they remain live throughout the
730 function because they are used in the last
731 line of the code block.
732 It'd be better to have a nice compiler hint to
733 aid the conservative stack-scanning GC. --03/09/00 gjb */
735 scm_return_first (SCM elt
, ...)
741 scm_return_first_int (int i
, ...)
/* Protect OBJ from collection for the remainder of the process by
   consing it onto the global scm_permobjs list.  The list update is
   done inside a critical section. */
748 scm_permanent_object (SCM obj
)
750 SCM cell
= scm_cons (obj
, SCM_EOL
);
751 SCM_CRITICAL_SECTION_START
;
/* Link the new cell at the head of scm_permobjs. */
752 SCM_SETCDR (cell
, scm_permobjs
);
754 SCM_CRITICAL_SECTION_END
;
759 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
760 other references are dropped, until the object is unprotected by calling
761 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
762 i. e. it is possible to protect the same object several times, but it is
763 necessary to unprotect the object the same number of times to actually get
764 the object unprotected. It is an error to unprotect an object more often
765 than it has been protected before. The function scm_protect_object returns
769 /* Implementation note: For every object X, there is a counter which
770 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
/* Increment OBJ's protection count in the scm_protects hash table,
   creating the entry (with count 0) on first protection.  Calls nest;
   see scm_gc_unprotect_object for the matching decrement. */
776 scm_gc_protect_object (SCM obj
)
780 /* This critical section barrier will be replaced by a mutex. */
781 /* njrev: Indeed; if my comment above is correct, there is the same
782 critsec/mutex inconsistency here. */
783 SCM_CRITICAL_SECTION_START
;
785 handle
= scm_hashq_create_handle_x (scm_protects
, obj
, scm_from_int (0));
/* Bump the per-object protection count stored in the handle's cdr. */
786 SCM_SETCDR (handle
, scm_sum (SCM_CDR (handle
), scm_from_int (1)));
788 protected_obj_count
++;
790 SCM_CRITICAL_SECTION_END
;
796 /* Remove any protection for OBJ established by a prior call to
797 scm_protect_object. This function returns OBJ.
799 See scm_protect_object for more information. */
/* Decrement OBJ's protection count; remove the scm_protects entry when
   the count reaches zero.  Complains to stderr when called during GC
   or on an object that was never protected. */
801 scm_gc_unprotect_object (SCM obj
)
805 /* This critical section barrier will be replaced by a mutex. */
806 /* njrev: and again. */
807 SCM_CRITICAL_SECTION_START
;
/* Unprotecting during GC would mutate the protects table while the
   collector may be traversing it. */
809 if (scm_gc_running_p
)
811 fprintf (stderr
, "scm_unprotect_object called during GC.\n");
815 handle
= scm_hashq_get_handle (scm_protects
, obj
);
817 if (scm_is_false (handle
))
819 fprintf (stderr
, "scm_unprotect_object called on unprotected object\n");
/* count = old count - 1; drop the entry entirely at zero, otherwise
   store the decremented count back. */
824 SCM count
= scm_difference (SCM_CDR (handle
), scm_from_int (1));
825 if (scm_is_eq (count
, scm_from_int (0)))
826 scm_hashq_remove_x (scm_protects
, obj
);
828 SCM_SETCDR (handle
, count
);
830 protected_obj_count
--;
832 SCM_CRITICAL_SECTION_END
;
/* Register the SCM location *P as a GC root, keyed by its address in
   the scm_gc_registered_roots hashv table.  Registrations nest via a
   per-address count, mirroring scm_gc_protect_object. */
838 scm_gc_register_root (SCM
*p
)
/* Key the table by the root's address, not its contents. */
841 SCM key
= scm_from_ulong ((unsigned long) p
);
843 /* This critical section barrier will be replaced by a mutex. */
844 /* njrev: and again. */
845 SCM_CRITICAL_SECTION_START
;
847 handle
= scm_hashv_create_handle_x (scm_gc_registered_roots
, key
,
849 /* njrev: note also that the above can probably signal an error */
850 SCM_SETCDR (handle
, scm_sum (SCM_CDR (handle
), scm_from_int (1)));
852 SCM_CRITICAL_SECTION_END
;
/* Undo one scm_gc_register_root for location *P: decrement the
   per-address count and remove the table entry at zero.  Complains to
   stderr when *P was never registered. */
856 scm_gc_unregister_root (SCM
*p
)
859 SCM key
= scm_from_ulong ((unsigned long) p
);
861 /* This critical section barrier will be replaced by a mutex. */
862 /* njrev: and again. */
863 SCM_CRITICAL_SECTION_START
;
865 handle
= scm_hashv_get_handle (scm_gc_registered_roots
, key
);
867 if (scm_is_false (handle
))
869 fprintf (stderr
, "scm_gc_unregister_root called on unregistered root\n");
/* count = old count - 1; drop the entry at zero, else store it back. */
874 SCM count
= scm_difference (SCM_CDR (handle
), scm_from_int (1));
875 if (scm_is_eq (count
, scm_from_int (0)))
876 scm_hashv_remove_x (scm_gc_registered_roots
, key
);
878 SCM_SETCDR (handle
, count
);
881 SCM_CRITICAL_SECTION_END
;
/* Register each of the N consecutive SCM locations starting at B as a
   GC root. */
885 scm_gc_register_roots (SCM
*b
, unsigned long n
)
888 for (; p
< b
+ n
; ++p
)
889 scm_gc_register_root (p
);
/* Unregister each of the N consecutive SCM locations starting at B,
   the inverse of scm_gc_register_roots. */
893 scm_gc_unregister_roots (SCM
*b
, unsigned long n
)
896 for (; p
< b
+ n
; ++p
)
897 scm_gc_unregister_root (p
);
900 int scm_i_terminating
;
906 MOVE THIS FUNCTION. IT DOES NOT HAVE ANYTHING TO DO WITH GC.
/* Read an integer from environment variable VAR.  Returns DEF when VAR
   is unset or when its value does not begin with a decimal number;
   otherwise returns the parsed leading integer (trailing text after
   the number is ignored). */
int
scm_getenv_int (const char *var, int def)
{
  const char *text = getenv (var);
  char *tail = NULL;
  long parsed;

  if (text == NULL)
    return def;

  parsed = strtol (text, &tail, 10);
  if (tail == text)
    return def;

  return parsed;
}
/* Earliest GC initialization: set up the five C-level GC hooks before
   any storage is allocated. */
925 scm_storage_prehistory ()
927 scm_c_hook_init (&scm_before_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
928 scm_c_hook_init (&scm_before_mark_c_hook
, 0, SCM_C_HOOK_NORMAL
);
929 scm_c_hook_init (&scm_before_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
930 scm_c_hook_init (&scm_after_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
931 scm_c_hook_init (&scm_after_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
934 scm_i_pthread_mutex_t scm_i_gc_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
941 j
= SCM_NUM_PROTECTS
;
943 scm_sys_protects
[--j
] = SCM_BOOL_F
;
945 scm_gc_init_freelist();
946 scm_gc_init_malloc ();
949 /* We can't have a cleanup handler since we have no thread to run it
956 on_exit (cleanup
, 0);
962 scm_stand_in_procs
= scm_make_weak_key_hash_table (scm_from_int (257));
963 scm_permobjs
= SCM_EOL
;
964 scm_protects
= scm_c_make_hash_table (31);
965 scm_gc_registered_roots
= scm_c_make_hash_table (31);
/* Scheme-level hook run after each GC (via the async machinery). */
972 SCM scm_after_gc_hook
;
976 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
977 * is run after the gc, as soon as the asynchronous events are handled by the
/* Async thunk body: run scm_after_gc_hook with no arguments. */
981 gc_async_thunk (void)
983 scm_c_run_hook (scm_after_gc_hook
, SCM_EOL
);
984 return SCM_UNSPECIFIED
;
988 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
989 * the garbage collection. The only purpose of this function is to mark the
990 * gc_async (which will eventually lead to the execution of the
994 mark_gc_async (void * hook_data SCM_UNUSED
,
995 void *fn_data SCM_UNUSED
,
996 void *data SCM_UNUSED
)
998 /* If cell access debugging is enabled, the user may choose to perform
999 * additional garbage collections after an arbitrary number of cell
1000 * accesses. We don't want the scheme level after-gc-hook to be performed
1001 * for each of these garbage collections for the following reason: The
1002 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
1003 * after-gc-hook was performed with every gc, and if the gc was performed
1004 * after a very small number of cell accesses, then the number of cell
1005 * accesses during the execution of the after-gc-hook will suffice to cause
1006 * the execution of the next gc. Then, guile would keep executing the
1007 * after-gc-hook over and over again, and would never come to do other
1010 * To overcome this problem, if cell access debugging with additional
1011 * garbage collections is enabled, the after-gc-hook is never run by the
1012 * garbage collecter. When running guile with cell access debugging and the
1013 * execution of the after-gc-hook is desired, then it is necessary to run
1014 * the hook explicitly from the user code. This has the effect, that from
1015 * the scheme level point of view it seems that garbage collection is
1016 * performed with a much lower frequency than it actually is. Obviously,
1017 * this will not work for code that depends on a fixed one to one
1018 * relationship between the execution counts of the C level garbage
1019 * collection hooks and the execution count of the scheme level
1023 #if (SCM_DEBUG_CELL_ACCESSES == 1)
1024 if (scm_debug_cells_gc_interval
== 0)
1025 scm_system_async_mark (gc_async
);
1027 scm_system_async_mark (gc_async
);
1036 scm_gc_init_mark ();
1038 scm_after_gc_hook
= scm_permanent_object (scm_make_hook (SCM_INUM0
));
1039 scm_c_define ("after-gc-hook", scm_after_gc_hook
);
1041 gc_async
= scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0
,
1044 scm_c_hook_add (&scm_after_gc_c_hook
, mark_gc_async
, NULL
, 0);
1046 #include "libguile/gc.x"
1051 # include <sys/param.h>
1052 # include <sys/pstat.h>
1054 scm_ia64_register_backing_store_base (void)
1056 struct pst_vm_status vm_status
;
1058 while (pstat_getprocvm (&vm_status
, sizeof (vm_status
), 0, i
++) == 1)
1059 if (vm_status
.pst_type
== PS_RSESTACK
)
1060 return (void *) vm_status
.pst_vaddr
;
1064 scm_ia64_ar_bsp (const void *ctx
)
1067 __uc_get_ar_bsp(ctx
, &bsp
);
1068 return (void *) bsp
;
1072 # include <ucontext.h>
1074 scm_ia64_register_backing_store_base (void)
1076 extern void *__libc_ia64_register_backing_store_base
;
1077 return __libc_ia64_register_backing_store_base
;
1080 scm_ia64_ar_bsp (const void *opaque
)
1082 const ucontext_t
*ctx
= opaque
;
1083 return (void *) ctx
->uc_mcontext
.sc_ar_bsp
;
1086 #endif /* __ia64__ */
1090 #define FUNC_NAME "scm_gc_sweep"