1 /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003, 2006 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 /* #define DEBUGINFO */
/* Base of the IA-64 register backing store, provided by libc.
   NOTE(review): presumably only meaningful on IA-64 and guarded by
   #ifdef __ia64__ upstream — confirm before building on other targets.  */
extern unsigned long * __libc_ia64_register_backing_store_base;
36 #include "libguile/_scm.h"
37 #include "libguile/eval.h"
38 #include "libguile/stime.h"
39 #include "libguile/stackchk.h"
40 #include "libguile/struct.h"
41 #include "libguile/smob.h"
42 #include "libguile/unif.h"
43 #include "libguile/async.h"
44 #include "libguile/ports.h"
45 #include "libguile/root.h"
46 #include "libguile/strings.h"
47 #include "libguile/vectors.h"
48 #include "libguile/weaks.h"
49 #include "libguile/hashtab.h"
50 #include "libguile/tags.h"
52 #include "libguile/private-gc.h"
53 #include "libguile/validate.h"
54 #include "libguile/deprecation.h"
55 #include "libguile/gc.h"
56 #include "libguile/dynwind.h"
60 #ifdef GUILE_DEBUG_MALLOC
61 #include "libguile/debug-malloc.h"
72 /* Lock this mutex before doing lazy sweeping.
74 scm_i_pthread_mutex_t scm_i_sweep_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
76 /* Set this to != 0 if every cell that is accessed shall be checked:
78 int scm_debug_cell_accesses_p
= 0;
79 int scm_expensive_debug_cell_accesses_p
= 0;
81 /* Set this to 0 if no additional gc's shall be performed, otherwise set it to
82 * the number of cell accesses after which a gc shall be called.
84 int scm_debug_cells_gc_interval
= 0;
87 Global variable, so you can switch it off at runtime by setting
88 scm_i_cell_validation_already_running.
90 int scm_i_cell_validation_already_running
;
92 #if (SCM_DEBUG_CELL_ACCESSES == 1)
97 Assert that the given object is a valid reference to a valid cell. This
98 test involves to determine whether the object is a cell pointer, whether
99 this pointer actually points into a heap segment and whether the cell
100 pointed to is not a free cell. Further, additional garbage collections may
101 get executed after a user defined number of cell accesses. This helps to
102 find places in the C code where references are dropped for extremely short
107 scm_i_expensive_validation_check (SCM cell
)
109 if (!scm_in_heap_p (cell
))
111 fprintf (stderr
, "scm_assert_cell_valid: this object does not live in the heap: %lux\n",
112 (unsigned long) SCM_UNPACK (cell
));
116 /* If desired, perform additional garbage collections after a user
117 * defined number of cell accesses.
119 if (scm_debug_cells_gc_interval
)
121 static unsigned int counter
= 0;
129 counter
= scm_debug_cells_gc_interval
;
136 scm_assert_cell_valid (SCM cell
)
138 if (!scm_i_cell_validation_already_running
&& scm_debug_cell_accesses_p
)
140 scm_i_cell_validation_already_running
= 1; /* set to avoid recursion */
143 During GC, no user-code should be run, and the guile core
144 should use non-protected accessors.
146 if (scm_gc_running_p
)
150 Only scm_in_heap_p and rescanning the heap is wildly
153 if (scm_expensive_debug_cell_accesses_p
)
154 scm_i_expensive_validation_check (cell
);
156 if (!SCM_GC_MARK_P (cell
))
159 "scm_assert_cell_valid: this object is unmarked. \n"
160 "It has been garbage-collected in the last GC run: "
162 (unsigned long) SCM_UNPACK (cell
));
166 scm_i_cell_validation_already_running
= 0; /* re-enable */
172 SCM_DEFINE (scm_set_debug_cell_accesses_x
, "set-debug-cell-accesses!", 1, 0, 0,
174 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
175 "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
176 "but no additional calls to garbage collection are issued.\n"
177 "If @var{flag} is a number, strict cell access checking is enabled,\n"
178 "with an additional garbage collection after the given\n"
179 "number of cell accesses.\n"
180 "This procedure only exists when the compile-time flag\n"
181 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
182 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
184 if (scm_is_false (flag
))
186 scm_debug_cell_accesses_p
= 0;
188 else if (scm_is_eq (flag
, SCM_BOOL_T
))
190 scm_debug_cells_gc_interval
= 0;
191 scm_debug_cell_accesses_p
= 1;
192 scm_expensive_debug_cell_accesses_p
= 0;
196 scm_debug_cells_gc_interval
= scm_to_signed_integer (flag
, 0, INT_MAX
);
197 scm_debug_cell_accesses_p
= 1;
198 scm_expensive_debug_cell_accesses_p
= 1;
200 return SCM_UNSPECIFIED
;
205 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
209 scm_t_c_hook scm_before_gc_c_hook
;
210 scm_t_c_hook scm_before_mark_c_hook
;
211 scm_t_c_hook scm_before_sweep_c_hook
;
212 scm_t_c_hook scm_after_sweep_c_hook
;
213 scm_t_c_hook scm_after_gc_c_hook
;
217 * is the number of bytes of malloc allocation needed to trigger gc.
219 unsigned long scm_mtrigger
;
221 /* GC Statistics Keeping
223 unsigned long scm_cells_allocated
= 0;
224 unsigned long scm_mallocated
= 0;
225 unsigned long scm_gc_cells_collected
;
226 unsigned long scm_gc_cells_collected_1
= 0; /* previous GC yield */
227 unsigned long scm_gc_malloc_collected
;
228 unsigned long scm_gc_ports_collected
;
229 unsigned long scm_gc_time_taken
= 0;
230 static unsigned long t_before_gc
;
231 unsigned long scm_gc_mark_time_taken
= 0;
232 unsigned long scm_gc_times
= 0;
233 unsigned long scm_gc_cells_swept
= 0;
234 double scm_gc_cells_marked_acc
= 0.;
235 double scm_gc_cells_swept_acc
= 0.;
236 int scm_gc_cell_yield_percentage
=0;
237 int scm_gc_malloc_yield_percentage
= 0;
239 static unsigned long protected_obj_count
= 0;
242 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
243 SCM_SYMBOL (sym_heap_size
, "heap-size");
244 SCM_SYMBOL (sym_heap_free_size
, "heap-free-size");
245 SCM_SYMBOL (sym_heap_total_allocated
, "heap-total-allocated");
246 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
247 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
248 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
249 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
250 SCM_SYMBOL (sym_gc_mark_time_taken
, "gc-mark-time-taken");
251 SCM_SYMBOL (sym_times
, "gc-times");
252 SCM_SYMBOL (sym_cells_marked
, "cells-marked");
253 SCM_SYMBOL (sym_cells_swept
, "cells-swept");
254 SCM_SYMBOL (sym_malloc_yield
, "malloc-yield");
255 SCM_SYMBOL (sym_cell_yield
, "cell-yield");
256 SCM_SYMBOL (sym_protected_objects
, "protected-objects");
261 /* Number of calls to SCM_NEWCELL since startup. */
262 unsigned scm_newcell_count
;
263 unsigned scm_newcell2_count
;
266 /* {Scheme Interface to GC}
269 tag_table_to_type_alist (void *closure
, SCM key
, SCM val
, SCM acc
)
271 if (scm_is_integer (key
))
273 int c_tag
= scm_to_int (key
);
275 char const * name
= scm_i_tag_name (c_tag
);
278 key
= scm_from_locale_string (name
);
283 sprintf (s
, "tag %d", c_tag
);
284 key
= scm_from_locale_string (s
);
288 return scm_cons (scm_cons (key
, val
), acc
);
/* NOTE(review): incomplete fragment — the argument list, body braces, the
   binding of the fold result, and the return are missing from this
   extraction.  What is visible: it builds a 57-bucket hash table and folds
   it into an alist through tag_table_to_type_alist above.  Left
   byte-identical pending recovery of the missing lines.  */
291 SCM_DEFINE (scm_gc_live_object_stats
, "gc-live-object-stats", 0, 0, 0,
293 "Return an alist of statistics of the current live objects. ")
294 #define FUNC_NAME s_scm_gc_live_object_stats
296 SCM tab
= scm_make_hash_table (scm_from_int (57));
300 = scm_internal_hash_fold (&tag_table_to_type_alist
, NULL
, SCM_EOL
, tab
);
/* Defined above; re-declared here in the original source.  */
extern int scm_gc_malloc_yield_percentage;
/* NOTE(review): incomplete fragment of `gc-stats'.  Missing from this
   extraction: the argument list, body braces, the declarations of the
   local_scm_* snapshot variables and heap_segs/gc_times, the binding of the
   scm_list_n result, and the return.  What is visible: heap figures are
   read from the BDW collector (GC_get_heap_size & friends) and an alist of
   (symbol . value) pairs is assembled.  Left byte-identical pending
   recovery of the missing lines.  */
307 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
309 "Return an association list of statistics about Guile's current\n"
311 #define FUNC_NAME s_scm_gc_stats
314 size_t heap_size
, free_bytes
, bytes_since_gc
, total_bytes
;
317 heap_size
= GC_get_heap_size ();
318 free_bytes
= GC_get_free_bytes ();
319 bytes_since_gc
= GC_get_bytes_since_gc ();
320 total_bytes
= GC_get_total_bytes ();
323 /* njrev: can any of these scm_cons's or scm_list_n signal a memory
324 error? If so we need a frame here. */
326 scm_list_n (scm_cons (sym_gc_time_taken
, SCM_INUM0
),
328 scm_cons (sym_cells_allocated
,
329 scm_from_ulong (local_scm_cells_allocated
)),
330 scm_cons (sym_mallocated
,
331 scm_from_ulong (local_scm_mallocated
)),
332 scm_cons (sym_mtrigger
,
333 scm_from_ulong (local_scm_mtrigger
)),
334 scm_cons (sym_gc_mark_time_taken
,
335 scm_from_ulong (local_scm_gc_mark_time_taken
)),
336 scm_cons (sym_cells_marked
,
337 scm_from_double (local_scm_gc_cells_marked
)),
338 scm_cons (sym_cells_swept
,
339 scm_from_double (local_scm_gc_cells_swept
)),
340 scm_cons (sym_malloc_yield
,
341 scm_from_long(local_scm_gc_malloc_yield_percentage
)),
342 scm_cons (sym_cell_yield
,
343 scm_from_long (local_scm_gc_cell_yield_percentage
)),
344 scm_cons (sym_heap_segments
, heap_segs
),
346 scm_cons (sym_heap_size
, scm_from_size_t (heap_size
)),
347 scm_cons (sym_heap_free_size
, scm_from_size_t (free_bytes
)),
348 scm_cons (sym_heap_total_allocated
,
349 scm_from_size_t (total_bytes
)),
350 scm_cons (sym_protected_objects
,
351 scm_from_ulong (protected_obj_count
)),
352 scm_cons (sym_times
, scm_from_size_t (gc_times
)),
362 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
364 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
365 "returned by this function for @var{obj}")
366 #define FUNC_NAME s_scm_object_address
368 return scm_from_ulong (SCM_UNPACK (obj
));
373 SCM_DEFINE (scm_gc_disable
, "gc-disable", 0, 0, 0,
375 "Disables the garbage collector. Nested calls are permitted. "
376 "GC is re-enabled once @code{gc-enable} has been called the "
377 "same number of times @code{gc-disable} was called.")
378 #define FUNC_NAME s_scm_gc_disable
381 return SCM_UNSPECIFIED
;
385 SCM_DEFINE (scm_gc_enable
, "gc-enable", 0, 0, 0,
387 "Enables the garbage collector.")
388 #define FUNC_NAME s_scm_gc_enable
391 return SCM_UNSPECIFIED
;
/* NOTE(review): incomplete fragment of the `gc' primitive.  The argument
   list, braces, and crucially the line that actually performs the
   collection (between setting and clearing scm_gc_running_p) are missing
   from this extraction.  Visible behavior: take the sweep mutex, set the
   running flag, run the collection (elided), clear the flag, release the
   mutex, then run the after-gc C hook.  Left byte-identical.  */
396 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
398 "Scans all of SCM objects and reclaims for further use those that are\n"
399 "no longer accessible.")
400 #define FUNC_NAME s_scm_gc
402 scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex
);
403 scm_gc_running_p
= 1;
405 /* njrev: It looks as though other places, e.g. scm_realloc,
406 can call scm_i_gc without acquiring the sweep mutex. Does this
407 matter? Also scm_i_gc (or its descendants) touch the
408 scm_sys_protects, which are protected in some cases
409 (e.g. scm_permobjs above in scm_gc_stats) by a critical section,
410 not by the sweep mutex. Shouldn't all the GC-relevant objects be
411 protected in the same way? */
412 scm_gc_running_p
= 0;
413 scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex
);
414 scm_c_hook_run (&scm_after_gc_c_hook
, 0);
415 return SCM_UNSPECIFIED
;
/* NOTE(review): only the signature of scm_i_gc survives in this extraction;
   the return type and the entire body are missing.  WHAT is a debug string
   naming the trigger of the collection.  Left byte-identical.  */
420 scm_i_gc (const char *what
)
427 /* {GC Protection Helper Functions}
432 * If within a function you need to protect one or more scheme objects from
433 * garbage collection, pass them as parameters to one of the
434 * scm_remember_upto_here* functions below. These functions don't do
435 * anything, but since the compiler does not know that they are actually
436 * no-ops, it will generate code that calls these functions with the given
437 * parameters. Therefore, you can be sure that the compiler will keep those
438 * scheme values alive (on the stack or in a register) up to the point where
439 * scm_remember_upto_here* is called. In other words, place the call to
440 * scm_remember_upto_here* _behind_ the last code in your function, that
441 * depends on the scheme object to exist.
443 * Example: We want to make sure that the string object str does not get
444 * garbage collected during the execution of 'some_function' in the code
445 * below, because otherwise the characters belonging to str would be freed and
446 * 'some_function' might access freed memory. To make sure that the compiler
447 * keeps str alive on the stack or in a register such that it is visible to
448 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
449 * call to 'some_function'. Note that this would not be necessary if str was
450 * used anyway after the call to 'some_function'.
451 * char *chars = scm_i_string_chars (str);
452 * some_function (chars);
453 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
456 /* Remove any macro versions of these while defining the functions.
457 Functions are always included in the library, for upward binary
458 compatibility and in case combinations of GCC and non-GCC are used. */
459 #undef scm_remember_upto_here_1
460 #undef scm_remember_upto_here_2
463 scm_remember_upto_here_1 (SCM obj SCM_UNUSED
)
465 /* Empty. Protects a single object from garbage collection. */
469 scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED
, SCM obj2 SCM_UNUSED
)
471 /* Empty. Protects two objects from garbage collection. */
475 scm_remember_upto_here (SCM obj SCM_UNUSED
, ...)
477 /* Empty. Protects any number of objects from garbage collection. */
481 These crazy functions prevent garbage collection
482 of arguments after the first argument by
483 ensuring they remain live throughout the
484 function because they are used in the last
485 line of the code block.
486 It'd be better to have a nice compiler hint to
487 aid the conservative stack-scanning GC. --03/09/00 gjb */
489 scm_return_first (SCM elt
, ...)
/* Integer variant of scm_return_first: return I, keeping the variadic tail
   live for the conservative GC.  NOTE(review): body was elided in the
   extraction; restored as `return i;' — confirm.  */
int
scm_return_first_int (int i, ...)
{
  return i;
}
502 scm_permanent_object (SCM obj
)
504 SCM cell
= scm_cons (obj
, SCM_EOL
);
505 SCM_CRITICAL_SECTION_START
;
506 SCM_SETCDR (cell
, scm_permobjs
);
508 SCM_CRITICAL_SECTION_END
;
513 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
514 other references are dropped, until the object is unprotected by calling
515 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
516 i. e. it is possible to protect the same object several times, but it is
517 necessary to unprotect the object the same number of times to actually get
518 the object unprotected. It is an error to unprotect an object more often
519 than it has been protected before. The function scm_protect_object returns
523 /* Implementation note: For every object X, there is a counter which
524 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
530 scm_gc_protect_object (SCM obj
)
534 /* This critical section barrier will be replaced by a mutex. */
535 /* njrev: Indeed; if my comment above is correct, there is the same
536 critsec/mutex inconsistency here. */
537 SCM_CRITICAL_SECTION_START
;
539 handle
= scm_hashq_create_handle_x (scm_protects
, obj
, scm_from_int (0));
540 SCM_SETCDR (handle
, scm_sum (SCM_CDR (handle
), scm_from_int (1)));
542 protected_obj_count
++;
544 SCM_CRITICAL_SECTION_END
;
550 /* Remove any protection for OBJ established by a prior call to
551 scm_protect_object. This function returns OBJ.
553 See scm_protect_object for more information. */
555 scm_gc_unprotect_object (SCM obj
)
559 /* This critical section barrier will be replaced by a mutex. */
560 /* njrev: and again. */
561 SCM_CRITICAL_SECTION_START
;
563 if (scm_gc_running_p
)
565 fprintf (stderr
, "scm_unprotect_object called during GC.\n");
569 handle
= scm_hashq_get_handle (scm_protects
, obj
);
571 if (scm_is_false (handle
))
573 fprintf (stderr
, "scm_unprotect_object called on unprotected object\n");
578 SCM count
= scm_difference (SCM_CDR (handle
), scm_from_int (1));
579 if (scm_is_eq (count
, scm_from_int (0)))
580 scm_hashq_remove_x (scm_protects
, obj
);
582 SCM_SETCDR (handle
, count
);
584 protected_obj_count
--;
586 SCM_CRITICAL_SECTION_END
;
592 scm_gc_register_root (SCM
*p
)
595 SCM key
= scm_from_ulong ((unsigned long) p
);
597 /* This critical section barrier will be replaced by a mutex. */
598 /* njrev: and again. */
599 SCM_CRITICAL_SECTION_START
;
601 handle
= scm_hashv_create_handle_x (scm_gc_registered_roots
, key
,
603 /* njrev: note also that the above can probably signal an error */
604 SCM_SETCDR (handle
, scm_sum (SCM_CDR (handle
), scm_from_int (1)));
606 SCM_CRITICAL_SECTION_END
;
610 scm_gc_unregister_root (SCM
*p
)
613 SCM key
= scm_from_ulong ((unsigned long) p
);
615 /* This critical section barrier will be replaced by a mutex. */
616 /* njrev: and again. */
617 SCM_CRITICAL_SECTION_START
;
619 handle
= scm_hashv_get_handle (scm_gc_registered_roots
, key
);
621 if (scm_is_false (handle
))
623 fprintf (stderr
, "scm_gc_unregister_root called on unregistered root\n");
628 SCM count
= scm_difference (SCM_CDR (handle
), scm_from_int (1));
629 if (scm_is_eq (count
, scm_from_int (0)))
630 scm_hashv_remove_x (scm_gc_registered_roots
, key
);
632 SCM_SETCDR (handle
, count
);
635 SCM_CRITICAL_SECTION_END
;
639 scm_gc_register_roots (SCM
*b
, unsigned long n
)
642 for (; p
< b
+ n
; ++p
)
643 scm_gc_register_root (p
);
647 scm_gc_unregister_roots (SCM
*b
, unsigned long n
)
650 for (; p
< b
+ n
; ++p
)
651 scm_gc_unregister_root (p
);
/* Nonzero while the process is shutting down (set elsewhere).  */
int scm_i_terminating;
660 MOVE THIS FUNCTION. IT DOES NOT HAVE ANYTHING TODO WITH GC.
663 /* Get an integer from an environment variable. */
/* Get an integer from environment variable VAR; return DEF when VAR is
   unset or its value does not start with a decimal integer.

   NOTE(review): the declarations of `end'/`res', the two early returns and
   the final return were missing from the garbled extraction and restored —
   confirm against upstream.  (The file's own comment notes this function
   does not belong in gc.c.)  */
int
scm_getenv_int (const char *var, int def)
{
  char *end = 0;
  char *val = getenv (var);
  long res = def;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  return res;
}
/* NOTE(review): incomplete fragment of scm_storage_prehistory.  Missing
   from this extraction: the return type, braces, the collector
   initialization (the comment near scm_init_gc says GC_INIT () is invoked
   here), the body of the GC_is_visible check, and the third hook-init
   argument lines.  Visible behavior: disable interior-pointer scanning,
   register tag displacements for tc3 cons/closure pointers, check that
   scm_sys_protects is visible to the collector, and initialize the five
   C-level GC hooks.  Left byte-identical.  */
679 scm_storage_prehistory ()
681 GC_all_interior_pointers
= 0;
685 /* We only need to register a displacement for those types for which the
686 higher bits of the type tag are used to store a pointer (that is, a
687 pointer to an 8-octet aligned region). For `scm_tc3_struct', this is
688 handled in `scm_alloc_struct ()'. */
689 GC_REGISTER_DISPLACEMENT (scm_tc3_cons
);
690 GC_REGISTER_DISPLACEMENT (scm_tc3_closure
);
693 if (!GC_is_visible (scm_sys_protects
))
696 scm_c_hook_init (&scm_before_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
697 scm_c_hook_init (&scm_before_mark_c_hook
, 0, SCM_C_HOOK_NORMAL
);
698 scm_c_hook_init (&scm_before_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
699 scm_c_hook_init (&scm_after_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
700 scm_c_hook_init (&scm_after_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
703 scm_i_pthread_mutex_t scm_i_gc_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
/* NOTE(review): interior fragment of an initialization routine (presumably
   scm_init_storage — its signature and braces are missing from this
   extraction, as are the loop heads around the two `j' countdowns and the
   #ifdef around on_exit).  Visible behavior: clear all scm_sys_protects
   slots to #f, then create the stand-in-procs weak table, the permobjs
   list, and the protects/registered-roots hash tables.  Left
   byte-identical.  */
710 j
= SCM_NUM_PROTECTS
;
712 scm_sys_protects
[--j
] = SCM_BOOL_F
;
714 j
= SCM_HEAP_SEG_SIZE
;
717 /* We can't have a cleanup handler since we have no thread to run it
724 on_exit (cleanup
, 0);
730 scm_stand_in_procs
= scm_make_weak_key_hash_table (scm_from_int (257));
731 scm_permobjs
= SCM_EOL
;
732 scm_protects
= scm_c_make_hash_table (31);
733 scm_gc_registered_roots
= scm_c_make_hash_table (31);
740 SCM scm_after_gc_hook
;
744 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
745 * is run after the gc, as soon as the asynchronous events are handled by the
749 gc_async_thunk (void)
751 scm_c_run_hook (scm_after_gc_hook
, SCM_EOL
);
752 return SCM_UNSPECIFIED
;
756 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
757 * the garbage collection. The only purpose of this function is to mark the
758 * gc_async (which will eventually lead to the execution of the
762 mark_gc_async (void * hook_data SCM_UNUSED
,
763 void *func_data SCM_UNUSED
,
764 void *data SCM_UNUSED
)
766 /* If cell access debugging is enabled, the user may choose to perform
767 * additional garbage collections after an arbitrary number of cell
768 * accesses. We don't want the scheme level after-gc-hook to be performed
769 * for each of these garbage collections for the following reason: The
770 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
771 * after-gc-hook was performed with every gc, and if the gc was performed
772 * after a very small number of cell accesses, then the number of cell
773 * accesses during the execution of the after-gc-hook will suffice to cause
774 * the execution of the next gc. Then, guile would keep executing the
775 * after-gc-hook over and over again, and would never come to do other
778 * To overcome this problem, if cell access debugging with additional
779 * garbage collections is enabled, the after-gc-hook is never run by the
780 * garbage collecter. When running guile with cell access debugging and the
781 * execution of the after-gc-hook is desired, then it is necessary to run
782 * the hook explicitly from the user code. This has the effect, that from
783 * the scheme level point of view it seems that garbage collection is
784 * performed with a much lower frequency than it actually is. Obviously,
785 * this will not work for code that depends on a fixed one to one
786 * relationship between the execution counts of the C level garbage
787 * collection hooks and the execution count of the scheme level
791 #if (SCM_DEBUG_CELL_ACCESSES == 1)
792 if (scm_debug_cells_gc_interval
== 0)
793 scm_system_async_mark (gc_async
);
795 scm_system_async_mark (gc_async
);
/* NOTE(review): incomplete fragment mapping a type tag to a human-readable
   name (NULL-returning paths and most switch cases, plus the return type
   and braces, are missing from this extraction).  Visible behavior: smob
   tags extract the smob number from bits 8..15 and index scm_smobs; other
   tags go through a switch on the low 7 bits.  Left byte-identical.  */
802 scm_i_tag_name (scm_t_bits tag
)
806 if (tag
== scm_tc_free_cell
)
810 int k
= 0xff & (tag
>> 8);
811 return (scm_smobs
[k
].name
);
815 switch (tag
) /* 7 bits */
819 case scm_tcs_cons_imcar
:
820 return "cons (immediate car)";
821 case scm_tcs_cons_nimcar
:
822 return "cons (non-immediate car)";
823 case scm_tcs_closures
:
828 return "weak vector";
833 return "compiled closure";
844 case scm_tc16_complex
:
845 return "complex number";
847 case scm_tc16_fraction
:
855 case scm_tc7_stringbuf
:
856 return "string buffer";
861 case scm_tc7_variable
:
871 return "smob"; /* should not occur. */
/* NOTE(review): interior fragment of scm_init_gc — the function's signature
   and braces, the gc_async declaration, and the trailing arguments of the
   scm_c_make_subr call are missing from this extraction.  Visible behavior:
   create and export the after-gc-hook, build the %gc-thunk subr, and attach
   mark_gc_async to the after-gc C hook.  Left byte-identical.  */
884 /* `GC_INIT ()' was invoked in `scm_storage_prehistory ()'. */
886 scm_after_gc_hook
= scm_permanent_object (scm_make_hook (SCM_INUM0
));
887 scm_c_define ("after-gc-hook", scm_after_gc_hook
);
889 gc_async
= scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0
,
892 scm_c_hook_add (&scm_after_gc_c_hook
, mark_gc_async
, NULL
, 0);
894 #include "libguile/gc.x"
/* NOTE(review): interior fragment of a stubbed-out scm_gc_sweep — the
   signature, braces, and #undef FUNC_NAME are missing from this extraction.
   Visible behavior: report that sweeping is a no-op (sweeping is handled by
   the underlying collector in this version).  Left byte-identical.  */
900 #define FUNC_NAME "scm_gc_sweep"
903 fprintf (stderr
, "%s: doing nothing\n", __FUNCTION__
);