/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING. If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE. If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way. To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice. */


/* #define DEBUGINFO */


#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <assert.h>

#ifdef __ia64__
#include <ucontext.h>
extern unsigned long * __libc_ia64_register_backing_store_base;
#endif

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/private-gc.h"
#include "libguile/validate.h"
#include "libguile/deprecation.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif



unsigned int scm_gc_running_p = 0;

#if (SCM_DEBUG_CELL_ACCESSES == 1)

/* Set this to != 0 if every cell that is accessed shall be checked:
 */
unsigned int scm_debug_cell_accesses_p = 1;

/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
 * the number of cell accesses after which a gc shall be called.
 */
static unsigned int debug_cells_gc_interval = 0;


/* Assert that the given object is a valid reference to a valid cell. This
 * test involves determining whether the object is a cell pointer, whether
 * this pointer actually points into a heap segment, and whether the cell
 * pointed to is not a free cell. Further, additional garbage collections may
 * be executed after a user-defined number of cell accesses. This helps to
 * find places in the C code where references are dropped for extremely short
 * periods.
 */

void
scm_assert_cell_valid (SCM cell)
{
  static unsigned int already_running = 0;

  if (!already_running)
    {
      already_running = 1;  /* set to avoid recursion */

      /*
        During GC, no user-code should be run, and the guile core should
        use non-protected accessors.
      */
      if (scm_gc_running_p)
        abort ();

      /*
        Only scm_in_heap_p is wildly expensive.
      */
      if (scm_debug_cell_accesses_p)
        if (!scm_in_heap_p (cell))
          {
            fprintf (stderr, "scm_assert_cell_valid: this object does not live in the heap: %lx\n",
                     (unsigned long) SCM_UNPACK (cell));
            abort ();
          }

      if (!SCM_GC_MARK_P (cell))
        {
          fprintf (stderr,
                   "scm_assert_cell_valid: this object is unmarked.\n"
                   "It has been garbage-collected in the last GC run: "
                   "%lx\n",
                   (unsigned long) SCM_UNPACK (cell));
          abort ();
        }


      /* If desired, perform additional garbage collections after a
       * user-defined number of cell accesses.
       */
      if (scm_debug_cell_accesses_p && debug_cells_gc_interval)
        {
          static unsigned int counter = 0;

          if (counter != 0)
            {
              --counter;
            }
          else
            {
              counter = debug_cells_gc_interval;
              scm_igc ("scm_assert_cell_valid");
            }
        }
      already_running = 0;  /* re-enable */
    }
}


SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
            "but no additional calls to garbage collection are issued.\n"
            "If @var{flag} is a number, cell access checking is enabled,\n"
            "with an additional garbage collection after the given\n"
            "number of cell accesses.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (SCM_FALSEP (flag)) {
    scm_debug_cell_accesses_p = 0;
  } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
    debug_cells_gc_interval = 0;
    scm_debug_cell_accesses_p = 1;
  } else if (SCM_INUMP (flag)) {
    long int f = SCM_INUM (flag);
    if (f <= 0) SCM_OUT_OF_RANGE (1, flag);
    debug_cells_gc_interval = f;
    scm_debug_cell_accesses_p = 1;
  } else {
    SCM_WRONG_TYPE_ARG (1, flag);
  }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
#else

/*
  Provide a stub, so people can use their Scheme code on non-debug
  versions of GUILE as well.
 */
SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "This procedure is used to turn on cell access checking in a\n"
            "debug build of GUILE. This build does not support that\n"
            "functionality, so calling it has no effect.\n")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{

  /*
    do nothing
   */

  scm_remember_upto_here (flag);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

\f

SCM scm_i_freelist = SCM_EOL;
SCM scm_i_freelist2 = SCM_EOL;


/* scm_mtrigger
 * is the number of bytes of malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap. Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* During collection, this accumulates structures which are to be freed.
 */
SCM scm_structs_to_free;

/* GC Statistics Keeping
 */
long scm_cells_allocated = 0;
unsigned long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_cells_collected_1 = 0; /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_times = 0;
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;
double scm_gc_cells_swept_acc = 0.;
int scm_gc_cell_yield_percentage = 0;
int scm_gc_malloc_yield_percentage = 0;


SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");
SCM_SYMBOL (sym_malloc_yield, "malloc-yield");
SCM_SYMBOL (sym_cell_yield, "cell-yield");




/* Number of calls to SCM_NEWCELL since startup. */
unsigned scm_newcell_count;
unsigned scm_newcell2_count;


/* {Scheme Interface to GC}
 */
extern int scm_gc_malloc_yield_percentage;
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.\n")
#define FUNC_NAME s_scm_gc_stats
{
  long i = 0;
  SCM heap_segs = SCM_EOL;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  int local_scm_gc_cell_yield_percentage;
  int local_scm_gc_malloc_yield_percentage;
  long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;
  unsigned long *bounds = 0;
  int table_size = scm_i_heap_segment_table_size;
  SCM_DEFER_INTS;

  /*
    temporarily store the numbers, so as not to cause GC.
   */

  bounds = malloc (sizeof (unsigned long) * table_size * 2);
  if (!bounds)
    abort ();
  for (i = table_size; i--; )
    {
      bounds[2*i] = (unsigned long) scm_i_heap_segment_table[i]->bounds[0];
      bounds[2*i+1] = (unsigned long) scm_i_heap_segment_table[i]->bounds[1];
    }


  /* Below, we cons to produce the resulting list. We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;

  local_scm_cells_allocated = scm_cells_allocated;

  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_malloc_yield_percentage = scm_gc_malloc_yield_percentage;
  local_scm_gc_cell_yield_percentage = scm_gc_cell_yield_percentage;

  local_scm_gc_cells_swept =
    (double) scm_gc_cells_swept_acc
    + (double) scm_gc_cells_swept;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc
    + (double) scm_gc_cells_swept
    - (double) scm_gc_cells_collected;

  for (i = table_size; i--;)
    {
      heap_segs = scm_cons (scm_cons (scm_ulong2num (bounds[2*i]),
                                      scm_ulong2num (bounds[2*i+1])),
                            heap_segs);
    }

  answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                       scm_cons (sym_cells_allocated, scm_long2num (local_scm_cells_allocated)),
                       scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                       scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                       scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                       scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
                       scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
                       scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
                       scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
                       scm_cons (sym_malloc_yield, scm_long2num (local_scm_gc_malloc_yield_percentage)),
                       scm_cons (sym_cell_yield, scm_long2num (local_scm_gc_cell_yield_percentage)),
                       scm_cons (sym_heap_segments, heap_segs),
                       SCM_UNDEFINED);
  SCM_ALLOW_INTS;

  free (bounds);
  return answer;
}
#undef FUNC_NAME

static void
gc_start_stats (const char *what SCM_UNUSED)
{
  t_before_gc = scm_c_get_internal_run_time ();

  scm_gc_cells_marked_acc += (double) scm_gc_cells_swept
    - (double) scm_gc_cells_collected;
  scm_gc_cells_swept_acc += (double) scm_gc_cells_swept;

  scm_gc_cell_yield_percentage = (scm_gc_cells_collected * 100) / SCM_HEAP_SIZE;

  scm_gc_cells_swept = 0;
  scm_gc_cells_collected_1 = scm_gc_cells_collected;

  /*
    CELLS SWEPT is another word for the number of cells that were
    examined during GC. YIELD is the number that we cleaned
    out. MARKED is the number that weren't cleaned.
   */
  scm_gc_cells_collected = 0;
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}

static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);

  ++scm_gc_times;
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that, for the lifetime of @var{obj}, is uniquely\n"
            "returned by this function for @var{obj}.")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


\f

/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

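/* scm_gc_for_newcell is called when the cells of FREELIST have run out.  It
   tries, in this order: lazily sweeping some more segments, growing the heap
   (if the growth heuristic allows it), running a full collection, and finally
   growing the heap unconditionally.  If none of these turns up a free cell,
   it aborts.  The first recovered cell is returned; the remaining ones are
   left in *FREE_CELLS.  */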
SCM
scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
{
  SCM cell;

  ++scm_ints_disabled;

  *free_cells = scm_i_sweep_some_segments (freelist);
  if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
    {
      freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL && !scm_block_gc)
    {
      /*
        With the advent of lazy sweeping, the GC yield is only known just
        before doing the GC.
      */
      scm_i_adjust_min_yield (freelist);

      /*
        out of fresh cells. Try to get some new ones.
      */

      scm_igc ("cells");

      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL)
    {
      /*
        failed getting new cells. Get new juice or die.
      */
      freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL)
    abort ();

  cell = *free_cells;

  --scm_ints_disabled;

  *free_cells = SCM_FREE_CELL_CDR (cell);
  return cell;
}


scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;

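/* scm_igc drives a full collection; WHAT is a short string naming the reason
   for the collection and is used only for debugging output.  Roughly: run the
   before-gc hooks, enter the critical section, finish any pending lazy
   sweeping (so the mark phase never scans garbage), mark all live objects,
   and then let sweeping happen lazily again afterwards.  */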
void
scm_igc (const char *what)
{
  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);

#ifdef DEBUGINFO
  fprintf (stderr, "gc reason %s\n", what);

  fprintf (stderr,
           SCM_NULLP (scm_i_freelist)
           ? "*"
           : (SCM_NULLP (scm_i_freelist2) ? "o" : "m"));
#endif

  /* During the critical section, only the current thread may run. */
  SCM_CRITICAL_SECTION_START;

  if (!scm_stack_base || scm_block_gc)
    {
      --scm_gc_running_p;
      return;
    }

  gc_start_stats (what);

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen. */
    abort ();

  ++scm_gc_heap_lock;

  /*
    Let's finish the sweep. The conservative GC might point into the
    garbage, and marking that would create a mess.
   */
  scm_i_sweep_all_segments ("GC");
  if (scm_mallocated < scm_i_deprecated_memory_return)
    {
      /* The byte count of allocated objects has underflowed. This is
         probably because you forgot to report the sizes of objects you
         have allocated, by calling scm_done_malloc or some such. When
         the GC freed them, it subtracted their size from
         scm_mallocated, which underflowed. */
      fprintf (stderr,
               "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
               "This is probably because the GC hasn't been correctly informed\n"
               "about object sizes\n");
      abort ();
    }
  scm_mallocated -= scm_i_deprecated_memory_return;



  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  scm_mark_all ();

  scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  /*
    Moved this lock upwards so that we can alloc new heap at the end of a sweep.

    DOCME: why should the heap be locked anyway?
   */
  --scm_gc_heap_lock;

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);
  gc_end_stats ();

  SCM_CRITICAL_SECTION_END;
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  --scm_gc_running_p;
}

\f




\f

\f
/* {GC Protection Helper Functions}
 */


/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below. These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters. Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called. In other words, place the call to
 * scm_remember_upto_here* _after_ the last code in your function that
 * depends on the existence of the scheme object.
 *
 * Example: We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function' in the code
 * below, because otherwise the characters belonging to str would be freed and
 * 'some_function' might access freed memory. To make sure that the compiler
 * keeps str alive on the stack or in a register such that it is visible to
 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
 * call to 'some_function'. Note that this would not be necessary if str were
 * used anyway after the call to 'some_function'.
 * char *chars = SCM_STRING_CHARS (str);
 * some_function (chars);
 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
 */

void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty. Protects a single object from garbage collection. */
}

void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty. Protects two objects from garbage collection. */
}

void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty. Protects any number of objects from garbage collection. */
}

/*
  These crazy functions prevent garbage collection
  of arguments after the first argument by
  ensuring they remain live throughout the
  function because they are used in the last
  line of the code block.
  It'd be better to have a nice compiler hint to
  aid the conservative stack-scanning GC. --03/09/00 gjb */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

int
scm_return_first_int (int i, ...)
{
  return i;
}

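/* A sketch of how scm_return_first is typically used (the names `result',
 * `str' and `do_something' are made up for illustration):
 *
 *   char *chars = SCM_STRING_CHARS (str);
 *   do_something (chars);
 *   return scm_return_first (result, str);
 *
 * Because str is passed as a trailing argument, the compiler has to keep it
 * visible to the conservative GC up to the call, so the characters pointed to
 * by chars cannot be collected while do_something runs.  */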

SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}


/* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
   i.e. it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected. It is an error to unprotect an object more often
   than it has been protected before. The function scm_protect_object returns
   OBJ. (A short usage sketch follows scm_gc_unprotect_object below.)
*/

/* Implementation note: For every object X, there is a counter which
   scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
*/

SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));

  SCM_REALLOW_INTS;

  return obj;
}


/* Remove any protection for OBJ established by a prior call to
   scm_protect_object. This function returns OBJ.

   See scm_protect_object for more information. */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_FALSEP (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
      if (SCM_EQ_P (count, SCM_MAKINUM (0)))
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, count);
    }

  SCM_REALLOW_INTS;

  return obj;
}

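/* A minimal sketch of the protect/unprotect pairing described above; the
 * object obj and the calling code are hypothetical:
 *
 *   SCM obj = scm_cons (SCM_MAKINUM (1), SCM_EOL);
 *   scm_gc_protect_object (obj);    (count in scm_protects: 1)
 *   scm_gc_protect_object (obj);    (nested call, count: 2)
 *   ...
 *   scm_gc_unprotect_object (obj);  (count: 1, still protected)
 *   scm_gc_unprotect_object (obj);  (count: 0, entry removed, collectable again)
 */
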
void
scm_gc_register_root (SCM *p)
{
  SCM handle;
  SCM key = scm_long2num ((long) p);

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));

  SCM_REALLOW_INTS;
}

void
scm_gc_unregister_root (SCM *p)
{
  SCM handle;
  SCM key = scm_long2num ((long) p);

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashv_get_handle (scm_gc_registered_roots, key);

  if (SCM_FALSEP (handle))
    {
      fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
      if (SCM_EQ_P (count, SCM_MAKINUM (0)))
        scm_hashv_remove_x (scm_gc_registered_roots, key);
      else
        SCM_SETCDR (handle, count);
    }

  SCM_REALLOW_INTS;
}

void
scm_gc_register_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_register_root (p);
}

void
scm_gc_unregister_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_unregister_root (p);
}

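/* A short sketch of when the root registration calls above are useful: an SCM
 * stored in a C global (or any location outside the stack and the heap) is
 * not found by the conservative scan unless its address is registered.  The
 * names below are hypothetical:
 *
 *   static SCM my_table;
 *
 *   void
 *   init_my_module (void)
 *   {
 *     my_table = scm_c_make_hash_table (31);
 *     scm_gc_register_root (&my_table);   (now scanned on every collection)
 *   }
 */
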
int scm_i_terminating;

/* called on process termination. */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  scm_i_terminating = 1;
  scm_flush_all_ports ();
}

\f


/*
  MOVE THIS FUNCTION. IT DOES NOT HAVE ANYTHING TO DO WITH GC.
 */

/* Get an integer from an environment variable. */
int
scm_getenv_int (const char *var, int def)
{
  char *end = 0;
  char *val = getenv (var);
  long res = def;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  return res;
}

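/* Example use of scm_getenv_int (the variable name is only illustrative):
 *
 *   int n = scm_getenv_int ("MY_TUNING_VAR", 32768);
 *
 * If MY_TUNING_VAR is set to "262144" in the environment this returns 262144;
 * if it is unset, or does not start with a decimal number, the default 32768
 * is returned instead.  */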

int
scm_init_storage ()
{
  size_t j;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_gc_init_freelist ();
  scm_gc_init_malloc ();

  j = SCM_HEAP_SEG_SIZE;



  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports. */
  scm_port_table = (scm_t_port **)
    malloc (sizeof (scm_t_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);
  scm_gc_registered_roots = scm_c_make_hash_table (31);

  return 0;
}

\f

SCM scm_after_gc_hook;

static SCM gc_async;

/* The function gc_async_thunk causes the execution of the after-gc-hook. It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}


/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection. The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void * hook_data SCM_UNUSED,
               void *func_data SCM_UNUSED,
               void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses. We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself. Thus, if the
   * after-gc-hook was performed with every gc, and if the gc was performed
   * after a very small number of cell accesses, then the number of cell
   * accesses during the execution of the after-gc-hook will suffice to cause
   * the execution of the next gc. Then, guile would keep executing the
   * after-gc-hook over and over again, and would never come to do other
   * things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collector. When running guile with cell access debugging and the
   * execution of the after-gc-hook is desired, then it is necessary to run
   * the hook explicitly from the user code. This has the effect that, from
   * the scheme level point of view, it seems that garbage collection is
   * performed with a much lower frequency than it actually is. Obviously,
   * this will not work for code that depends on a fixed one to one
   * relationship between the execution counts of the C level garbage
   * collection hooks and the execution count of the scheme level
   * after-gc-hook.
   */
#if (SCM_DEBUG_CELL_ACCESSES == 1)
  if (debug_cells_gc_interval == 0)
    scm_system_async_mark (gc_async);
#else
  scm_system_async_mark (gc_async);
#endif

  return NULL;
}

void
scm_init_gc ()
{
  SCM after_gc_thunk;


  scm_gc_init_mark ();

  scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
                                    gc_async_thunk);
  gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}


void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  scm_i_deprecated_memory_return = 0;

  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist);
  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist2);

  /*
    NOTHING HERE: LAZY SWEEPING !
   */
  scm_i_reset_segments ();

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  scm_i_freelist = SCM_EOL;
  scm_i_freelist2 = SCM_EOL;
}

#undef FUNC_NAME



/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/