libguile/gc.c
1 /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42
43 /* #define DEBUGINFO */
44
45
46 #include <stdio.h>
47 #include <errno.h>
48 #include <string.h>
49 #include <assert.h>
50
51 #ifdef __ia64__
52 #include <ucontext.h>
53 extern unsigned long * __libc_ia64_register_backing_store_base;
54 #endif
55
56 #include "libguile/_scm.h"
57 #include "libguile/eval.h"
58 #include "libguile/stime.h"
59 #include "libguile/stackchk.h"
60 #include "libguile/struct.h"
61 #include "libguile/smob.h"
62 #include "libguile/unif.h"
63 #include "libguile/async.h"
64 #include "libguile/ports.h"
65 #include "libguile/root.h"
66 #include "libguile/strings.h"
67 #include "libguile/vectors.h"
68 #include "libguile/weaks.h"
69 #include "libguile/hashtab.h"
70 #include "libguile/tags.h"
71
72 #include "libguile/private-gc.h"
73 #include "libguile/validate.h"
74 #include "libguile/deprecation.h"
75 #include "libguile/gc.h"
76
77 #ifdef GUILE_DEBUG_MALLOC
78 #include "libguile/debug-malloc.h"
79 #endif
80
81 #ifdef HAVE_MALLOC_H
82 #include <malloc.h>
83 #endif
84
85 #ifdef HAVE_UNISTD_H
86 #include <unistd.h>
87 #endif
88
89
90
91 unsigned int scm_gc_running_p = 0;
92
93 /* Set this to != 0 if every cell that is accessed shall be checked:
94 */
95 int scm_debug_cell_accesses_p = 0;
96 int scm_expensive_debug_cell_accesses_p = 0;
97
98 /* Set this to 0 if no additional gc's shall be performed, otherwise set it to
99 * the number of cell accesses after which a gc shall be called.
100 */
101 int scm_debug_cells_gc_interval = 0;
102
103 /*
104 Global variable, so you can switch it off at runtime by setting
105 scm_i_cell_validation_already_running.
106 */
107 int scm_i_cell_validation_already_running ;
108
109 #if (SCM_DEBUG_CELL_ACCESSES == 1)
110
111
112 /*
113
114 Assert that the given object is a valid reference to a valid cell.  This
115 test involves determining whether the object is a cell pointer, whether
116 this pointer actually points into a heap segment, and whether the cell
117 pointed to is not a free cell.  In addition, extra garbage collections
118 may be run after a user-defined number of cell accesses.  This helps to
119 find places in the C code where references are dropped for extremely
120 short periods.
121
122 */
123 void
124 scm_i_expensive_validation_check (SCM cell)
125 {
126 if (!scm_in_heap_p (cell))
127 {
128 fprintf (stderr, "scm_assert_cell_valid: this object does not live in the heap: %lx\n",
129 (unsigned long) SCM_UNPACK (cell));
130 abort ();
131 }
132
133 /* If desired, perform additional garbage collections after a user
134 * defined number of cell accesses.
135 */
136 if (scm_debug_cells_gc_interval)
137 {
138 static unsigned int counter = 0;
139
140 if (counter != 0)
141 {
142 --counter;
143 }
144 else
145 {
146 counter = scm_debug_cells_gc_interval;
147 scm_igc ("scm_assert_cell_valid");
148 }
149 }
150 }
151
152 void
153 scm_assert_cell_valid (SCM cell)
154 {
155 if (!scm_i_cell_validation_already_running && scm_debug_cell_accesses_p)
156 {
157 scm_i_cell_validation_already_running = 1; /* set to avoid recursion */
158
159 /*
160 During GC, no user-code should be run, and the guile core
161 should use non-protected accessors.
162 */
163 if (scm_gc_running_p)
164 return;
165
166 /*
167 The scm_in_heap_p check rescans the heap and is therefore wildly
168 expensive, so it is only done on explicit request.
169 */
170 if (scm_expensive_debug_cell_accesses_p)
171 scm_i_expensive_validation_check (cell);
172
173 if (!SCM_GC_MARK_P (cell))
174 {
175 fprintf (stderr,
176 "scm_assert_cell_valid: this object is unmarked.\n"
177 "It has been garbage-collected in the last GC run: "
178 "%lx\n",
179 (unsigned long) SCM_UNPACK (cell));
180 abort ();
181 }
182
183 scm_i_cell_validation_already_running = 0; /* re-enable */
184 }
185 }
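/* For orientation: when SCM_DEBUG_CELL_ACCESSES is set to 1, the cell
 * accessor macros in gc.h (SCM_VALIDATE_CELL, as assumed here) funnel every
 * cell access through the check above, roughly:
 *
 *   scm_assert_cell_valid (x);   // aborts if x is not a valid, live cell
 *
 * which is why enabling the flag slows every cell access down.
 */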
186
187
188
189 SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
190 (SCM flag),
191 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
192 "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
193 "but no additional calls to garbage collection are issued.\n"
194 "If @var{flag} is a number, strict cell access checking is enabled,\n"
195 "with an additional garbage collection after the given\n"
196 "number of cell accesses.\n"
197 "This procedure only exists when the compile-time flag\n"
198 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
199 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
200 {
201 if (SCM_FALSEP (flag))
202 {
203 scm_debug_cell_accesses_p = 0;
204 }
205 else if (SCM_EQ_P (flag, SCM_BOOL_T))
206 {
207 scm_debug_cells_gc_interval = 0;
208 scm_debug_cell_accesses_p = 1;
209 scm_expensive_debug_cell_accesses_p = 0;
210 }
211 else if (SCM_INUMP (flag))
212 {
213 long int f = SCM_INUM (flag);
214 if (f <= 0)
215 SCM_OUT_OF_RANGE (1, flag);
216 scm_debug_cells_gc_interval = f;
217 scm_debug_cell_accesses_p = 1;
218 scm_expensive_debug_cell_accesses_p = 1;
219 }
220 else
221 {
222 SCM_WRONG_TYPE_ARG (1, flag);
223 }
224 return SCM_UNSPECIFIED;
225 }
226 #undef FUNC_NAME
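/* Illustrative Scheme-level usage of the procedure above (only meaningful
 * in a build where SCM_DEBUG_CELL_ACCESSES is 1):
 *
 *   (set-debug-cell-accesses! #f)      ; disable cell access checking
 *   (set-debug-cell-accesses! #t)      ; cheap checks, no extra collections
 *   (set-debug-cell-accesses! 1000)    ; strict checks, plus a GC after
 *                                      ; every 1000 cell accesses
 */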
227 #else
228
229 /*
230 Provide a stub, so people can use their Scheme code on non-debug
231 versions of GUILE as well.
232 */
233 SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
234 (SCM flag),
235 "This procedure only has an effect when Guile is compiled with @code{SCM_DEBUG_CELL_ACCESSES}; this build was configured without it, so it does nothing.\n")
236 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
237 {
238
239 /*
240 do nothing
241 */
242 fprintf (stderr, "\nWARNING: GUILE was not compiled with SCM_DEBUG_CELL_ACCESSES\n");
243 scm_remember_upto_here (flag);
244 return SCM_UNSPECIFIED;
245 }
246 #undef FUNC_NAME
247
248 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
249
250 \f
251
252 SCM scm_i_freelist = SCM_EOL;
253 SCM scm_i_freelist2 = SCM_EOL;
254
255
256 /* scm_mtrigger
257 * is the number of bytes of malloc allocation needed to trigger gc.
258 */
259 unsigned long scm_mtrigger;
260
261 /* scm_gc_heap_lock
262 * If set, don't expand the heap. Set only during gc, during which no allocation
263 * is supposed to take place anyway.
264 */
265 int scm_gc_heap_lock = 0;
266
267 /* GC Blocking
268 * Don't pause for collection if this is set -- just
269 * expand the heap.
270 */
271 int scm_block_gc = 1;
272
273 /* During collection, this accumulates objects holding
274 * weak references.
275 */
276 SCM scm_weak_vectors;
277
278 /* GC Statistics Keeping
279 */
280 unsigned long scm_cells_allocated = 0;
281 unsigned long scm_mallocated = 0;
282 unsigned long scm_gc_cells_collected;
283 unsigned long scm_gc_cells_collected_1 = 0; /* previous GC yield */
284 unsigned long scm_gc_malloc_collected;
285 unsigned long scm_gc_ports_collected;
286 unsigned long scm_gc_time_taken = 0;
287 static unsigned long t_before_gc;
288 unsigned long scm_gc_mark_time_taken = 0;
289 unsigned long scm_gc_times = 0;
290 unsigned long scm_gc_cells_swept = 0;
291 double scm_gc_cells_marked_acc = 0.;
292 double scm_gc_cells_swept_acc = 0.;
293 int scm_gc_cell_yield_percentage =0;
294 int scm_gc_malloc_yield_percentage = 0;
295
296
297 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
298 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
299 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
300 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
301 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
302 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
303 SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
304 SCM_SYMBOL (sym_times, "gc-times");
305 SCM_SYMBOL (sym_cells_marked, "cells-marked");
306 SCM_SYMBOL (sym_cells_swept, "cells-swept");
307 SCM_SYMBOL (sym_malloc_yield, "malloc-yield");
308 SCM_SYMBOL (sym_cell_yield, "cell-yield");
309
310
311
312
313 /* Number of calls to SCM_NEWCELL since startup. */
314 unsigned scm_newcell_count;
315 unsigned scm_newcell2_count;
316
317
318 /* {Scheme Interface to GC}
319 */
320 extern int scm_gc_malloc_yield_percentage;
321 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
322 (),
323 "Return an association list of statistics about Guile's current\n"
324 "use of storage.\n")
325 #define FUNC_NAME s_scm_gc_stats
326 {
327 long i = 0;
328 SCM heap_segs = SCM_EOL ;
329 unsigned long int local_scm_mtrigger;
330 unsigned long int local_scm_mallocated;
331 unsigned long int local_scm_heap_size;
332 int local_scm_gc_cell_yield_percentage;
333 int local_scm_gc_malloc_yield_percentage;
334 unsigned long int local_scm_cells_allocated;
335 unsigned long int local_scm_gc_time_taken;
336 unsigned long int local_scm_gc_times;
337 unsigned long int local_scm_gc_mark_time_taken;
338 double local_scm_gc_cells_swept;
339 double local_scm_gc_cells_marked;
340 SCM answer;
341 unsigned long *bounds = 0;
342 int table_size = scm_i_heap_segment_table_size;
343 SCM_DEFER_INTS;
344
345 /*
346 temporarily store the numbers, so as not to cause GC.
347 */
348
349 bounds = malloc (sizeof (unsigned long) * table_size * 2);
350 if (!bounds)
351 abort();
352 for (i = table_size; i--; )
353 {
354 bounds[2*i] = (unsigned long)scm_i_heap_segment_table[i]->bounds[0];
355 bounds[2*i+1] = (unsigned long)scm_i_heap_segment_table[i]->bounds[1];
356 }
357
358
359 /* Below, we cons to produce the resulting list. We want a snapshot of
360 * the heap situation before consing.
361 */
362 local_scm_mtrigger = scm_mtrigger;
363 local_scm_mallocated = scm_mallocated;
364 local_scm_heap_size = SCM_HEAP_SIZE;
365
366 local_scm_cells_allocated = scm_cells_allocated;
367
368 local_scm_gc_time_taken = scm_gc_time_taken;
369 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
370 local_scm_gc_times = scm_gc_times;
371 local_scm_gc_malloc_yield_percentage = scm_gc_malloc_yield_percentage;
372 local_scm_gc_cell_yield_percentage= scm_gc_cell_yield_percentage;
373
374 local_scm_gc_cells_swept =
375 (double) scm_gc_cells_swept_acc
376 + (double) scm_gc_cells_swept;
377 local_scm_gc_cells_marked = scm_gc_cells_marked_acc
378 +(double) scm_gc_cells_swept
379 -(double) scm_gc_cells_collected;
380
381 for (i = table_size; i--;)
382 {
383 heap_segs = scm_cons (scm_cons (scm_ulong2num (bounds[2*i]),
384 scm_ulong2num (bounds[2*i+1])),
385 heap_segs);
386 }
387
388 answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
389 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
390 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
391 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
392 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
393 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
394 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
395 scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
396 scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
397 scm_cons (sym_malloc_yield, scm_long2num (local_scm_gc_malloc_yield_percentage)),
398 scm_cons (sym_cell_yield, scm_long2num (local_scm_gc_cell_yield_percentage)),
399 scm_cons (sym_heap_segments, heap_segs),
400 SCM_UNDEFINED);
401 SCM_ALLOW_INTS;
402
403 free (bounds);
404 return answer;
405 }
406 #undef FUNC_NAME
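/* Illustrative use from Scheme: the result is an ordinary association list,
 * so individual entries can be picked out with assq, e.g.
 *
 *   (cdr (assq 'cells-allocated (gc-stats)))   ; => scm_cells_allocated
 *   (cdr (assq 'gc-times (gc-stats)))          ; => scm_gc_times
 */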
407
408 static void
409 gc_start_stats (const char *what SCM_UNUSED)
410 {
411 t_before_gc = scm_c_get_internal_run_time ();
412
413 scm_gc_cells_marked_acc += (double) scm_gc_cells_swept
414 - (double) scm_gc_cells_collected;
415 scm_gc_cells_swept_acc += (double) scm_gc_cells_swept;
416
417 scm_gc_cell_yield_percentage = ( scm_gc_cells_collected * 100 ) / SCM_HEAP_SIZE;
418
419 scm_gc_cells_swept = 0;
420 scm_gc_cells_collected_1 = scm_gc_cells_collected;
421
422 /*
423 CELLS SWEPT is another word for the number of cells that were
424 examined during GC. YIELD is the number that we cleaned
425 out. MARKED is the number that weren't cleaned.
426 */
427 scm_gc_cells_collected = 0;
428 scm_gc_malloc_collected = 0;
429 scm_gc_ports_collected = 0;
430 }
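/* A small worked example of the bookkeeping above, with invented numbers:
 * if the previous cycle swept 10000 cells and collected 2500 of them, then
 * 7500 cells were marked (swept - collected), the two accumulators grow by
 * 7500 and 10000 respectively, and with a heap of 50000 cells the cell
 * yield comes out as 2500 * 100 / 50000 = 5 percent.
 */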
431
432 static void
433 gc_end_stats ()
434 {
435 unsigned long t = scm_c_get_internal_run_time ();
436 scm_gc_time_taken += (t - t_before_gc);
437
438 ++scm_gc_times;
439 }
440
441
442 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
443 (SCM obj),
444 "Return an integer that, for the lifetime of @var{obj}, is uniquely\n"
445 "returned by this function for @var{obj}.")
446 #define FUNC_NAME s_scm_object_address
447 {
448 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
449 }
450 #undef FUNC_NAME
451
452
453 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
454 (),
455 "Scans all SCM objects and reclaims for further use those that are\n"
456 "no longer accessible.")
457 #define FUNC_NAME s_scm_gc
458 {
459 SCM_DEFER_INTS;
460 scm_igc ("call");
461 SCM_ALLOW_INTS;
462 return SCM_UNSPECIFIED;
463 }
464 #undef FUNC_NAME
465
466
467 \f
468
469 /* When we get POSIX threads support, the master will be global and
470 * common while the freelist will be individual for each thread.
471 */
472
473 SCM
474 scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
475 {
476 SCM cell;
477
478 ++scm_ints_disabled;
479
480 *free_cells = scm_i_sweep_some_segments (freelist);
481 if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
482 {
483 freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
484 *free_cells = scm_i_sweep_some_segments (freelist);
485 }
486
487 if (*free_cells == SCM_EOL && !scm_block_gc)
488 {
489 /*
490 With the advent of lazy sweeping, the GC yield is only known just
491 before doing the GC.
492 */
493 scm_i_adjust_min_yield (freelist);
494
495 /*
496 out of fresh cells. Try to get some new ones.
497 */
498
499 scm_igc ("cells");
500
501 *free_cells = scm_i_sweep_some_segments (freelist);
502 }
503
504 if (*free_cells == SCM_EOL)
505 {
506 /*
507 failed getting new cells. Get new juice or die.
508 */
509 freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
510 *free_cells = scm_i_sweep_some_segments (freelist);
511 }
512
513 if (*free_cells == SCM_EOL)
514 abort ();
515
516 cell = *free_cells;
517
518 --scm_ints_disabled;
519
520 *free_cells = SCM_FREE_CELL_CDR (cell);
521
522
523 return cell;
524 }
525
526
527 scm_t_c_hook scm_before_gc_c_hook;
528 scm_t_c_hook scm_before_mark_c_hook;
529 scm_t_c_hook scm_before_sweep_c_hook;
530 scm_t_c_hook scm_after_sweep_c_hook;
531 scm_t_c_hook scm_after_gc_c_hook;
532
533 void
534 scm_igc (const char *what)
535 {
536 ++scm_gc_running_p;
537 scm_c_hook_run (&scm_before_gc_c_hook, 0);
538
539 #ifdef DEBUGINFO
540 fprintf (stderr,"gc reason %s\n", what);
541
542 fprintf (stderr,
543 SCM_NULLP (scm_i_freelist)
544 ? "*"
545 : (SCM_NULLP (scm_i_freelist2) ? "o" : "m"));
546 #endif
547
548 /* During the critical section, only the current thread may run. */
549 SCM_CRITICAL_SECTION_START;
550
551 if (!scm_root || !scm_stack_base || scm_block_gc)
552 {
553 --scm_gc_running_p;
554 return;
555 }
556
557 gc_start_stats (what);
558
559 if (scm_gc_heap_lock)
560 /* We've invoked the collector while a GC is already in progress.
561 That should never happen. */
562 abort ();
563
564 ++scm_gc_heap_lock;
565
566 /*
567 Let's finish the sweep. The conservative GC might point into the
568 garbage, and marking that would create a mess.
569 */
570 scm_i_sweep_all_segments("GC");
571 if (scm_mallocated < scm_i_deprecated_memory_return)
572 {
573 /* The byte count of allocated objects has underflowed. This is
574 probably because you forgot to report the sizes of objects you
575 have allocated, by calling scm_done_malloc or some such. When
576 the GC freed them, it subtracted their size from
577 scm_mallocated, which underflowed. */
578 fprintf (stderr,
579 "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
580 "This is probably because the GC hasn't been correctly informed\n"
581 "about object sizes\n");
582 abort ();
583 }
584 scm_mallocated -= scm_i_deprecated_memory_return;
585
586
587
588 scm_c_hook_run (&scm_before_mark_c_hook, 0);
589
590 scm_mark_all ();
591
592 scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);
593
594 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
595
596 /*
597 Moved this lock upwards so that we can alloc new heap at the end of a sweep.
598
599 DOCME: why should the heap be locked anyway?
600 */
601 --scm_gc_heap_lock;
602
603 scm_gc_sweep ();
604
605
606 /*
607 TODO: this hook should probably be moved to just before the mark,
608 since that's where the sweep is finished in lazy sweeping.
609 */
610 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
611 gc_end_stats ();
612
613 SCM_CRITICAL_SECTION_END;
614
615 /*
616 See above.
617 */
618 scm_c_hook_run (&scm_after_gc_c_hook, 0);
619 --scm_gc_running_p;
620
621 /*
622 For debugging purposes, you could do
623 scm_i_sweep_all_segments("debug"), but then the remains of the
624 cell aren't left to analyse.
625 */
626 }
627
628 \f
629 /* {GC Protection Helper Functions}
630 */
631
632
633 /*
634 * If within a function you need to protect one or more scheme objects from
635 * garbage collection, pass them as parameters to one of the
636 * scm_remember_upto_here* functions below. These functions don't do
637 * anything, but since the compiler does not know that they are actually
638 * no-ops, it will generate code that calls these functions with the given
639 * parameters. Therefore, you can be sure that the compiler will keep those
640 * scheme values alive (on the stack or in a register) up to the point where
641 * scm_remember_upto_here* is called.  In other words, place the call to
642 * scm_remember_upto_here* _after_ the last code in your function that
643 * depends on the scheme object still existing.
644 *
645 * Example: We want to make sure that the string object str does not get
646 * garbage collected during the execution of 'some_function' in the code
647 * below, because otherwise the characters belonging to str would be freed and
648 * 'some_function' might access freed memory. To make sure that the compiler
649 * keeps str alive on the stack or in a register such that it is visible to
650 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
651 * call to 'some_function'. Note that this would not be necessary if str was
652 * used anyway after the call to 'some_function'.
653 * char *chars = SCM_STRING_CHARS (str);
654 * some_function (chars);
655 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
656 */
657
658 void
659 scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
660 {
661 /* Empty. Protects a single object from garbage collection. */
662 }
663
664 void
665 scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
666 {
667 /* Empty. Protects two objects from garbage collection. */
668 }
669
670 void
671 scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
672 {
673 /* Empty. Protects any number of objects from garbage collection. */
674 }
675
676 /*
677 These crazy functions prevent garbage collection
678 of arguments after the first argument by
679 ensuring they remain live throughout the
680 function because they are used in the last
681 line of the code block.
682 It'd be better to have a nice compiler hint to
683 aid the conservative stack-scanning GC. --03/09/00 gjb */
684 SCM
685 scm_return_first (SCM elt, ...)
686 {
687 return elt;
688 }
689
690 int
691 scm_return_first_int (int i, ...)
692 {
693 return i;
694 }
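/* Illustrative use of scm_return_first (use_chars is a hypothetical
 * consumer of the raw character pointer):
 *
 *   use_chars (SCM_STRING_CHARS (str));
 *   return scm_return_first (result, str);   // str stays live up to here
 */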
695
696
697 SCM
698 scm_permanent_object (SCM obj)
699 {
700 SCM_REDEFER_INTS;
701 scm_permobjs = scm_cons (obj, scm_permobjs);
702 SCM_REALLOW_INTS;
703 return obj;
704 }
705
706
707 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
708 other references are dropped, until the object is unprotected by calling
709 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
710 i. e. it is possible to protect the same object several times, but it is
711 necessary to unprotect the object the same number of times to actually get
712 the object unprotected. It is an error to unprotect an object more often
713 than it has been protected before. The function scm_protect_object returns
714 OBJ.
715 */
716
717 /* Implementation note: For every object X, there is a counter which
718 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
719 */
720
721 SCM
722 scm_gc_protect_object (SCM obj)
723 {
724 SCM handle;
725
726 /* This critical section barrier will be replaced by a mutex. */
727 SCM_REDEFER_INTS;
728
729 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
730 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
731
732 SCM_REALLOW_INTS;
733
734 return obj;
735 }
736
737
738 /* Remove any protection for OBJ established by a prior call to
739 scm_protect_object. This function returns OBJ.
740
741 See scm_protect_object for more information. */
742 SCM
743 scm_gc_unprotect_object (SCM obj)
744 {
745 SCM handle;
746
747 /* This critical section barrier will be replaced by a mutex. */
748 SCM_REDEFER_INTS;
749
750 handle = scm_hashq_get_handle (scm_protects, obj);
751
752 if (SCM_FALSEP (handle))
753 {
754 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
755 abort ();
756 }
757 else
758 {
759 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
760 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
761 scm_hashq_remove_x (scm_protects, obj);
762 else
763 SCM_SETCDR (handle, count);
764 }
765
766 SCM_REALLOW_INTS;
767
768 return obj;
769 }
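/* Illustrative (hypothetical) use of the protect/unprotect pair: a C module
 * that stores an SCM in a global variable, which the conservative stack
 * scan does not see, keeps the object protected for as long as it is stored.
 *
 *   static SCM my_callback = SCM_BOOL_F;          // hypothetical global
 *
 *   void set_callback (SCM proc)
 *   {
 *     if (!SCM_FALSEP (my_callback))
 *       scm_gc_unprotect_object (my_callback);    // drop old protection
 *     my_callback = scm_gc_protect_object (proc); // proc now survives GC
 *   }
 */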
770
771 void
772 scm_gc_register_root (SCM *p)
773 {
774 SCM handle;
775 SCM key = scm_long2num ((long) p);
776
777 /* This critical section barrier will be replaced by a mutex. */
778 SCM_REDEFER_INTS;
779
780 handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0));
781 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
782
783 SCM_REALLOW_INTS;
784 }
785
786 void
787 scm_gc_unregister_root (SCM *p)
788 {
789 SCM handle;
790 SCM key = scm_long2num ((long) p);
791
792 /* This critical section barrier will be replaced by a mutex. */
793 SCM_REDEFER_INTS;
794
795 handle = scm_hashv_get_handle (scm_gc_registered_roots, key);
796
797 if (SCM_FALSEP (handle))
798 {
799 fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
800 abort ();
801 }
802 else
803 {
804 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
805 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
806 scm_hashv_remove_x (scm_gc_registered_roots, key);
807 else
808 SCM_SETCDR (handle, count);
809 }
810
811 SCM_REALLOW_INTS;
812 }
813
814 void
815 scm_gc_register_roots (SCM *b, unsigned long n)
816 {
817 SCM *p = b;
818 for (; p < b + n; ++p)
819 scm_gc_register_root (p);
820 }
821
822 void
823 scm_gc_unregister_roots (SCM *b, unsigned long n)
824 {
825 SCM *p = b;
826 for (; p < b + n; ++p)
827 scm_gc_unregister_root (p);
828 }
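/* Illustrative (hypothetical) use of root registration: SCM values kept in
 * malloc'd memory are invisible to the conservative stack scan, so their
 * locations are registered as roots while they are in use.
 *
 *   SCM *cache = malloc (n * sizeof (SCM));
 *   for (i = 0; i < n; i++)
 *     cache[i] = SCM_BOOL_F;
 *   scm_gc_register_roots (cache, n);
 *   ...
 *   scm_gc_unregister_roots (cache, n);   // before freeing the block
 *   free (cache);
 */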
829
830 int scm_i_terminating;
831
832 /* called on process termination. */
833 #ifdef HAVE_ATEXIT
834 static void
835 cleanup (void)
836 #else
837 #ifdef HAVE_ON_EXIT
838 extern int on_exit (void (*procp) (), int arg);
839
840 static void
841 cleanup (int status, void *arg)
842 #else
843 #error Dont know how to setup a cleanup handler on your system.
844 #endif
845 #endif
846 {
847 scm_i_terminating = 1;
848 scm_flush_all_ports ();
849 }
850
851 \f
852
853
854 /*
855 MOVE THIS FUNCTION. IT DOES NOT HAVE ANYTHING TO DO WITH GC.
856 */
857
858 /* Get an integer from an environment variable. */
859 int
860 scm_getenv_int (const char *var, int def)
861 {
862 char *end = 0;
863 char *val = getenv (var);
864 long res = def;
865 if (!val)
866 return def;
867 res = strtol (val, &end, 10);
868 if (end == val)
869 return def;
870 return res;
871 }
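/* Illustrative use (the environment variable name is made up):
 *
 *   int interval = scm_getenv_int ("GUILE_GC_INTERVAL", 10000);
 *
 * yields 10000 when GUILE_GC_INTERVAL is unset or not parseable as a number.
 */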
872
873
874 int
875 scm_init_storage ()
876 {
877 size_t j;
878
879 j = SCM_NUM_PROTECTS;
880 while (j)
881 scm_sys_protects[--j] = SCM_BOOL_F;
882 scm_block_gc = 1;
883
884 scm_gc_init_freelist();
885 scm_gc_init_malloc ();
886
887 j = SCM_HEAP_SEG_SIZE;
888
889
890
891 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
892 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
893 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
894 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
895 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
896
897 /* Initialise the list of ports. */
898 scm_i_port_table = (scm_t_port **)
899 malloc (sizeof (scm_t_port *) * scm_i_port_table_room);
900 if (!scm_i_port_table)
901 return 1;
902
903 #ifdef HAVE_ATEXIT
904 atexit (cleanup);
905 #else
906 #ifdef HAVE_ON_EXIT
907 on_exit (cleanup, 0);
908 #endif
909 #endif
910
911 scm_stand_in_procs = SCM_EOL;
912 scm_permobjs = SCM_EOL;
913 scm_protects = scm_c_make_hash_table (31);
914 scm_gc_registered_roots = scm_c_make_hash_table (31);
915
916 return 0;
917 }
918
919 \f
920
921 SCM scm_after_gc_hook;
922
923 static SCM gc_async;
924
925 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
926 * is run after the gc, as soon as the asynchronous events are handled by the
927 * evaluator.
928 */
929 static SCM
930 gc_async_thunk (void)
931 {
932 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
933 return SCM_UNSPECIFIED;
934 }
935
936
937 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
938 * the garbage collection. The only purpose of this function is to mark the
939 * gc_async (which will eventually lead to the execution of the
940 * gc_async_thunk).
941 */
942 static void *
943 mark_gc_async (void * hook_data SCM_UNUSED,
944 void *func_data SCM_UNUSED,
945 void *data SCM_UNUSED)
946 {
947 /* If cell access debugging is enabled, the user may choose to perform
948 * additional garbage collections after an arbitrary number of cell
949 * accesses. We don't want the scheme level after-gc-hook to be performed
950 * for each of these garbage collections for the following reason: The
951 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
952 * after-gc-hook was performed with every gc, and if the gc was performed
953 * after a very small number of cell accesses, then the number of cell
954 * accesses during the execution of the after-gc-hook will suffice to cause
955 * the execution of the next gc. Then, guile would keep executing the
956 * after-gc-hook over and over again, and would never come to do other
957 * things.
958 *
959 * To overcome this problem, if cell access debugging with additional
960 * garbage collections is enabled, the after-gc-hook is never run by the
961 * garbage collector.  When running guile with cell access debugging and
962 * execution of the after-gc-hook is desired, it is necessary to run the
963 * hook explicitly from user code.  This has the effect that, from the
964 * scheme level point of view, it seems that garbage collection is
965 * performed with a much lower frequency than it actually is. Obviously,
966 * this will not work for code that depends on a fixed one to one
967 * relationship between the execution counts of the C level garbage
968 * collection hooks and the execution count of the scheme level
969 * after-gc-hook.
970 */
971 #if (SCM_DEBUG_CELL_ACCESSES == 1)
972 if (scm_debug_cells_gc_interval == 0)
973 scm_system_async_mark (gc_async);
974 #else
975 scm_system_async_mark (gc_async);
976 #endif
977
978 return NULL;
979 }
980
981 void
982 scm_init_gc ()
983 {
984 SCM after_gc_thunk;
985
986
987 scm_gc_init_mark ();
988
989 scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
990 scm_c_define ("after-gc-hook", scm_after_gc_hook);
991
992 after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
993 gc_async_thunk);
994 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
995
996 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
997
998 #include "libguile/gc.x"
999 }
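/* After scm_init_gc has run, Scheme code can attach to the hook defined
 * above; an illustrative one-liner:
 *
 *   (add-hook! after-gc-hook (lambda () (display "gc done\n")))
 */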
1000
1001
1002 void
1003 scm_gc_sweep (void)
1004 #define FUNC_NAME "scm_gc_sweep"
1005 {
1006 scm_i_deprecated_memory_return = 0;
1007
1008 scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist);
1009 scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist2);
1010
1011 /*
1012 NOTHING HERE: LAZY SWEEPING !
1013 */
1014 scm_i_reset_segments ();
1015
1016 /* When we move to POSIX threads private freelists should probably
1017 be GC-protected instead. */
1018 scm_i_freelist = SCM_EOL;
1019 scm_i_freelist2 = SCM_EOL;
1020 }
1021
1022 #undef FUNC_NAME
1023
1024
1025
1026 /*
1027 Local Variables:
1028 c-file-style: "gnu"
1029 End:
1030 */