/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */


/* #define DEBUGINFO */

#if HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <assert.h>

#ifdef __ia64__
#include <ucontext.h>
extern unsigned long * __libc_ia64_register_backing_store_base;
#endif

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/private-gc.h"
#include "libguile/validate.h"
#include "libguile/deprecation.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif



unsigned int scm_gc_running_p = 0;

/* Lock this mutex before doing lazy sweeping.
 */
scm_t_rec_mutex scm_i_sweep_mutex;

/* Set this to a non-zero value if every cell that is accessed shall be
 * checked:
 */
int scm_debug_cell_accesses_p = 0;
int scm_expensive_debug_cell_accesses_p = 0;

/* Set this to 0 if no additional GCs shall be performed, otherwise set it to
 * the number of cell accesses after which a GC shall be called.
 */
int scm_debug_cells_gc_interval = 0;

/*
  Global variable, so you can switch it off at runtime by setting
  scm_i_cell_validation_already_running.
 */
int scm_i_cell_validation_already_running;

#if (SCM_DEBUG_CELL_ACCESSES == 1)


/*

  Assert that the given object is a valid reference to a valid cell.  This
  test involves determining whether the object is a cell pointer, whether
  this pointer actually points into a heap segment and whether the cell
  pointed to is not a free cell.  Further, additional garbage collections may
  get executed after a user-defined number of cell accesses.  This helps to
  find places in the C code where references are dropped for extremely short
  periods.

*/
void
scm_i_expensive_validation_check (SCM cell)
{
  if (!scm_in_heap_p (cell))
    {
      fprintf (stderr, "scm_assert_cell_valid: this object does not live in the heap: %lx\n",
               (unsigned long) SCM_UNPACK (cell));
      abort ();
    }

  /* If desired, perform additional garbage collections after a
   * user-defined number of cell accesses.
   */
  if (scm_debug_cells_gc_interval)
    {
      static unsigned int counter = 0;

      if (counter != 0)
        {
          --counter;
        }
      else
        {
          counter = scm_debug_cells_gc_interval;
          scm_igc ("scm_assert_cell_valid");
        }
    }
}

void
scm_assert_cell_valid (SCM cell)
{
  if (!scm_i_cell_validation_already_running && scm_debug_cell_accesses_p)
    {
      scm_i_cell_validation_already_running = 1;  /* set to avoid recursion */

      /*
        During GC, no user-code should be run, and the guile core
        should use non-protected accessors.
      */
      if (scm_gc_running_p)
        return;

      /*
        Only scm_i_expensive_validation_check () is wildly expensive,
        since it rescans the heap via scm_in_heap_p ().
      */
      if (scm_expensive_debug_cell_accesses_p)
        scm_i_expensive_validation_check (cell);

      if (!SCM_GC_MARK_P (cell))
        {
          fprintf (stderr,
                   "scm_assert_cell_valid: this object is unmarked.\n"
                   "It has been garbage-collected in the last GC run: "
                   "%lx\n",
                   (unsigned long) SCM_UNPACK (cell));
          abort ();
        }

      scm_i_cell_validation_already_running = 0;  /* re-enable */
    }
}



SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
            "but no additional calls to garbage collection are issued.\n"
            "If @var{flag} is a number, strict cell access checking is enabled,\n"
            "with an additional garbage collection after the given\n"
            "number of cell accesses.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (scm_is_false (flag))
    {
      scm_debug_cell_accesses_p = 0;
    }
  else if (scm_is_eq (flag, SCM_BOOL_T))
    {
      scm_debug_cells_gc_interval = 0;
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 0;
    }
  else
    {
      scm_debug_cells_gc_interval = scm_to_signed_integer (flag, 0, INT_MAX);
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 1;
    }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
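
/* Illustrative usage from the REPL, assuming a build with
   SCM_DEBUG_CELL_ACCESSES set to 1.  These calls merely restate the
   docstring above; they are not part of the original file:

     (set-debug-cell-accesses! #f)    ; disable all cell access checking
     (set-debug-cell-accesses! #t)    ; cheap checking, no extra GCs
     (set-debug-cell-accesses! 1000)  ; expensive checking, GC every 1000 accesses
*/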


#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */



scm_t_key scm_i_freelist;
scm_t_key scm_i_freelist2;


/* scm_mtrigger
 * is the number of bytes of malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no
 * allocation is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
unsigned long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_cells_collected_1 = 0; /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_times = 0;
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;
double scm_gc_cells_swept_acc = 0.;
int scm_gc_cell_yield_percentage = 0;
int scm_gc_malloc_yield_percentage = 0;
unsigned long protected_obj_count = 0;


SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");
SCM_SYMBOL (sym_malloc_yield, "malloc-yield");
SCM_SYMBOL (sym_cell_yield, "cell-yield");
SCM_SYMBOL (sym_protected_objects, "protected-objects");




/* Number of calls to SCM_NEWCELL since startup.  */
unsigned scm_newcell_count;
unsigned scm_newcell2_count;


/* {Scheme Interface to GC}
 */
extern int scm_gc_malloc_yield_percentage;
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.\n")
#define FUNC_NAME s_scm_gc_stats
{
  long i = 0;
  SCM heap_segs = SCM_EOL;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  int local_scm_gc_cell_yield_percentage;
  int local_scm_gc_malloc_yield_percentage;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_protected_obj_count;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;
  unsigned long *bounds = 0;
  int table_size = scm_i_heap_segment_table_size;
  SCM_DEFER_INTS;

  /*
    Temporarily store the numbers, so as not to cause GC.
   */

  /* Two unsigned long bounds are saved per heap segment.  */
  bounds = malloc (sizeof (unsigned long) * table_size * 2);
  if (!bounds)
    abort ();
  for (i = table_size; i--; )
    {
      bounds[2*i] = (unsigned long) scm_i_heap_segment_table[i]->bounds[0];
      bounds[2*i+1] = (unsigned long) scm_i_heap_segment_table[i]->bounds[1];
    }


  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;

  local_scm_cells_allocated = scm_cells_allocated;

  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_malloc_yield_percentage = scm_gc_malloc_yield_percentage;
  local_scm_gc_cell_yield_percentage = scm_gc_cell_yield_percentage;
  local_protected_obj_count = protected_obj_count;
  local_scm_gc_cells_swept =
    (double) scm_gc_cells_swept_acc
    + (double) scm_gc_cells_swept;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc
    + (double) scm_gc_cells_swept
    - (double) scm_gc_cells_collected;

  for (i = table_size; i--;)
    {
      heap_segs = scm_cons (scm_cons (scm_from_ulong (bounds[2*i]),
                                      scm_from_ulong (bounds[2*i+1])),
                            heap_segs);
    }

  answer =
    scm_list_n (scm_cons (sym_gc_time_taken,
                          scm_from_ulong (local_scm_gc_time_taken)),
                scm_cons (sym_cells_allocated,
                          scm_from_ulong (local_scm_cells_allocated)),
                scm_cons (sym_heap_size,
                          scm_from_ulong (local_scm_heap_size)),
                scm_cons (sym_mallocated,
                          scm_from_ulong (local_scm_mallocated)),
                scm_cons (sym_mtrigger,
                          scm_from_ulong (local_scm_mtrigger)),
                scm_cons (sym_times,
                          scm_from_ulong (local_scm_gc_times)),
                scm_cons (sym_gc_mark_time_taken,
                          scm_from_ulong (local_scm_gc_mark_time_taken)),
                scm_cons (sym_cells_marked,
                          scm_from_double (local_scm_gc_cells_marked)),
                scm_cons (sym_cells_swept,
                          scm_from_double (local_scm_gc_cells_swept)),
                scm_cons (sym_malloc_yield,
                          scm_from_long (local_scm_gc_malloc_yield_percentage)),
                scm_cons (sym_cell_yield,
                          scm_from_long (local_scm_gc_cell_yield_percentage)),
                scm_cons (sym_protected_objects,
                          scm_from_ulong (local_protected_obj_count)),
                scm_cons (sym_heap_segments, heap_segs),
                SCM_UNDEFINED);
  SCM_ALLOW_INTS;

  free (bounds);
  return answer;
}
#undef FUNC_NAME
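
/* Illustrative use from the REPL (added for clarity, not part of the
   original file): gc-stats returns an alist keyed by the symbols defined
   above, so individual entries can be read with assq-ref, e.g.

     (assq-ref (gc-stats) 'gc-times)        ; number of GC runs so far
     (assq-ref (gc-stats) 'cell-heap-size)  ; current heap size in cells
*/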

static void
gc_start_stats (const char *what SCM_UNUSED)
{
  t_before_gc = scm_c_get_internal_run_time ();

  scm_gc_cells_marked_acc += (double) scm_gc_cells_swept
    - (double) scm_gc_cells_collected;
  scm_gc_cells_swept_acc += (double) scm_gc_cells_swept;

  scm_gc_cell_yield_percentage = (scm_gc_cells_collected * 100) / SCM_HEAP_SIZE;

  scm_gc_cells_swept = 0;
  scm_gc_cells_collected_1 = scm_gc_cells_collected;

  /*
    CELLS SWEPT is another word for the number of cells that were examined
    during GC.  YIELD is the number that we cleaned out.  MARKED is the
    number that weren't cleaned.
  */
  scm_gc_cells_collected = 0;
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}
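
/* Worked example with made-up numbers (added for illustration only): if a
   collection swept 10000 cells and collected 4000 of them, then 6000 cells
   were marked (swept - collected), and with SCM_HEAP_SIZE == 20000 the cell
   yield percentage computed above is (4000 * 100) / 20000 = 20.  */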

static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);

  ++scm_gc_times;
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that is uniquely associated with @var{obj}\n"
            "for the lifetime of @var{obj}.")
#define FUNC_NAME s_scm_object_address
{
  return scm_from_ulong (SCM_UNPACK (obj));
}
#undef FUNC_NAME
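
/* Illustrative REPL use (added here; not part of the original file).  The
   address is the raw SCM word, so it is stable only while the object is
   alive:

     (define lst (list 1 2 3))
     (object-address lst)   ; => some integer
     (object-address lst)   ; => the same integer, while lst is alive
*/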


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  scm_igc ("call");
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME



/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
{
  SCM cell;

  scm_rec_mutex_lock (&scm_i_sweep_mutex);

  *free_cells = scm_i_sweep_some_segments (freelist);
  if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
    {
      freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL && !scm_block_gc)
    {
      /*
        With the advent of lazy sweep, GC yield is only known just
        before doing the GC.
      */
      scm_i_adjust_min_yield (freelist);

      /*
        Out of fresh cells.  Try to get some new ones.
      */

      scm_igc ("cells");

      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL)
    {
      /*
        Failed getting new cells.  Get new juice or die.
      */
      freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL)
    abort ();

  cell = *free_cells;

  *free_cells = SCM_FREE_CELL_CDR (cell);

  scm_rec_mutex_unlock (&scm_i_sweep_mutex);

  return cell;
}


scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;

void
scm_igc (const char *what)
{
  scm_rec_mutex_lock (&scm_i_sweep_mutex);
  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);

#ifdef DEBUGINFO
  fprintf (stderr, "gc reason %s\n", what);

  fprintf (stderr,
           scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist))
           ? "*"
           : (scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist2)) ? "o" : "m"));
#endif

  /* During the critical section, only the current thread may run. */
  scm_i_thread_put_to_sleep ();

  if (!scm_root || !scm_stack_base || scm_block_gc)
    {
      /* Nothing to do; undo the bookkeeping done above before returning, so
         the sweep mutex, the running flag and the sleeping threads stay
         balanced.  */
      --scm_gc_running_p;
      scm_i_thread_wake_up ();
      scm_rec_mutex_unlock (&scm_i_sweep_mutex);
      return;
    }

  gc_start_stats (what);

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  /*
    Let's finish the sweep.  The conservative GC might point into the
    garbage, and marking that would create a mess.
  */
  scm_i_sweep_all_segments ("GC");
  if (scm_mallocated < scm_i_deprecated_memory_return)
    {
      /* The byte count of allocated objects has underflowed.  This is
         probably because you forgot to report the sizes of objects you
         have allocated, by calling scm_done_malloc or some such.  When
         the GC freed them, it subtracted their size from
         scm_mallocated, which underflowed.  */
      fprintf (stderr,
               "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
               "This is probably because the GC hasn't been correctly informed\n"
               "about object sizes\n");
      abort ();
    }
  scm_mallocated -= scm_i_deprecated_memory_return;



  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  scm_mark_all ();

  scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  /*
    Moved this lock upwards so that we can alloc new heap at the end of a sweep.

    DOCME: why should the heap be locked anyway?
  */
  --scm_gc_heap_lock;

  scm_gc_sweep ();


  /*
    TODO: this hook should probably be moved to just before the mark,
    since that's where the sweep is finished in lazy sweeping.

    MDJ 030219 <djurfeldt@nada.kth.se>: No, probably not.  The
    original meaning implied at least two things: that it would be
    called when

      1. the freelist is re-initialized (no evaluation possible, though)

    and

      2. the heap is "fresh"
         (it is well-defined what data is used and what is not)

    Neither of these conditions would hold just before the mark phase.

    Of course, the lazy sweeping has muddled the distinction between
    scm_before_sweep_c_hook and scm_after_sweep_c_hook, but even if
    there were no difference, it would still be useful to have two
    distinct classes of hook functions since this can prevent some
    bad interference when several modules add gc hooks.
  */
  scm_c_hook_run (&scm_after_sweep_c_hook, 0);
  gc_end_stats ();

  scm_i_thread_wake_up ();

  /*
    See above.
  */
  --scm_gc_running_p;
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  scm_rec_mutex_unlock (&scm_i_sweep_mutex);

  /*
    For debugging purposes, you could do
    scm_i_sweep_all_segments ("debug"), but then the remains of the
    cells aren't left to analyse.
  */
}


/* {GC Protection Helper Functions}
 */


/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below.  These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters.  Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called.  In other words, place the call to
 * scm_remember_upto_here* _after_ the last code in your function that
 * depends on the scheme object existing.
 *
 * Example: We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function' in the code
 * below, because otherwise the characters belonging to str would be freed and
 * 'some_function' might access freed memory.  To make sure that the compiler
 * keeps str alive on the stack or in a register such that it is visible to
 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
 * call to 'some_function'.  Note that this would not be necessary if str were
 * used anyway after the call to 'some_function'.
 *   char *chars = scm_i_string_chars (str);
 *   some_function (chars);
 *   scm_remember_upto_here_1 (str);  // str will be alive up to this point.
 */

/* Remove any macro versions of these while defining the functions.
   Functions are always included in the library, for upward binary
   compatibility and in case combinations of GCC and non-GCC are used.  */
#undef scm_remember_upto_here_1
#undef scm_remember_upto_here_2

void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty.  Protects a single object from garbage collection. */
}

void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty.  Protects two objects from garbage collection. */
}

void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty.  Protects any number of objects from garbage collection. */
}

/* These crazy functions prevent garbage collection of arguments after the
   first argument by ensuring they remain live throughout the function,
   because they are used in the last line of the code block.
   It'd be better to have a nice compiler hint to
   aid the conservative stack-scanning GC.  --03/09/00 gjb */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}
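
/* Illustrative call pattern (added for clarity, not in the original file):
   a caller that has finished with `obj' but still needs `result' can write

     return scm_return_first (result, obj);

   so that `obj' stays visible to the conservative GC until the call returns.  */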

int
scm_return_first_int (int i, ...)
{
  return i;
}


SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}


/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_gc_unprotect_object (OBJ).  Calls to scm_gc_protect/unprotect_object nest,
   i.e. it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected.  It is an error to unprotect an object more often
   than it has been protected before.  The function scm_gc_protect_object
   returns OBJ.
 */

/* Implementation note: For every object X, there is a counter which
   scm_gc_protect_object (X) increments and scm_gc_unprotect_object (X) decrements.
 */


SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_create_handle_x (scm_protects, obj, scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  protected_obj_count++;

  SCM_REALLOW_INTS;

  return obj;
}


/* Remove any protection for OBJ established by a prior call to
   scm_gc_protect_object.  This function returns OBJ.

   See scm_gc_protect_object for more information.  */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  if (scm_gc_running_p)
    {
      fprintf (stderr, "scm_gc_unprotect_object called during GC.\n");
      abort ();
    }

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_gc_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, count);
    }
  protected_obj_count--;

  SCM_REALLOW_INTS;

  return obj;
}
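
/* Usage sketch (added for illustration; not part of the original file):
   protection nests, so an object protected twice needs two unprotect calls
   before it becomes collectable again.

     SCM obj = scm_cons (scm_from_int (1), SCM_EOL);
     scm_gc_protect_object (obj);     ... count is now 1
     scm_gc_protect_object (obj);     ... count is now 2
     scm_gc_unprotect_object (obj);   ... count back to 1, still protected
     scm_gc_unprotect_object (obj);   ... count 0, obj may be collected
*/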

void
scm_gc_register_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key,
                                      scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  SCM_REALLOW_INTS;
}

void
scm_gc_unregister_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashv_get_handle (scm_gc_registered_roots, key);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
        scm_hashv_remove_x (scm_gc_registered_roots, key);
      else
        SCM_SETCDR (handle, count);
    }

  SCM_REALLOW_INTS;
}

void
scm_gc_register_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_register_root (p);
}

void
scm_gc_unregister_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_unregister_root (p);
}
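
/* Usage sketch (added for illustration; the variable below is hypothetical):
   a C module with a static SCM location can register it so the value stored
   there is treated as a GC root.

     static SCM my_table;                   ... hypothetical global
     my_table = scm_c_make_hash_table (31);
     scm_gc_register_root (&my_table);      ... value now kept alive
     ...
     scm_gc_unregister_root (&my_table);    ... before the location goes away
*/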

int scm_i_terminating;

/* Called on process termination.  */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to set up a cleanup handler on your system.
#endif
#endif
{
  scm_i_terminating = 1;
  scm_flush_all_ports ();
}




/*
  MOVE THIS FUNCTION.  IT DOES NOT HAVE ANYTHING TO DO WITH GC.
*/

/* Get an integer from an environment variable.  */
int
scm_getenv_int (const char *var, int def)
{
  char *end = 0;
  char *val = getenv (var);
  long res = def;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  return res;
}
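
/* Usage sketch (added for illustration; the variable name is made up):

     int n = scm_getenv_int ("GUILE_EXAMPLE_SETTING", 42);

   yields 42 when GUILE_EXAMPLE_SETTING is unset or does not start with a
   number, and the value parsed by strtol otherwise.  */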

void
scm_storage_prehistory ()
{
  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
}

scm_t_mutex scm_i_gc_admin_mutex;

int
scm_init_storage ()
{
  size_t j;

  /* Fixme: Should use mutexattr from the low-level API. */
  scm_rec_mutex_init (&scm_i_sweep_mutex, &scm_i_plugin_rec_mutex);

  scm_i_plugin_mutex_init (&scm_i_gc_admin_mutex, &scm_i_plugin_mutex);

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_gc_init_freelist ();
  scm_gc_init_malloc ();

  j = SCM_HEAP_SEG_SIZE;


  /* Initialise the list of ports.  */
  scm_i_port_table = (scm_t_port **)
    malloc (sizeof (scm_t_port *) * scm_i_port_table_room);
  if (!scm_i_port_table)
    return 1;

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  scm_stand_in_procs = scm_c_make_hash_table (257);
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);
  scm_gc_registered_roots = scm_c_make_hash_table (31);

  return 0;
}


SCM scm_after_gc_hook;

static SCM gc_async;

/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}


/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void * hook_data SCM_UNUSED,
               void *func_data SCM_UNUSED,
               void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if the
   * after-gc-hook were performed with every gc, and if the gc were performed
   * after a very small number of cell accesses, then the number of cell
   * accesses during the execution of the after-gc-hook would suffice to cause
   * the execution of the next gc.  Then, guile would keep executing the
   * after-gc-hook over and over again, and would never come to do other
   * things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collector.  When running guile with cell access debugging, if
   * execution of the after-gc-hook is desired, it is necessary to run the
   * hook explicitly from user code.  This has the effect that, from the
   * scheme level point of view, garbage collection seems to be performed
   * with a much lower frequency than it actually is.  Obviously, this will
   * not work for code that depends on a fixed one-to-one relationship
   * between the execution counts of the C level garbage collection hooks
   * and the execution count of the scheme level after-gc-hook.
   */
#if (SCM_DEBUG_CELL_ACCESSES == 1)
  if (scm_debug_cells_gc_interval == 0)
    scm_system_async_mark (gc_async);
#else
  scm_system_async_mark (gc_async);
#endif

  return NULL;
}

void
scm_init_gc ()
{
  scm_gc_init_mark ();

  scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  gc_async = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
                              gc_async_thunk);

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}


void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  scm_i_deprecated_memory_return = 0;

  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist);
  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist2);

  /*
    NOTHING HERE: LAZY SWEEPING!
  */
  scm_i_reset_segments ();

  /* When we move to POSIX threads, private freelists should probably
     be GC-protected instead.  */
  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  /* Invalidate the freelists of other threads.  */
  scm_i_thread_invalidate_freelists ();
}

#undef FUNC_NAME



/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/