/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define _GNU_SOURCE

/* #define DEBUGINFO */

#if HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <assert.h>

#ifdef __ia64__
#include <ucontext.h>
extern unsigned long * __libc_ia64_register_backing_store_base;
#endif

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/private-gc.h"
#include "libguile/validate.h"
#include "libguile/deprecation.h"
#include "libguile/gc.h"
#include "libguile/dynwind.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif


unsigned int scm_gc_running_p = 0;

/* Lock this mutex before doing lazy sweeping.
 */
scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_RECURSIVE_MUTEX_INITIALIZER;

/* Set this to a non-zero value if every cell that is accessed shall be checked:
 */
int scm_debug_cell_accesses_p = 0;
int scm_expensive_debug_cell_accesses_p = 0;

/* Set this to 0 if no additional GCs shall be performed; otherwise set it to
 * the number of cell accesses after which a GC shall be triggered.
 */
int scm_debug_cells_gc_interval = 0;

/*
  Global variable, so you can switch cell validation off at runtime by
  setting scm_i_cell_validation_already_running.
*/
int scm_i_cell_validation_already_running;

#if (SCM_DEBUG_CELL_ACCESSES == 1)


/*

  Assert that the given object is a valid reference to a valid cell.  This
  test involves determining whether the object is a cell pointer, whether
  this pointer actually points into a heap segment, and whether the cell
  pointed to is not a free cell.  Further, additional garbage collections
  may be run after a user-defined number of cell accesses.  This helps to
  find places in the C code where references are dropped for extremely
  short periods.

*/
void
scm_i_expensive_validation_check (SCM cell)
{
  if (!scm_in_heap_p (cell))
    {
      fprintf (stderr, "scm_assert_cell_valid: this object does not live in the heap: 0x%lx\n",
               (unsigned long) SCM_UNPACK (cell));
      abort ();
    }

  /* If desired, perform additional garbage collections after a
   * user-defined number of cell accesses.
   */
  if (scm_debug_cells_gc_interval)
    {
      static unsigned int counter = 0;

      if (counter != 0)
        {
          --counter;
        }
      else
        {
          counter = scm_debug_cells_gc_interval;
          scm_igc ("scm_assert_cell_valid");
        }
    }
}

void
scm_assert_cell_valid (SCM cell)
{
  if (!scm_i_cell_validation_already_running && scm_debug_cell_accesses_p)
    {
      scm_i_cell_validation_already_running = 1;  /* set to avoid recursion */

      /*
        During GC, no user code should be run, and the guile core
        should use non-protected accessors.
      */
      if (scm_gc_running_p)
        return;

      /*
        Only scm_in_heap_p and rescanning the heap are wildly
        expensive.
      */
      if (scm_expensive_debug_cell_accesses_p)
        scm_i_expensive_validation_check (cell);

      if (!SCM_GC_MARK_P (cell))
        {
          fprintf (stderr,
                   "scm_assert_cell_valid: this object is unmarked.\n"
                   "It has been garbage-collected in the last GC run: "
                   "0x%lx\n",
                   (unsigned long) SCM_UNPACK (cell));
          abort ();
        }

      scm_i_cell_validation_already_running = 0;  /* re-enable */
    }
}


SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
            "but no additional calls to garbage collection are issued.\n"
            "If @var{flag} is a number, strict cell access checking is enabled,\n"
            "with an additional garbage collection after the given\n"
            "number of cell accesses.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (scm_is_false (flag))
    {
      scm_debug_cell_accesses_p = 0;
    }
  else if (scm_is_eq (flag, SCM_BOOL_T))
    {
      scm_debug_cells_gc_interval = 0;
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 0;
    }
  else
    {
      scm_debug_cells_gc_interval = scm_to_signed_integer (flag, 0, INT_MAX);
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 1;
    }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
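
/* A short usage sketch of the procedure above, callable from C when cell
   access debugging is compiled in.  The argument values are illustrative
   only:

     scm_set_debug_cell_accesses_x (SCM_BOOL_T);            // cheap checking
     scm_set_debug_cell_accesses_x (scm_from_int (1000));   // strict, GC every 1000 accesses
     scm_set_debug_cell_accesses_x (SCM_BOOL_F);            // checking off
*/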


#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

\f


/* scm_mtrigger
 * is the number of bytes of malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no
 * allocation is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
unsigned long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_cells_collected_1 = 0;  /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_times = 0;
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;
double scm_gc_cells_swept_acc = 0.;
int scm_gc_cell_yield_percentage = 0;
int scm_gc_malloc_yield_percentage = 0;
unsigned long protected_obj_count = 0;


SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");
SCM_SYMBOL (sym_malloc_yield, "malloc-yield");
SCM_SYMBOL (sym_cell_yield, "cell-yield");
SCM_SYMBOL (sym_protected_objects, "protected-objects");




/* Number of calls to SCM_NEWCELL since startup.  */
unsigned scm_newcell_count;
unsigned scm_newcell2_count;


/* {Scheme Interface to GC}
 */
static SCM
tag_table_to_type_alist (void *closure, SCM key, SCM val, SCM acc)
{
  scm_t_bits c_tag = scm_to_int (key);

  char const *name = scm_i_tag_name (c_tag);
  if (name != NULL)
    key = scm_from_locale_string (name);

  return scm_cons (scm_cons (key, val), acc);
}

SCM_DEFINE (scm_gc_live_object_stats, "gc-live-object-stats", 0, 0, 0,
            (),
            "Return an alist of statistics of the current live objects.")
#define FUNC_NAME s_scm_gc_live_object_stats
{
  SCM tab = scm_make_hash_table (scm_from_int (57));
  scm_i_all_segments_statistics (tab);

  SCM alist
    = scm_internal_hash_fold (&tag_table_to_type_alist, NULL, SCM_EOL, tab);

  return alist;
}
#undef FUNC_NAME

extern int scm_gc_malloc_yield_percentage;
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.\n")
#define FUNC_NAME s_scm_gc_stats
{
  long i = 0;
  SCM heap_segs = SCM_EOL;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  int local_scm_gc_cell_yield_percentage;
  int local_scm_gc_malloc_yield_percentage;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_protected_obj_count;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;
  unsigned long *bounds = 0;
  int table_size = scm_i_heap_segment_table_size;
  SCM_CRITICAL_SECTION_START;

  /*
    Temporarily store the numbers, so as not to cause GC.
  */
  bounds = malloc (sizeof (unsigned long) * table_size * 2);
  if (!bounds)
    abort ();
  for (i = table_size; i--; )
    {
      bounds[2*i] = (unsigned long) scm_i_heap_segment_table[i]->bounds[0];
      bounds[2*i+1] = (unsigned long) scm_i_heap_segment_table[i]->bounds[1];
    }

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;

  local_scm_cells_allocated = scm_cells_allocated;

  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_malloc_yield_percentage = scm_gc_malloc_yield_percentage;
  local_scm_gc_cell_yield_percentage = scm_gc_cell_yield_percentage;
  local_protected_obj_count = protected_obj_count;
  local_scm_gc_cells_swept =
    (double) scm_gc_cells_swept_acc
    + (double) scm_gc_cells_swept;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc
    + (double) scm_gc_cells_swept
    - (double) scm_gc_cells_collected;

  for (i = table_size; i--;)
    {
      heap_segs = scm_cons (scm_cons (scm_from_ulong (bounds[2*i]),
                                      scm_from_ulong (bounds[2*i+1])),
                            heap_segs);
    }

  answer =
    scm_list_n (scm_cons (sym_gc_time_taken,
                          scm_from_ulong (local_scm_gc_time_taken)),
                scm_cons (sym_cells_allocated,
                          scm_from_ulong (local_scm_cells_allocated)),
                scm_cons (sym_heap_size,
                          scm_from_ulong (local_scm_heap_size)),
                scm_cons (sym_mallocated,
                          scm_from_ulong (local_scm_mallocated)),
                scm_cons (sym_mtrigger,
                          scm_from_ulong (local_scm_mtrigger)),
                scm_cons (sym_times,
                          scm_from_ulong (local_scm_gc_times)),
                scm_cons (sym_gc_mark_time_taken,
                          scm_from_ulong (local_scm_gc_mark_time_taken)),
                scm_cons (sym_cells_marked,
                          scm_from_double (local_scm_gc_cells_marked)),
                scm_cons (sym_cells_swept,
                          scm_from_double (local_scm_gc_cells_swept)),
                scm_cons (sym_malloc_yield,
                          scm_from_long (local_scm_gc_malloc_yield_percentage)),
                scm_cons (sym_cell_yield,
                          scm_from_long (local_scm_gc_cell_yield_percentage)),
                scm_cons (sym_protected_objects,
                          scm_from_ulong (local_protected_obj_count)),
                scm_cons (sym_heap_segments, heap_segs),
                SCM_UNDEFINED);
  SCM_CRITICAL_SECTION_END;

  free (bounds);
  return answer;
}
#undef FUNC_NAME
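
/* From Scheme, (gc-stats) returns an association list.  A sketch of its
   shape, with made-up numbers:

     ((gc-time-taken . 367)
      (cells-allocated . 152160)
      (cell-heap-size . 278528)
      ...
      (cell-heap-segments . ((6418432 . 6422528) ...)))
*/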

static void
gc_start_stats (const char *what SCM_UNUSED)
{
  t_before_gc = scm_c_get_internal_run_time ();

  scm_gc_cells_marked_acc += (double) scm_gc_cells_swept
    - (double) scm_gc_cells_collected;
  scm_gc_cells_swept_acc += (double) scm_gc_cells_swept;

  scm_gc_cell_yield_percentage = (scm_gc_cells_collected * 100) / SCM_HEAP_SIZE;

  scm_gc_cells_swept = 0;
  scm_gc_cells_collected_1 = scm_gc_cells_collected;

  /*
    CELLS SWEPT is another word for the number of cells that were examined
    during GC.  YIELD is the number that we cleaned out.  MARKED is the
    number that weren't cleaned.
  */
  scm_gc_cells_collected = 0;
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}

static void
gc_end_stats (void)
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);

  ++scm_gc_times;
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that, for the lifetime of @var{obj}, is uniquely\n"
            "associated with @var{obj} by this function.")
#define FUNC_NAME s_scm_object_address
{
  return scm_from_ulong (SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  scm_igc ("call");
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


\f

/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
{
  SCM cell;

  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);

  *free_cells = scm_i_sweep_some_segments (freelist);
  if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
    {
      freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL && !scm_block_gc)
    {
      /*
        With the advent of lazy sweep, GC yield is only known just
        before doing the GC.
      */
      scm_i_adjust_min_yield (freelist);

      /*
        Out of fresh cells.  Try to get some new ones.
      */
      scm_igc ("cells");

      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL)
    {
      /*
        Failed getting new cells.  Get new juice or die.
      */
      freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL)
    abort ();

  cell = *free_cells;

  *free_cells = SCM_FREE_CELL_CDR (cell);

  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);

  return cell;
}


scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;

void
scm_igc (const char *what)
{
  if (scm_block_gc)
    return;

  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);

  /* During the critical section, only the current thread may run. */
  scm_i_thread_put_to_sleep ();

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);

#ifdef DEBUGINFO
  fprintf (stderr, "gc reason %s\n", what);

  fprintf (stderr,
           scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist))
           ? "*"
           : (scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist2)) ? "o" : "m"));
#endif

  gc_start_stats (what);

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  /*
    Set freelists to NULL so that scm_cons () always triggers gc, causing
    the above abort () to be triggered.
  */
  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  ++scm_gc_heap_lock;

  /*
    Let's finish the sweep.  The conservative GC might point into the
    garbage, and marking that would create a mess.
  */
  scm_i_sweep_all_segments ("GC");
  if (scm_mallocated < scm_i_deprecated_memory_return)
    {
      /* The byte count of allocated objects has underflowed.  This is
         probably because you forgot to report the sizes of objects you
         have allocated, by calling scm_done_malloc or some such.  When
         the GC freed them, it subtracted their size from
         scm_mallocated, which underflowed.  */
      fprintf (stderr,
               "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
               "This is probably because the GC hasn't been correctly informed\n"
               "about object sizes\n");
      abort ();
    }
  scm_mallocated -= scm_i_deprecated_memory_return;



  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  scm_mark_all ();

  scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  /*
    Moved this lock upwards so that we can alloc new heap at the end of a sweep.

    DOCME: why should the heap be locked anyway?
  */
  --scm_gc_heap_lock;

  scm_gc_sweep ();


  /*
    TODO: this hook should probably be moved to just before the mark,
    since that's where the sweep is finished in lazy sweeping.

    MDJ 030219 <djurfeldt@nada.kth.se>: No, probably not.  The
    original meaning implied at least two things: that it would be
    called when

      1. the freelist is re-initialized (no evaluation possible, though)

    and

      2. the heap is "fresh"
         (it is well-defined what data is used and what is not)

    Neither of these conditions would hold just before the mark phase.

    Of course, lazy sweeping has muddled the distinction between
    scm_before_sweep_c_hook and scm_after_sweep_c_hook, but even if
    there were no difference, it would still be useful to have two
    distinct classes of hook functions, since this can prevent some
    bad interference when several modules add gc hooks.
  */
  scm_c_hook_run (&scm_after_sweep_c_hook, 0);
  gc_end_stats ();

  --scm_gc_running_p;
  scm_i_thread_wake_up ();

  /*
    See above.
  */
  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
  scm_c_hook_run (&scm_after_gc_c_hook, 0);

  /*
    For debugging purposes, you could do
    scm_i_sweep_all_segments ("debug"), but then the remains of the
    cells aren't left to analyse.
  */
}

\f
/* {GC Protection Helper Functions}
 */


/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below.  These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters.  Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called.  In other words, place the call to
 * scm_remember_upto_here* _after_ the last code in your function that
 * depends on the scheme object's existence.
 *
 * Example: We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function' in the code
 * below, because otherwise the characters belonging to str would be freed and
 * 'some_function' might access freed memory.  To make sure that the compiler
 * keeps str alive on the stack or in a register such that it is visible to
 * the conservative gc, we add the call to scm_remember_upto_here_1 _after_
 * the call to 'some_function'.  Note that this would not be necessary if str
 * were used anyway after the call to 'some_function'.
 *
 *   char *chars = scm_i_string_chars (str);
 *   some_function (chars);
 *   scm_remember_upto_here_1 (str);  // str will be alive up to this point.
 */

/* Remove any macro versions of these while defining the functions.
   Functions are always included in the library, for upward binary
   compatibility and in case combinations of GCC and non-GCC are used.  */
#undef scm_remember_upto_here_1
#undef scm_remember_upto_here_2

void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty.  Protects a single object from garbage collection. */
}

void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty.  Protects two objects from garbage collection. */
}

void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty.  Protects any number of objects from garbage collection. */
}

/*
  These crazy functions prevent garbage collection of arguments after
  the first argument by ensuring they remain live throughout the
  function, because they are used in the last line of the code block.
  It'd be better to have a nice compiler hint to aid the conservative
  stack-scanning GC.  --03/09/00 gjb
*/
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

int
scm_return_first_int (int i, ...)
{
  return i;
}


SCM
scm_permanent_object (SCM obj)
{
  SCM cell = scm_cons (obj, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  SCM_SETCDR (cell, scm_permobjs);
  scm_permobjs = cell;
  SCM_CRITICAL_SECTION_END;
  return obj;
}

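/* A sketch of typical use (the variable and value are hypothetical):
   intern an object for the lifetime of the process, so the collector
   always sees a live reference to it.

     static SCM default_prompt;
     default_prompt = scm_permanent_object (scm_from_locale_string ("> "));
*/
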
/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_gc_unprotect_object (OBJ).  Calls to scm_gc_protect/unprotect_object
   nest, i.e. it is possible to protect the same object several times, but it
   is necessary to unprotect the object the same number of times to actually
   get the object unprotected.  It is an error to unprotect an object more
   often than it has been protected before.  The function
   scm_gc_protect_object returns OBJ.
*/

/* Implementation note: For every object X, there is a counter which
   scm_gc_protect_object (X) increments and scm_gc_unprotect_object (X)
   decrements.
*/
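
/* A minimal usage sketch (hypothetical extension code, not from this file):
   protect a value stashed in a C global across calls that may allocate and
   trigger GC, then release it when done.

     static SCM callback;

     void set_callback (SCM proc)
     {
       callback = scm_gc_protect_object (proc);   // now safe from GC
     }

     void clear_callback (void)
     {
       scm_gc_unprotect_object (callback);        // may be collected again
       callback = SCM_BOOL_F;
     }
*/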

SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashq_create_handle_x (scm_protects, obj, scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  protected_obj_count++;

  SCM_CRITICAL_SECTION_END;

  return obj;
}


/* Remove any protection for OBJ established by a prior call to
   scm_gc_protect_object.  This function returns OBJ.

   See scm_gc_protect_object for more information.  */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_CRITICAL_SECTION_START;

  if (scm_gc_running_p)
    {
      fprintf (stderr, "scm_gc_unprotect_object called during GC.\n");
      abort ();
    }

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_gc_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, count);
    }
  protected_obj_count--;

  SCM_CRITICAL_SECTION_END;

  return obj;
}

void
scm_gc_register_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key,
                                      scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  SCM_CRITICAL_SECTION_END;
}

void
scm_gc_unregister_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashv_get_handle (scm_gc_registered_roots, key);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
        scm_hashv_remove_x (scm_gc_registered_roots, key);
      else
        SCM_SETCDR (handle, count);
    }

  SCM_CRITICAL_SECTION_END;
}

void
scm_gc_register_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_register_root (p);
}

void
scm_gc_unregister_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_unregister_root (p);
}

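/* A minimal sketch of root registration (hypothetical extension code, not
   from this file): a file-scope SCM variable must be registered as a root so
   the collector traces it, since it lives outside the scanned stack.

     static SCM my_table = SCM_BOOL_F;

     void init_my_module (void)
     {
       my_table = scm_c_make_hash_table (31);
       scm_gc_register_root (&my_table);   // collector now traces *(&my_table)
     }
*/
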
int scm_i_terminating;

\f


/*
  MOVE THIS FUNCTION.  IT DOES NOT HAVE ANYTHING TO DO WITH GC.
*/

/* Get an integer from an environment variable.  */
int
scm_getenv_int (const char *var, int def)
{
  char *end = 0;
  char *val = getenv (var);
  long res = def;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  return res;
}

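/* Usage sketch (the variable name is hypothetical):

     int interval = scm_getenv_int ("GUILE_GC_INTERVAL", 0);

   yields 0 when GUILE_GC_INTERVAL is unset or cannot be parsed as a
   number, and the parsed value otherwise.
*/
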
void
scm_storage_prehistory (void)
{
  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
}

scm_i_pthread_mutex_t scm_i_gc_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;

int
scm_init_storage (void)
{
  size_t j;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_gc_init_freelist ();
  scm_gc_init_malloc ();

  j = SCM_HEAP_SEG_SIZE;


  /* Initialise the list of ports.  */
  scm_i_port_table = (scm_t_port **)
    malloc (sizeof (scm_t_port *) * scm_i_port_table_room);
  if (!scm_i_port_table)
    return 1;

#if 0
  /* We can't have a cleanup handler since we have no thread to run it
     in.  */

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

#endif

  scm_stand_in_procs = scm_c_make_hash_table (257);
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);
  scm_gc_registered_roots = scm_c_make_hash_table (31);

  return 0;
}

\f

SCM scm_after_gc_hook;

static SCM gc_async;

/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}


/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void *hook_data SCM_UNUSED,
               void *func_data SCM_UNUSED,
               void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if
   * the after-gc-hook was performed with every gc, and if the gc was
   * performed after a very small number of cell accesses, then the number of
   * cell accesses during the execution of the after-gc-hook would suffice to
   * cause the execution of the next gc.  Then, guile would keep executing
   * the after-gc-hook over and over again, and would never get around to
   * doing other things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collector.  When running guile with cell access debugging and
   * the execution of the after-gc-hook is desired, it is necessary to run
   * the hook explicitly from the user code.  This has the effect that, from
   * the scheme-level point of view, garbage collection appears to be
   * performed with a much lower frequency than it actually is.  Obviously,
   * this will not work for code that depends on a fixed one-to-one
   * relationship between the execution counts of the C level garbage
   * collection hooks and the execution count of the scheme level
   * after-gc-hook.
   */

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  if (scm_debug_cells_gc_interval == 0)
    scm_system_async_mark (gc_async);
#else
  scm_system_async_mark (gc_async);
#endif

  return NULL;
}

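/* Running the hook explicitly, as described above, is a one-liner from C
   (a sketch; the same thing is available from Scheme as
   (run-hook after-gc-hook)):

     scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
*/
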
void
scm_init_gc (void)
{
  scm_gc_init_mark ();

  scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  gc_async = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
                              gc_async_thunk);

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}


void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  scm_i_deprecated_memory_return = 0;

  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist);
  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist2);

  /*
    NOTHING HERE: LAZY SWEEPING!
  */
  scm_i_reset_segments ();

  /* When we move to POSIX threads, private freelists should probably
     be GC-protected instead.  */
  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  /* Invalidate the freelists of other threads.  */
  scm_i_thread_invalidate_freelists ();
}

#undef FUNC_NAME


/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/