/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE

/* #define DEBUGINFO */

#if HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <assert.h>

#ifdef __ia64__
#include <ucontext.h>
extern unsigned long * __libc_ia64_register_backing_store_base;
#endif

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/private-gc.h"
#include "libguile/validate.h"
#include "libguile/deprecation.h"
#include "libguile/gc.h"
#include "libguile/dynwind.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

/* Lock this mutex before doing lazy sweeping.
 */
scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;

/* Set this to a nonzero value if every cell access should be checked:
 */
int scm_debug_cell_accesses_p = 0;
int scm_expensive_debug_cell_accesses_p = 0;

/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
 * the number of cell accesses after which a gc shall be called.
 */
int scm_debug_cells_gc_interval = 0;

/*
  This is a global variable, so validation can be switched off at
  runtime by setting scm_i_cell_validation_already_running.
 */
int scm_i_cell_validation_already_running;

#if (SCM_DEBUG_CELL_ACCESSES == 1)


/*

  Assert that the given object is a valid reference to a valid cell.  This
  test involves determining whether the object is a cell pointer, whether
  this pointer actually points into a heap segment, and whether the cell
  pointed to is not a free cell.  Further, additional garbage collections may
  get executed after a user-defined number of cell accesses.  This helps to
  find places in the C code where references are dropped for extremely short
  periods.

*/
void
scm_i_expensive_validation_check (SCM cell)
{
  if (!scm_in_heap_p (cell))
    {
      fprintf (stderr, "scm_assert_cell_valid: this object does not live in the heap: 0x%lx\n",
               (unsigned long) SCM_UNPACK (cell));
      abort ();
    }

  /* If desired, perform additional garbage collections after a user
   * defined number of cell accesses.
   */
  if (scm_debug_cells_gc_interval)
    {
      static unsigned int counter = 0;

      if (counter != 0)
        {
          --counter;
        }
      else
        {
          counter = scm_debug_cells_gc_interval;
          scm_gc ();
        }
    }
}

void
scm_assert_cell_valid (SCM cell)
{
  if (!scm_i_cell_validation_already_running && scm_debug_cell_accesses_p)
    {
      scm_i_cell_validation_already_running = 1;  /* set to avoid recursion */

      /*
        During GC, no user code should be run, and the guile core
        should use non-protected accessors.
      */
      if (scm_gc_running_p)
        return;

      /*
        Only run the expensive check on request: scm_in_heap_p rescans
        the whole heap, which is wildly expensive.
      */
      if (scm_expensive_debug_cell_accesses_p)
        scm_i_expensive_validation_check (cell);

      if (!SCM_GC_MARK_P (cell))
        {
          fprintf (stderr,
                   "scm_assert_cell_valid: this object is unmarked.\n"
                   "It has been garbage-collected in the last GC run: "
                   "0x%lx\n",
                   (unsigned long) SCM_UNPACK (cell));
          abort ();
        }

      scm_i_cell_validation_already_running = 0;  /* re-enable */
    }
}


SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
            "but no additional calls to garbage collection are issued.\n"
            "If @var{flag} is a number, strict cell access checking is enabled,\n"
            "with an additional garbage collection after the given\n"
            "number of cell accesses.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (scm_is_false (flag))
    {
      scm_debug_cell_accesses_p = 0;
    }
  else if (scm_is_eq (flag, SCM_BOOL_T))
    {
      scm_debug_cells_gc_interval = 0;
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 0;
    }
  else
    {
      scm_debug_cells_gc_interval = scm_to_signed_integer (flag, 0, INT_MAX);
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 1;
    }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
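
/* Illustrative usage from Scheme, following the docstring above (a
   sketch, not part of this file's code):

     (set-debug-cell-accesses! #f)    ; disable checking
     (set-debug-cell-accesses! #t)    ; cheap checks, no extra GCs
     (set-debug-cell-accesses! 1000)  ; strict checks, plus a GC every
                                      ; 1000 cell accesses

   The numeric argument is what ends up in scm_debug_cells_gc_interval.  */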


#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

\f

/* scm_mtrigger
 * is the number of bytes of malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
unsigned long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_cells_collected_1 = 0; /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_times = 0;
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;
double scm_gc_cells_swept_acc = 0.;
int scm_gc_cell_yield_percentage = 0;
int scm_gc_malloc_yield_percentage = 0;
unsigned long protected_obj_count = 0;


SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");
SCM_SYMBOL (sym_malloc_yield, "malloc-yield");
SCM_SYMBOL (sym_cell_yield, "cell-yield");
SCM_SYMBOL (sym_protected_objects, "protected-objects");


/* Number of calls to SCM_NEWCELL since startup.  */
unsigned scm_newcell_count;
unsigned scm_newcell2_count;


/* {Scheme Interface to GC}
 */
static SCM
tag_table_to_type_alist (void *closure, SCM key, SCM val, SCM acc)
{
  if (scm_is_integer (key))
    {
      scm_t_bits c_tag = scm_to_int (key);

      char const *name = scm_i_tag_name (c_tag);
      if (name != NULL)
        {
          key = scm_from_locale_string (name);
        }
      else
        {
          char s[100];
          /* c_tag fits in an int here, since it was read with
             scm_to_int above.  */
          sprintf (s, "tag %d", (int) c_tag);
          key = scm_from_locale_string (s);
        }
    }

  return scm_cons (scm_cons (key, val), acc);
}

SCM_DEFINE (scm_gc_live_object_stats, "gc-live-object-stats", 0, 0, 0,
            (),
            "Return an alist of statistics about the current live objects.")
#define FUNC_NAME s_scm_gc_live_object_stats
{
  SCM tab = scm_make_hash_table (scm_from_int (57));
  SCM alist;

  scm_i_all_segments_statistics (tab);

  alist
    = scm_internal_hash_fold (&tag_table_to_type_alist, NULL, SCM_EOL, tab);

  return alist;
}
#undef FUNC_NAME

extern int scm_gc_malloc_yield_percentage;
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.\n")
#define FUNC_NAME s_scm_gc_stats
{
  long i = 0;
  SCM heap_segs = SCM_EOL;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  int local_scm_gc_cell_yield_percentage;
  int local_scm_gc_malloc_yield_percentage;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_protected_obj_count;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;
  unsigned long *bounds = 0;
  int table_size = scm_i_heap_segment_table_size;
  SCM_CRITICAL_SECTION_START;

  /*
    temporarily store the numbers, so as not to cause GC.
   */

  /* bounds is an array of 2 * table_size unsigned longs.  */
  bounds = malloc (sizeof (unsigned long) * table_size * 2);
  if (!bounds)
    abort ();
  for (i = table_size; i--; )
    {
      bounds[2*i] = (unsigned long) scm_i_heap_segment_table[i]->bounds[0];
      bounds[2*i+1] = (unsigned long) scm_i_heap_segment_table[i]->bounds[1];
    }


  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;

  local_scm_cells_allocated = scm_cells_allocated;

  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_malloc_yield_percentage = scm_gc_malloc_yield_percentage;
  local_scm_gc_cell_yield_percentage = scm_gc_cell_yield_percentage;
  local_protected_obj_count = protected_obj_count;
  local_scm_gc_cells_swept =
    (double) scm_gc_cells_swept_acc
    + (double) scm_gc_cells_swept;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc
    + (double) scm_gc_cells_swept
    - (double) scm_gc_cells_collected;

  for (i = table_size; i--;)
    {
      heap_segs = scm_cons (scm_cons (scm_from_ulong (bounds[2*i]),
                                      scm_from_ulong (bounds[2*i+1])),
                            heap_segs);
    }
  /* njrev: can any of these scm_cons's or scm_list_n signal a memory
     error?  If so we need a frame here. */
  answer =
    scm_list_n (scm_cons (sym_gc_time_taken,
                          scm_from_ulong (local_scm_gc_time_taken)),
                scm_cons (sym_cells_allocated,
                          scm_from_ulong (local_scm_cells_allocated)),
                scm_cons (sym_heap_size,
                          scm_from_ulong (local_scm_heap_size)),
                scm_cons (sym_mallocated,
                          scm_from_ulong (local_scm_mallocated)),
                scm_cons (sym_mtrigger,
                          scm_from_ulong (local_scm_mtrigger)),
                scm_cons (sym_times,
                          scm_from_ulong (local_scm_gc_times)),
                scm_cons (sym_gc_mark_time_taken,
                          scm_from_ulong (local_scm_gc_mark_time_taken)),
                scm_cons (sym_cells_marked,
                          scm_from_double (local_scm_gc_cells_marked)),
                scm_cons (sym_cells_swept,
                          scm_from_double (local_scm_gc_cells_swept)),
                scm_cons (sym_malloc_yield,
                          scm_from_long (local_scm_gc_malloc_yield_percentage)),
                scm_cons (sym_cell_yield,
                          scm_from_long (local_scm_gc_cell_yield_percentage)),
                scm_cons (sym_protected_objects,
                          scm_from_ulong (local_protected_obj_count)),
                scm_cons (sym_heap_segments, heap_segs),
                SCM_UNDEFINED);
  SCM_CRITICAL_SECTION_END;

  free (bounds);
  return answer;
}
#undef FUNC_NAME
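
/* Illustrative usage from Scheme (a sketch, not part of this file's
   code); the keys are the SCM_SYMBOL names defined earlier:

     (assq-ref (gc-stats) 'gc-times)        ; collections so far
     (assq-ref (gc-stats) 'cell-heap-size)  ; total cells in the heap
*/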

static void
gc_start_stats (const char *what SCM_UNUSED)
{
  t_before_gc = scm_c_get_internal_run_time ();

  scm_gc_cells_marked_acc += (double) scm_gc_cells_swept
    - (double) scm_gc_cells_collected;
  scm_gc_cells_swept_acc += (double) scm_gc_cells_swept;

  scm_gc_cell_yield_percentage = (scm_gc_cells_collected * 100) / SCM_HEAP_SIZE;

  scm_gc_cells_swept = 0;
  scm_gc_cells_collected_1 = scm_gc_cells_collected;

  /*
    "Cells swept" is the number of cells that were examined during GC.
    "Yield" is the number that we cleaned out.  "Marked" is the number
    that weren't cleaned, i.e. marked = swept - collected.
  */
  scm_gc_cells_collected = 0;
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}

static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);

  ++scm_gc_times;
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that is uniquely associated with @var{obj}\n"
            "for the lifetime of @var{obj}.")
#define FUNC_NAME s_scm_object_address
{
  return scm_from_ulong (SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
  scm_gc_running_p = 1;
  scm_i_gc ("call");
  /* njrev: It looks as though other places, e.g. scm_realloc,
     can call scm_i_gc without acquiring the sweep mutex.  Does this
     matter?  Also scm_i_gc (or its descendants) touch the
     scm_sys_protects, which are protected in some cases
     (e.g. scm_permobjs above in scm_gc_stats) by a critical section,
     not by the sweep mutex.  Shouldn't all the GC-relevant objects be
     protected in the same way? */
  scm_gc_running_p = 0;
  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


\f

/* The master freelist is global and shared, while each thread has a
 * freelist of its own.
 */

SCM
scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
{
  SCM cell;
  int did_gc = 0;

  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
  scm_gc_running_p = 1;

  *free_cells = scm_i_sweep_some_segments (freelist);
  if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
    {
      freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL)
    {
      /*
        With the advent of lazy sweep, GC yield is only known just
        before doing the GC.
      */
      scm_i_adjust_min_yield (freelist);

      /*
        Out of fresh cells.  Try to get some new ones by running a GC.
      */

      did_gc = 1;
      scm_i_gc ("cells");

      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL)
    {
      /*
        GC failed to yield new cells.  Allocate a new heap segment, or die.
      */
      freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL)
    abort ();

  cell = *free_cells;

  *free_cells = SCM_FREE_CELL_CDR (cell);

  scm_gc_running_p = 0;
  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);

  if (did_gc)
    scm_c_hook_run (&scm_after_gc_c_hook, 0);

  return cell;
}


scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;
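
/* A minimal sketch of registering a C-level GC hook (illustrative, not
   part of this file).  The function type and the scm_c_hook_add call
   follow mark_gc_async and scm_init_gc below; these hooks are
   initialized as SCM_C_HOOK_NORMAL in scm_storage_prehistory, so the
   return value is ignored.

     static void *
     log_gc (void *hook_data, void *func_data, void *data)
     {
       fprintf (stderr, "gc finished\n");
       return NULL;
     }

     scm_c_hook_add (&scm_after_gc_c_hook, log_gc, NULL, 0);
*/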

/* Must be called while holding scm_i_sweep_mutex.
 */

void
scm_i_gc (const char *what)
{
  scm_i_thread_put_to_sleep ();

  scm_c_hook_run (&scm_before_gc_c_hook, 0);

#ifdef DEBUGINFO
  fprintf (stderr, "gc reason %s\n", what);

  fprintf (stderr,
           scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist))
           ? "*"
           : (scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist2)) ? "o" : "m"));
#endif

  gc_start_stats (what);

  /*
    Set freelists to NULL so that any scm_cons () during GC always
    triggers another gc, making such a bug fail loudly.
  */
  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  /*
    Let's finish the sweep.  The conservative GC might point into the
    garbage, and marking that would create a mess.
  */
  scm_i_sweep_all_segments ("GC");
  if (scm_mallocated < scm_i_deprecated_memory_return)
    {
      /* The byte count of allocated objects has underflowed.  This is
         probably because you forgot to report the sizes of objects you
         have allocated, by calling scm_done_malloc or some such.  When
         the GC freed them, it subtracted their size from
         scm_mallocated, which underflowed.  */
      fprintf (stderr,
               "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
               "This is probably because the GC hasn't been correctly informed\n"
               "about object sizes\n");
      abort ();
    }
  scm_mallocated -= scm_i_deprecated_memory_return;


  /* Mark */

  scm_c_hook_run (&scm_before_mark_c_hook, 0);
  scm_mark_all ();
  scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);

  /* Sweep

     TODO: the after_sweep hook should probably be moved to just before
     the mark, since that's where the sweep is finished in lazy
     sweeping.

     MDJ 030219 <djurfeldt@nada.kth.se>: No, probably not.  The
     original meaning implied at least two things: that it would be
     called when

       1. the freelist is re-initialized (no evaluation possible, though)

     and

       2. the heap is "fresh"
          (it is well-defined what data is used and what is not)

     Neither of these conditions would hold just before the mark phase.

     Of course, lazy sweeping has muddled the distinction between
     scm_before_sweep_c_hook and scm_after_sweep_c_hook, but even if
     there were no difference, it would still be useful to have two
     distinct classes of hook functions, since this can prevent some
     bad interference when several modules add gc hooks.
  */

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);
  scm_gc_sweep ();
  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  gc_end_stats ();

  scm_i_thread_wake_up ();

  /*
    For debugging purposes, you could do
    scm_i_sweep_all_segments ("debug"), but then the remains of the
    cell aren't left to analyse.
  */
}

\f
/* {GC Protection Helper Functions}
 */


/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below.  These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters.  Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called.  In other words, place the call to
 * scm_remember_upto_here* _after_ the last code in your function that
 * depends on the existence of the scheme object.
 *
 * Example: We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function' in the code
 * below, because otherwise the characters belonging to str would be freed and
 * 'some_function' might access freed memory.  To make sure that the compiler
 * keeps str alive on the stack or in a register such that it is visible to
 * the conservative gc, we add the call to scm_remember_upto_here_1 _after_
 * the call to 'some_function'.  Note that this would not be necessary if str
 * were used anyway after the call to 'some_function'.
 *
 *   char *chars = scm_i_string_chars (str);
 *   some_function (chars);
 *   scm_remember_upto_here_1 (str);  // str will be alive up to this point.
 */

/* Remove any macro versions of these while defining the functions.
   Functions are always included in the library, for upward binary
   compatibility and in case combinations of GCC and non-GCC are used.  */
#undef scm_remember_upto_here_1
#undef scm_remember_upto_here_2

void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty.  Protects a single object from garbage collection. */
}

void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty.  Protects two objects from garbage collection. */
}

void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty.  Protects any number of objects from garbage collection. */
}

/* These crazy functions prevent garbage collection of arguments after
   the first argument by ensuring they remain live throughout the
   function, because they are used in the last line of the code block.
   It'd be better to have a nice compiler hint to aid the conservative
   stack-scanning GC. --03/09/00 gjb */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

int
scm_return_first_int (int i, ...)
{
  return i;
}
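
/* A usage sketch (illustrative; `result` and `vec` are made-up names):
   return a value computed from the storage of `vec` while keeping
   `vec` itself visible to the conservative GC until the call returns.

     return scm_return_first (result, vec);
*/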


SCM
scm_permanent_object (SCM obj)
{
  SCM cell = scm_cons (obj, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  SCM_SETCDR (cell, scm_permobjs);
  scm_permobjs = cell;
  SCM_CRITICAL_SECTION_END;
  return obj;
}


/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_gc_unprotect_object (OBJ).  Calls to scm_gc_protect/unprotect_object
   nest, i.e. it is possible to protect the same object several times, but it
   is necessary to unprotect the object the same number of times to actually
   get the object unprotected.  It is an error to unprotect an object more
   often than it has been protected before.  The function
   scm_gc_protect_object returns OBJ.
*/

/* Implementation note: For every object X, there is a counter which
   scm_gc_protect_object (X) increments and scm_gc_unprotect_object (X)
   decrements.
*/
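
/* For example (an illustrative sketch of the counting semantics):

     scm_gc_protect_object (obj);    counter becomes 1
     scm_gc_protect_object (obj);    counter becomes 2
     scm_gc_unprotect_object (obj);  counter becomes 1, still protected
     scm_gc_unprotect_object (obj);  counter becomes 0, collectible again
*/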


SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: Indeed; if my comment above is correct, there is the same
     critsec/mutex inconsistency here. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashq_create_handle_x (scm_protects, obj, scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  protected_obj_count++;

  SCM_CRITICAL_SECTION_END;

  return obj;
}

/* Remove any protection for OBJ established by a prior call to
   scm_gc_protect_object.  This function returns OBJ.

   See scm_gc_protect_object for more information.  */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  if (scm_gc_running_p)
    {
      fprintf (stderr, "scm_unprotect_object called during GC.\n");
      abort ();
    }

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, count);
    }
  protected_obj_count--;

  SCM_CRITICAL_SECTION_END;

  return obj;
}

void
scm_gc_register_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key,
                                      scm_from_int (0));
  /* njrev: note also that the above can probably signal an error */
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  SCM_CRITICAL_SECTION_END;
}

void
scm_gc_unregister_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashv_get_handle (scm_gc_registered_roots, key);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
        scm_hashv_remove_x (scm_gc_registered_roots, key);
      else
        SCM_SETCDR (handle, count);
    }

  SCM_CRITICAL_SECTION_END;
}

void
scm_gc_register_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_register_root (p);
}

void
scm_gc_unregister_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_unregister_root (p);
}
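
/* A minimal sketch of protecting a global SCM location that the
   conservative stack scan cannot see (`my_cached_value` is a made-up
   name, not part of this file):

     static SCM my_cached_value;

     my_cached_value = scm_cons (SCM_BOOL_T, SCM_EOL);
     scm_gc_register_root (&my_cached_value);
     ...
     scm_gc_unregister_root (&my_cached_value);
*/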

int scm_i_terminating;

\f


/*
  MOVE THIS FUNCTION.  IT DOES NOT HAVE ANYTHING TO DO WITH GC.
*/

/* Get an integer from an environment variable.  */
int
scm_getenv_int (const char *var, int def)
{
  char *end = 0;
  char *val = getenv (var);
  long res = def;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  return res;
}
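
/* For example (the variable name is made up):

     int n = scm_getenv_int ("MY_APP_LIMIT", 8);

   yields 8 when MY_APP_LIMIT is unset or does not start with a decimal
   number, and the parsed value otherwise.  */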

void
scm_storage_prehistory ()
{
  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
}

scm_i_pthread_mutex_t scm_i_gc_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;

int
scm_init_storage ()
{
  size_t j;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;

  scm_gc_init_freelist ();
  scm_gc_init_malloc ();

  j = SCM_HEAP_SEG_SIZE;


  /* Initialise the list of ports.  */
  scm_i_port_table = (scm_t_port **)
    malloc (sizeof (scm_t_port *) * scm_i_port_table_room);
  if (!scm_i_port_table)
    return 1;

#if 0
  /* We can't have a cleanup handler since we have no thread to run it
     in. */

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

#endif

  scm_stand_in_procs = scm_make_weak_key_hash_table (scm_from_int (257));
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);
  scm_gc_registered_roots = scm_c_make_hash_table (31);

  return 0;
}

\f

SCM scm_after_gc_hook;

static SCM gc_async;

/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}


/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void * hook_data SCM_UNUSED,
               void *func_data SCM_UNUSED,
               void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections, for the following reason: the
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if the
   * after-gc-hook were performed with every gc, and if the gc were performed
   * after a very small number of cell accesses, then the number of cell
   * accesses during the execution of the after-gc-hook would suffice to cause
   * the execution of the next gc.  Guile would then keep executing the
   * after-gc-hook over and over again, and would never get around to doing
   * anything else.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collector.  When running guile with cell access debugging and
   * the execution of the after-gc-hook is desired, it is necessary to run
   * the hook explicitly from the user code.  This has the effect that, from
   * the scheme level point of view, garbage collection appears to be
   * performed with a much lower frequency than it actually is.  Obviously,
   * this will not work for code that depends on a fixed one-to-one
   * relationship between the execution counts of the C level garbage
   * collection hooks and the execution count of the scheme level
   * after-gc-hook.
   */

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  if (scm_debug_cells_gc_interval == 0)
    scm_system_async_mark (gc_async);
#else
  scm_system_async_mark (gc_async);
#endif

  return NULL;
}

void
scm_init_gc ()
{
  scm_gc_init_mark ();

  scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  gc_async = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
                              gc_async_thunk);

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}
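
/* Illustrative Scheme-level usage of the hook defined above (a sketch,
   not part of this file's code):

     (add-hook! after-gc-hook
                (lambda () (display "gc\n")))

   The thunk takes no arguments; it is run via gc_async_thunk shortly
   after each collection.  */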


void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  scm_i_deprecated_memory_return = 0;

  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist);
  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist2);

  /*
    NOTHING HERE: LAZY SWEEPING!
  */
  scm_i_reset_segments ();

  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  /* Invalidate the freelists of other threads. */
  scm_i_thread_invalidate_freelists ();
}

#undef FUNC_NAME



/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/