Include min-yields in gc-stats output.
[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003, 2006, 2008 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18 #define _GNU_SOURCE
19
20 /* #define DEBUGINFO */
21
22 #if HAVE_CONFIG_H
23 # include <config.h>
24 #endif
25
26 #include <stdio.h>
27 #include <errno.h>
28 #include <string.h>
29 #include <assert.h>
30
31 #include "libguile/_scm.h"
32 #include "libguile/eval.h"
33 #include "libguile/stime.h"
34 #include "libguile/stackchk.h"
35 #include "libguile/struct.h"
36 #include "libguile/smob.h"
37 #include "libguile/unif.h"
38 #include "libguile/async.h"
39 #include "libguile/ports.h"
40 #include "libguile/root.h"
41 #include "libguile/strings.h"
42 #include "libguile/vectors.h"
43 #include "libguile/weaks.h"
44 #include "libguile/hashtab.h"
45 #include "libguile/tags.h"
46
47 #include "libguile/private-gc.h"
48 #include "libguile/validate.h"
49 #include "libguile/deprecation.h"
50 #include "libguile/gc.h"
51 #include "libguile/dynwind.h"
52
53 #ifdef GUILE_DEBUG_MALLOC
54 #include "libguile/debug-malloc.h"
55 #endif
56
57 #ifdef HAVE_MALLOC_H
58 #include <malloc.h>
59 #endif
60
61 #ifdef HAVE_UNISTD_H
62 #include <unistd.h>
63 #endif
64
/* Lock this mutex before doing lazy sweeping.
 */
scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;

/* Set this to != 0 if every cell that is accessed shall be checked:
 */
int scm_debug_cell_accesses_p = 0;
/* Set this to != 0 (in addition to the above) to also run the expensive
   heap-membership check and periodic forced GCs; see
   scm_i_expensive_validation_check.  */
int scm_expensive_debug_cell_accesses_p = 0;

/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
 * the number of cell accesses after which a gc shall be called.
 */
int scm_debug_cells_gc_interval = 0;

/*
  Global variable, so you can switch it off at runtime by setting
  scm_i_cell_validation_already_running.
 */
/* Non-zero while cell validation is in progress, so the validation code
   does not recurse into itself.  */
int scm_i_cell_validation_already_running ;
84
85 #if (SCM_DEBUG_CELL_ACCESSES == 1)
86
87
88 /*
89
90 Assert that the given object is a valid reference to a valid cell. This
91 test involves to determine whether the object is a cell pointer, whether
92 this pointer actually points into a heap segment and whether the cell
93 pointed to is not a free cell. Further, additional garbage collections may
94 get executed after a user defined number of cell accesses. This helps to
95 find places in the C code where references are dropped for extremely short
96 periods.
97
98 */
99 void
100 scm_i_expensive_validation_check (SCM cell)
101 {
102 if (!scm_in_heap_p (cell))
103 {
104 fprintf (stderr, "scm_assert_cell_valid: this object does not live in the heap: %lux\n",
105 (unsigned long) SCM_UNPACK (cell));
106 abort ();
107 }
108
109 /* If desired, perform additional garbage collections after a user
110 * defined number of cell accesses.
111 */
112 if (scm_debug_cells_gc_interval)
113 {
114 static unsigned int counter = 0;
115
116 if (counter != 0)
117 {
118 --counter;
119 }
120 else
121 {
122 counter = scm_debug_cells_gc_interval;
123 scm_gc ();
124 }
125 }
126 }
127
128 void
129 scm_assert_cell_valid (SCM cell)
130 {
131 if (!scm_i_cell_validation_already_running && scm_debug_cell_accesses_p)
132 {
133 scm_i_cell_validation_already_running = 1; /* set to avoid recursion */
134
135 /*
136 During GC, no user-code should be run, and the guile core
137 should use non-protected accessors.
138 */
139 if (scm_gc_running_p)
140 return;
141
142 /*
143 Only scm_in_heap_p and rescanning the heap is wildly
144 expensive.
145 */
146 if (scm_expensive_debug_cell_accesses_p)
147 scm_i_expensive_validation_check (cell);
148
149 if (!SCM_GC_MARK_P (cell))
150 {
151 fprintf (stderr,
152 "scm_assert_cell_valid: this object is unmarked. \n"
153 "It has been garbage-collected in the last GC run: "
154 "%lux\n",
155 (unsigned long) SCM_UNPACK (cell));
156 abort ();
157 }
158
159 scm_i_cell_validation_already_running = 0; /* re-enable */
160 }
161 }
162
163
164
SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
	    (SCM flag),
	    "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
	    "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
	    "but no additional calls to garbage collection are issued.\n"
	    "If @var{flag} is a number, strict cell access checking is enabled,\n"
	    "with an additional garbage collection after the given\n"
	    "number of cell accesses.\n"
	    "This procedure only exists when the compile-time flag\n"
	    "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (scm_is_false (flag))
    {
      /* #f: disable all cell-access checking.  */
      scm_debug_cell_accesses_p = 0;
    }
  else if (scm_is_eq (flag, SCM_BOOL_T))
    {
      /* #t: cheap checking only — no heap scans, no forced GCs.  */
      scm_debug_cells_gc_interval = 0;
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 0;
    }
  else
    {
      /* A number: expensive checking plus a forced GC every FLAG
	 accesses.  Signals a range error for non-integers or values
	 outside [0, INT_MAX].  */
      scm_debug_cells_gc_interval = scm_to_signed_integer (flag, 0, INT_MAX);
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 1;
    }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
196
197
198 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
199
200 \f
201
202
/* scm_mtrigger
 * is the number of bytes of malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
unsigned long scm_last_cells_allocated = 0;   /* snapshot taken at each stats update */
unsigned long scm_mallocated = 0;

/* Global GC sweep statistics since the last full GC.  */
static scm_t_sweep_statistics scm_i_gc_sweep_stats = { 0, 0 };
/* Sweep statistics of the previous full-GC cycle (saved in gc_end_stats).  */
static scm_t_sweep_statistics scm_i_gc_sweep_stats_1 = { 0, 0 };

/* Total count of cells marked/swept.  Doubles, since these grow without
   bound over the process lifetime.  */
static double scm_gc_cells_marked_acc = 0.;
static double scm_gc_cells_swept_acc = 0.;
static double scm_gc_cells_allocated_acc = 0.;

/* Times below are in units of scm_c_get_internal_run_time ().  */
static unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;             /* set at the start of each GC */
static unsigned long scm_gc_mark_time_taken = 0;

static unsigned long scm_gc_times = 0;        /* number of GC runs so far */

static int scm_gc_cell_yield_percentage = 0;
static unsigned long protected_obj_count = 0;

/* The following are accessed from `gc-malloc.c' and `gc-card.c'.  */
int scm_gc_malloc_yield_percentage = 0;
unsigned long scm_gc_malloc_collected = 0;


/* Keys used for the alist returned by `gc-stats'.  */
SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");
SCM_SYMBOL (sym_malloc_yield, "malloc-yield");
SCM_SYMBOL (sym_cell_yield, "cell-yield");
SCM_SYMBOL (sym_min_cell_yield, "min-cell-yield");
SCM_SYMBOL (sym_min_double_cell_yield, "min-double-cell-yield");
SCM_SYMBOL (sym_protected_objects, "protected-objects");
SCM_SYMBOL (sym_total_cells_allocated, "total-cells-allocated");


/* Number of calls to SCM_NEWCELL since startup.  */
unsigned scm_newcell_count;
unsigned scm_newcell2_count;
258
259
260 /* {Scheme Interface to GC}
261 */
262 static SCM
263 tag_table_to_type_alist (void *closure, SCM key, SCM val, SCM acc)
264 {
265 if (scm_is_integer (key))
266 {
267 int c_tag = scm_to_int (key);
268
269 char const * name = scm_i_tag_name (c_tag);
270 if (name != NULL)
271 {
272 key = scm_from_locale_string (name);
273 }
274 else
275 {
276 char s[100];
277 sprintf (s, "tag %d", c_tag);
278 key = scm_from_locale_string (s);
279 }
280 }
281
282 return scm_cons (scm_cons (key, val), acc);
283 }
284
285 SCM_DEFINE (scm_gc_live_object_stats, "gc-live-object-stats", 0, 0, 0,
286 (),
287 "Return an alist of statistics of the current live objects. ")
288 #define FUNC_NAME s_scm_gc_live_object_stats
289 {
290 SCM tab = scm_make_hash_table (scm_from_int (57));
291 SCM alist;
292
293 scm_i_all_segments_statistics (tab);
294
295 alist
296 = scm_internal_hash_fold (&tag_table_to_type_alist, NULL, SCM_EOL, tab);
297
298 return alist;
299 }
300 #undef FUNC_NAME
301
/* NOTE(review): this extern is redundant — the variable is defined
   earlier in this file.  Kept as-is.  */
extern int scm_gc_malloc_yield_percentage;
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
	    "Return an association list of statistics about Guile's current\n"
	    "use of storage.\n")
#define FUNC_NAME s_scm_gc_stats
{
  long i = 0;
  SCM heap_segs = SCM_EOL ;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  int local_scm_gc_cell_yield_percentage;
  int local_scm_gc_malloc_yield_percentage;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_protected_obj_count;
  unsigned long int local_min_cell_yield;
  unsigned long int local_min_double_cell_yield;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  double local_scm_total_cells_allocated;
  SCM answer;
  unsigned long *bounds = 0;
  int table_size = scm_i_heap_segment_table_size;
  /* Everything below, including the consing, runs inside one critical
     section so the numbers form a consistent snapshot.  */
  SCM_CRITICAL_SECTION_START;

  /*
    temporarily store the numbers, so as not to cause GC.
   */
  bounds = malloc (sizeof (unsigned long)  * table_size * 2);
  if (!bounds)
    abort();
  for (i = table_size; i--; )
    {
      bounds[2*i] = (unsigned long)scm_i_heap_segment_table[i]->bounds[0];
      bounds[2*i+1] = (unsigned long)scm_i_heap_segment_table[i]->bounds[1];
    }


  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;

  local_scm_cells_allocated = scm_cells_allocated;
  /* The min-yield figures of the two master freelists (single and
     double cells) — this is what the `min-cell-yield' /
     `min-double-cell-yield' entries report.  */
  local_min_cell_yield = scm_i_master_freelist.min_yield;
  local_min_double_cell_yield = scm_i_master_freelist2.min_yield;

  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_malloc_yield_percentage = scm_gc_malloc_yield_percentage;
  local_scm_gc_cell_yield_percentage=  scm_gc_cell_yield_percentage;
  local_protected_obj_count = protected_obj_count;
  /* swept = cells examined; marked = swept minus collected.  */
  local_scm_gc_cells_swept =
    (double) scm_gc_cells_swept_acc
    + (double) scm_i_gc_sweep_stats.swept;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc
    +(double) scm_i_gc_sweep_stats.swept
    -(double) scm_i_gc_sweep_stats.collected;

  local_scm_total_cells_allocated = scm_gc_cells_allocated_acc
    + (double) (scm_cells_allocated - scm_last_cells_allocated);

  /* Rebuild the segment-bounds alist from the snapshot taken above.  */
  for (i = table_size; i--;)
    {
      heap_segs = scm_cons (scm_cons (scm_from_ulong (bounds[2*i]),
				      scm_from_ulong (bounds[2*i+1])),
			    heap_segs);
    }
  /* njrev: can any of these scm_cons's or scm_list_n signal a memory
     error?  If so we need a frame here. */
  answer =
    scm_list_n (scm_cons (sym_gc_time_taken,
			  scm_from_ulong (local_scm_gc_time_taken)),
		scm_cons (sym_cells_allocated,
			  scm_from_ulong (local_scm_cells_allocated)),
		scm_cons (sym_total_cells_allocated,
			  scm_from_double (local_scm_total_cells_allocated)),
		scm_cons (sym_heap_size,
			  scm_from_ulong (local_scm_heap_size)),
		scm_cons (sym_mallocated,
			  scm_from_ulong (local_scm_mallocated)),
		scm_cons (sym_mtrigger,
			  scm_from_ulong (local_scm_mtrigger)),
		scm_cons (sym_times,
			  scm_from_ulong (local_scm_gc_times)),
		scm_cons (sym_gc_mark_time_taken,
			  scm_from_ulong (local_scm_gc_mark_time_taken)),
		scm_cons (sym_cells_marked,
			  scm_from_double (local_scm_gc_cells_marked)),
		scm_cons (sym_cells_swept,
			  scm_from_double (local_scm_gc_cells_swept)),
		scm_cons (sym_malloc_yield,
			  scm_from_long(local_scm_gc_malloc_yield_percentage)),
		scm_cons (sym_cell_yield,
			  scm_from_long (local_scm_gc_cell_yield_percentage)),
		scm_cons (sym_protected_objects,
			  scm_from_ulong (local_protected_obj_count)),
		scm_cons (sym_min_cell_yield,
			  scm_from_ulong (local_min_cell_yield)),
		scm_cons (sym_min_double_cell_yield,
			  scm_from_ulong (local_min_double_cell_yield)),
		scm_cons (sym_heap_segments, heap_segs),
		SCM_UNDEFINED);
  SCM_CRITICAL_SECTION_END;

  free (bounds);
  return answer;
}
#undef FUNC_NAME
418
419 /* Update the global sweeping/collection statistics by adding SWEEP_STATS to
420 SCM_I_GC_SWEEP_STATS and updating related variables. */
421 static inline void
422 gc_update_stats (scm_t_sweep_statistics sweep_stats)
423 {
424 /* CELLS SWEPT is another word for the number of cells that were examined
425 during GC. YIELD is the number that we cleaned out. MARKED is the number
426 that weren't cleaned. */
427
428 scm_gc_cell_yield_percentage = (sweep_stats.collected * 100) / SCM_HEAP_SIZE;
429
430 scm_i_sweep_statistics_sum (&scm_i_gc_sweep_stats, sweep_stats);
431
432 if ((scm_i_gc_sweep_stats.collected > scm_i_gc_sweep_stats.swept)
433 || (scm_cells_allocated < sweep_stats.collected))
434 {
435 printf ("internal GC error, please report to `"
436 PACKAGE_BUGREPORT "'\n");
437 abort ();
438 }
439
440 scm_gc_cells_allocated_acc +=
441 (double) (scm_cells_allocated - scm_last_cells_allocated);
442
443 scm_cells_allocated -= sweep_stats.collected;
444 scm_last_cells_allocated = scm_cells_allocated;
445 }
446
/* Begin statistics collection for one GC run: record the start time
   (used by gc_end_stats to compute gc-time-taken) and reset the
   per-run malloc-collection counter.  WHAT is the reason for the GC;
   it is only used for debug output elsewhere.  */
static void
gc_start_stats (const char *what SCM_UNUSED)
{
  t_before_gc = scm_c_get_internal_run_time ();

  scm_gc_malloc_collected = 0;
}
454
/* Finish statistics collection for one GC run: account the elapsed
   time, roll the per-cycle sweep counters, and bump the GC counter.  */
static void
gc_end_stats (scm_t_sweep_statistics sweep_stats)
{
  unsigned long t = scm_c_get_internal_run_time ();

  scm_gc_time_taken += (t - t_before_gc);

  /* Reset the number of cells swept/collected since the last full GC.
     The previous cycle's totals are saved first so min-yield tuning
     (scm_i_adjust_min_yield) can compare the two cycles.  */
  scm_i_gc_sweep_stats_1 = scm_i_gc_sweep_stats;
  scm_i_gc_sweep_stats.collected = scm_i_gc_sweep_stats.swept = 0;

  /* Re-add this run's numbers, so the accumulator updates below see
     only the current GC's swept/collected counts.  */
  gc_update_stats (sweep_stats);

  scm_gc_cells_marked_acc += (double) scm_i_gc_sweep_stats.swept
    - (double) scm_i_gc_sweep_stats.collected;
  scm_gc_cells_swept_acc += (double) scm_i_gc_sweep_stats.swept;

  ++scm_gc_times;
}
474
475
476 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
477 (SCM obj),
478 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
479 "returned by this function for @var{obj}")
480 #define FUNC_NAME s_scm_object_address
481 {
482 return scm_from_ulong (SCM_UNPACK (obj));
483 }
484 #undef FUNC_NAME
485
486
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
           (),
	    "Scans all of SCM objects and reclaims for further use those that are\n"
	    "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  /* Take the sweep mutex and flag the collector as running for the
     whole of scm_i_gc; the after-gc hook runs only after both are
     released.  */
  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
  scm_gc_running_p = 1;
  scm_i_gc ("call");
  /* njrev: It looks as though other places, e.g. scm_realloc,
     can call scm_i_gc without acquiring the sweep mutex.  Does this
     matter?  Also scm_i_gc (or its descendants) touch the
     scm_sys_protects, which are protected in some cases
     (e.g. scm_permobjs above in scm_gc_stats) by a critical section,
     not by the sweep mutex.  Shouldn't all the GC-relevant objects be
     protected in the same way? */
  scm_gc_running_p = 0;
  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
509
510
511 \f
512
513 /* The master is global and common while the freelist will be
514 * individual for each thread.
515 */
516
/* Refill a thread's freelist when it runs dry and return one fresh
   cell.  FREELIST is the per-type master freelist; *FREE_CELLS receives
   the remainder of the newly obtained cells.  The escalation order is:
   (1) lazy-sweep existing segments, (2) grow the heap if policy allows,
   (3) run a full GC, (4) grow the heap unconditionally, (5) abort.
   Runs with the sweep mutex held; the after-gc hook fires only if a GC
   actually happened.  */
SCM
scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
{
  SCM cell;
  int did_gc = 0;
  scm_t_sweep_statistics sweep_stats;

  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
  scm_gc_running_p = 1;

  /* Step 1: try to find cells by lazily sweeping existing segments.  */
  *free_cells = scm_i_sweep_some_segments (freelist, &sweep_stats);
  gc_update_stats (sweep_stats);

  /* Step 2: grow the heap, if the growth policy says we may.  */
  if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
    {
      freelist->heap_segment_idx =
	scm_i_get_new_heap_segment (freelist,
				    scm_i_gc_sweep_stats,
				    abort_on_error);

      *free_cells = scm_i_sweep_some_segments (freelist, &sweep_stats);
      gc_update_stats (sweep_stats);
    }

  /* Step 3: full garbage collection.  */
  if (*free_cells == SCM_EOL)
    {
      /*
	with the advent of lazy sweep, GC yield is only known just
	before doing the GC.
      */
      scm_i_adjust_min_yield (freelist,
			      scm_i_gc_sweep_stats,
			      scm_i_gc_sweep_stats_1);

      /*
	out of fresh cells. Try to get some new ones.
      */
      did_gc = 1;
      scm_i_gc ("cells");

      *free_cells = scm_i_sweep_some_segments (freelist, &sweep_stats);
      gc_update_stats (sweep_stats);
    }

  /* Step 4: GC did not help either — grow the heap regardless.  */
  if (*free_cells == SCM_EOL)
    {
      /*
	failed getting new cells. Get new juice or die.
      */
      freelist->heap_segment_idx =
	scm_i_get_new_heap_segment (freelist,
				    scm_i_gc_sweep_stats,
				    abort_on_error);

      *free_cells = scm_i_sweep_some_segments (freelist, &sweep_stats);
      gc_update_stats (sweep_stats);
    }

  /* Step 5: truly out of memory.  */
  if (*free_cells == SCM_EOL)
    abort ();

  /* Pop the first cell for the caller; the rest stays on the thread's
     freelist.  */
  cell = *free_cells;

  *free_cells = SCM_FREE_CELL_CDR (cell);

  scm_gc_running_p = 0;
  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);

  if (did_gc)
    scm_c_hook_run (&scm_after_gc_c_hook, 0);

  return cell;
}
590
591
/* C-level hooks run at the corresponding GC phases (initialised in
   scm_storage_prehistory; see scm_i_gc for where each one fires).  */
scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;
597
598 /* Must be called while holding scm_i_sweep_mutex.
599 */
600
/* Run one full garbage collection.  WHAT names the trigger (for debug
   output only).  Caller must hold scm_i_sweep_mutex.  Phases: put all
   other threads to sleep, finish any pending lazy sweep, mark, then
   (lazily) sweep, with the corresponding C hooks run around each
   phase.  */
void
scm_i_gc (const char *what)
{
  scm_t_sweep_statistics sweep_stats;

  scm_i_thread_put_to_sleep ();

  scm_c_hook_run (&scm_before_gc_c_hook, 0);

#ifdef DEBUGINFO
  fprintf (stderr,"gc reason %s\n", what);

  fprintf (stderr,
	   scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist))
	   ? "*"
	   : (scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist2)) ? "o" : "m"));
#endif

  gc_start_stats (what);

  /*
    Set freelists to NULL so scm_cons() always triggers gc, causing
    the assertion above to fail.
  */
  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  /*
    Let's finish the sweep. The conservative GC might point into the
    garbage, and marking that would create a mess.
  */
  scm_i_sweep_all_segments ("GC", &sweep_stats);

  /* Invariant: the number of cells collected (i.e., freed) must always be
     lower than or equal to the number of cells "swept" (i.e., visited).  */
  assert (sweep_stats.collected <= sweep_stats.swept);

  if (scm_mallocated < scm_i_deprecated_memory_return)
    {
      /* The byte count of allocated objects has underflowed.  This is
	 probably because you forgot to report the sizes of objects you
	 have allocated, by calling scm_done_malloc or some such.  When
	 the GC freed them, it subtracted their size from
	 scm_mallocated, which underflowed.  */
      fprintf (stderr,
	       "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
	       "This is probably because the GC hasn't been correctly informed\n"
	       "about object sizes\n");
      abort ();
    }
  scm_mallocated -= scm_i_deprecated_memory_return;


  /* Mark */

  scm_c_hook_run (&scm_before_mark_c_hook, 0);
  scm_mark_all ();
  scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);

  /* Sweep

     TODO: the after_sweep hook should probably be moved to just before
     the mark, since that's where the sweep is finished in lazy
     sweeping.

     MDJ 030219 <djurfeldt@nada.kth.se>: No, probably not.  The
     original meaning implied at least two things: that it would be
     called when

       1. the freelist is re-initialized (no evaluation possible, though)

     and

       2. the heap is "fresh"
          (it is well-defined what data is used and what is not)

     Neither of these conditions would hold just before the mark phase.

     Of course, the lazy sweeping has muddled the distinction between
     scm_before_sweep_c_hook and scm_after_sweep_c_hook, but even if
     there were no difference, it would still be useful to have two
     distinct classes of hook functions since this can prevent some
     bad interference when several modules adds gc hooks.
   */

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);
  scm_gc_sweep ();
  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  gc_end_stats (sweep_stats);

  scm_i_thread_wake_up ();

  /*
    For debugging purposes, you could do
    scm_i_sweep_all_segments("debug"), but then the remains of the
    cell aren't left to analyse.
   */
}
700
701
702 \f
703 /* {GC Protection Helper Functions}
704 */
705
706
707 /*
708 * If within a function you need to protect one or more scheme objects from
709 * garbage collection, pass them as parameters to one of the
710 * scm_remember_upto_here* functions below. These functions don't do
711 * anything, but since the compiler does not know that they are actually
712 * no-ops, it will generate code that calls these functions with the given
713 * parameters. Therefore, you can be sure that the compiler will keep those
714 * scheme values alive (on the stack or in a register) up to the point where
715 * scm_remember_upto_here* is called. In other words, place the call to
716 * scm_remember_upto_here* _behind_ the last code in your function, that
717 * depends on the scheme object to exist.
718 *
719 * Example: We want to make sure that the string object str does not get
720 * garbage collected during the execution of 'some_function' in the code
721 * below, because otherwise the characters belonging to str would be freed and
722 * 'some_function' might access freed memory. To make sure that the compiler
723 * keeps str alive on the stack or in a register such that it is visible to
724 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
725 * call to 'some_function'. Note that this would not be necessary if str was
726 * used anyway after the call to 'some_function'.
727 * char *chars = scm_i_string_chars (str);
728 * some_function (chars);
729 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
730 */
731
732 /* Remove any macro versions of these while defining the functions.
733 Functions are always included in the library, for upward binary
734 compatibility and in case combinations of GCC and non-GCC are used. */
735 #undef scm_remember_upto_here_1
736 #undef scm_remember_upto_here_2
737
/* Deliberately empty: the mere (non-inlined) call keeps OBJ visible to
   the conservative GC up to this point; see the commentary above.  */
void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty. Protects a single object from garbage collection. */
}

/* As above, for two objects.  */
void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty. Protects two objects from garbage collection. */
}

/* As above, for any number of objects.  */
void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty. Protects any number of objects from garbage collection. */
}
755
756 /*
757 These crazy functions prevent garbage collection
758 of arguments after the first argument by
759 ensuring they remain live throughout the
760 function because they are used in the last
761 line of the code block.
762 It'd be better to have a nice compiler hint to
763 aid the conservative stack-scanning GC. --03/09/00 gjb */
/* Return ELT unchanged; the trailing varargs exist only so callers can
   keep additional objects live (visible to the conservative GC) until
   this call.  */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

/* Same idea as scm_return_first, for a plain int first argument.  */
int
scm_return_first_int (int i, ...)
{
  return i;
}
775
776
/* Protect OBJ from collection for the rest of the process lifetime by
   consing it onto the global scm_permobjs list.  Returns OBJ.  There
   is no way to undo this; for revocable protection use
   scm_gc_protect_object.  */
SCM
scm_permanent_object (SCM obj)
{
  SCM cell = scm_cons (obj, SCM_EOL);
  /* The list splice is guarded so concurrent callers cannot lose a
     cell.  */
  SCM_CRITICAL_SECTION_START;
  SCM_SETCDR (cell, scm_permobjs);
  scm_permobjs = cell;
  SCM_CRITICAL_SECTION_END;
  return obj;
}
787
788
789 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
790 other references are dropped, until the object is unprotected by calling
791 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
792 i. e. it is possible to protect the same object several times, but it is
793 necessary to unprotect the object the same number of times to actually get
794 the object unprotected. It is an error to unprotect an object more often
795 than it has been protected before. The function scm_protect_object returns
796 OBJ.
797 */
798
799 /* Implementation note: For every object X, there is a counter which
800 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
801 */
802
803
804
/* Protect OBJ from collection, with nesting: each call increments OBJ's
   reference count in the scm_protects hash table (creating the entry on
   first use), and a matching number of scm_gc_unprotect_object calls is
   needed to release it.  Returns OBJ.  */
SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: Indeed; if my comment above is correct, there is the same
     critsec/mutex inconsistency here. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashq_create_handle_x (scm_protects, obj, scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  protected_obj_count ++;

  SCM_CRITICAL_SECTION_END;

  return obj;
}
824
825
826 /* Remove any protection for OBJ established by a prior call to
827 scm_protect_object. This function returns OBJ.
828
829 See scm_protect_object for more information. */
/* Undo one level of scm_gc_protect_object nesting for OBJ; the hash
   entry is removed once its count reaches zero.  Aborts if called
   during GC or on an object that is not currently protected.  Returns
   OBJ.  */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  if (scm_gc_running_p)
    {
      fprintf (stderr, "scm_unprotect_object called during GC.\n");
      abort ();
    }

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
	scm_hashq_remove_x (scm_protects, obj);
      else
	SCM_SETCDR (handle, count);
    }
  protected_obj_count --;

  SCM_CRITICAL_SECTION_END;

  return obj;
}
866
/* Register the SCM location *P as a GC root, with nesting: the address
   is keyed (as an unsigned long) into scm_gc_registered_roots with a
   per-address count, mirroring scm_gc_protect_object.  */
void
scm_gc_register_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key,
				      scm_from_int (0));
  /* njrev: note also that the above can probably signal an error */
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  SCM_CRITICAL_SECTION_END;
}
884
/* Undo one level of scm_gc_register_root for location *P; the entry is
   removed when its count reaches zero.  Aborts if *P was never
   registered.  */
void
scm_gc_unregister_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashv_get_handle (scm_gc_registered_roots, key);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
	scm_hashv_remove_x (scm_gc_registered_roots, key);
      else
	SCM_SETCDR (handle, count);
    }

  SCM_CRITICAL_SECTION_END;
}
913
914 void
915 scm_gc_register_roots (SCM *b, unsigned long n)
916 {
917 SCM *p = b;
918 for (; p < b + n; ++p)
919 scm_gc_register_root (p);
920 }
921
922 void
923 scm_gc_unregister_roots (SCM *b, unsigned long n)
924 {
925 SCM *p = b;
926 for (; p < b + n; ++p)
927 scm_gc_unregister_root (p);
928 }
929
930 int scm_i_terminating;
931
932 \f
933
934
935 /*
936 MOVE THIS FUNCTION. IT DOES NOT HAVE ANYTHING TODO WITH GC.
937 */
938
/* Get an integer from environment variable VAR, returning DEF when VAR
   is unset, empty, does not start with a number, or does not fit in an
   `int'.  Trailing non-numeric characters are ignored ("12abc" yields
   12), preserving the historical behaviour.  */
int
scm_getenv_int (const char *var, int def)
{
  char *end = 0;
  char *val = getenv (var);
  long res;

  if (!val)
    return def;

  errno = 0;
  res = strtol (val, &end, 10);

  /* No digits were consumed: not a number at all.  */
  if (end == val)
    return def;

  /* Fix: previously an out-of-range value overflowed the `long' (and
     the long->int conversion silently truncated).  Treat any value
     that does not fit in an `int' as absent and fall back to DEF.  */
  if (errno == ERANGE || res > INT_MAX || res < INT_MIN)
    return def;

  return (int) res;
}
953
954 void
955 scm_storage_prehistory ()
956 {
957 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
958 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
959 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
960 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
961 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
962 }
963
964 scm_i_pthread_mutex_t scm_i_gc_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
965
/* One-time storage-subsystem initialisation: clear the scm_sys_protects
   roots, set up the freelists and malloc accounting, and create the
   protection tables.  Always returns 0.  */
int
scm_init_storage ()
{
  size_t j;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;

  scm_gc_init_freelist();
  scm_gc_init_malloc ();

  /* NOTE(review): dead store — j is not used after this point.
     Probably a leftover from older code.  */
  j = SCM_HEAP_SEG_SIZE;

#if 0
  /* We can't have a cleanup handler since we have no thread to run it
     in. */

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

#endif

  scm_stand_in_procs = scm_make_weak_key_hash_table (scm_from_int (257));
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);
  scm_gc_registered_roots = scm_c_make_hash_table (31);

  return 0;
}
1001
1002 \f
1003
1004 SCM scm_after_gc_hook;
1005
1006 static SCM gc_async;
1007
1008 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
1009 * is run after the gc, as soon as the asynchronous events are handled by the
1010 * evaluator.
1011 */
/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}
1018
1019
1020 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
1021 * the garbage collection. The only purpose of this function is to mark the
1022 * gc_async (which will eventually lead to the execution of the
1023 * gc_async_thunk).
1024 */
/* Hook function installed on scm_after_gc_c_hook: marks gc_async so the
   Scheme-level after-gc-hook eventually runs (via gc_async_thunk).  All
   three arguments are unused; always returns NULL.  */
static void *
mark_gc_async (void * hook_data SCM_UNUSED,
	       void *fn_data SCM_UNUSED,
	       void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if the
   * after-gc-hook was performed with every gc, and if the gc was performed
   * after a very small number of cell accesses, then the number of cell
   * accesses during the execution of the after-gc-hook will suffice to cause
   * the execution of the next gc.  Then, guile would keep executing the
   * after-gc-hook over and over again, and would never come to do other
   * things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collecter.  When running guile with cell access debugging and the
   * execution of the after-gc-hook is desired, then it is necessary to run
   * the hook explicitly from the user code.  This has the effect, that from
   * the scheme level point of view it seems that garbage collection is
   * performed with a much lower frequency than it actually is.  Obviously,
   * this will not work for code that depends on a fixed one to one
   * relationship between the execution counts of the C level garbage
   * collection hooks and the execution count of the scheme level
   * after-gc-hook.
   */

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  if (scm_debug_cells_gc_interval == 0)
    scm_system_async_mark (gc_async);
#else
  scm_system_async_mark (gc_async);
#endif

  return NULL;
}
1064
/* Initialise the GC subsystem's Scheme-visible parts: the mark phase,
   the after-gc-hook, the async that runs it, and (via the generated
   gc.x include) the primitives defined in this file.  */
void
scm_init_gc ()
{
  scm_gc_init_mark ();

  scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  gc_async = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
			      gc_async_thunk);

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

  /* Register the SCM_DEFINE'd primitives from this file.  */
#include "libguile/gc.x"
}
1080
1081 #ifdef __ia64__
1082 # ifdef __hpux
1083 # include <sys/param.h>
1084 # include <sys/pstat.h>
/* HP-UX/ia64: locate the base of the register backing store by scanning
   the process VM regions for the RSE stack.  Aborts if none is found.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}

/* HP-UX/ia64: extract the ar.bsp register (top of the register backing
   store) from a ucontext CTX.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp(ctx, &bsp);
  return (void *) bsp;
}
1102 # endif /* hpux */
1103 # ifdef linux
1104 # include <ucontext.h>
/* Linux/ia64: glibc exports the register backing store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}

/* Linux/ia64: read ar.bsp (top of the register backing store) out of a
   ucontext_t passed as an opaque pointer.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
1117 # endif /* linux */
1118 #endif /* __ia64__ */
1119
/* "Sweep" phase of the collector.  With lazy sweeping, no actual
   reclamation happens here: the freelists and segment state are reset
   so that subsequent allocation sweeps segments incrementally.  */
void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  scm_i_deprecated_memory_return = 0;

  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist);
  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist2);

  /*
    NOTHING HERE: LAZY SWEEPING !
   */
  scm_i_reset_segments ();

  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  /* Invalidate the freelists of other threads. */
  scm_i_thread_invalidate_freelists ();
}
#undef FUNC_NAME
1142
1143
1144
1145 /*
1146 Local Variables:
1147 c-file-style: "gnu"
1148 End:
1149 */