/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice.  */


/* #define DEBUGINFO */

/* SECTION: This code is compiled once.
 */

#ifndef MARK_DEPENDENCIES

\f
#include <stdio.h>
#include <errno.h>
#include <string.h>

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/validate.h"
#include "libguile/deprecation.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start(x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start(x)
#endif

\f

unsigned int scm_gc_running_p = 0;

\f

#if (SCM_DEBUG_CELL_ACCESSES == 1)

scm_t_bits scm_tc16_allocated;

/* Set this to != 0 if every cell that is accessed shall be checked:
 */
unsigned int scm_debug_cell_accesses_p = 1;

/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
 * the number of cell accesses after which a gc shall be called.
 */
static unsigned int debug_cells_gc_interval = 0;


/* If an allocated cell is detected during garbage collection, this means that
 * some code has just obtained the object but was preempted before the
 * initialization of the object was completed.  This means that some entries
 * of the allocated cell may already contain SCM objects.  Therefore,
 * allocated cells are scanned conservatively.  */
static SCM
allocated_mark (SCM allocated)
{
  scm_gc_mark_cell_conservatively (allocated);
  return SCM_BOOL_F;
}


/* Assert that the given object is a valid reference to a valid cell.  This
 * test involves determining whether the object is a cell pointer, whether
 * this pointer actually points into a heap segment and whether the cell
 * pointed to is not a free cell.  Further, additional garbage collections may
 * get executed after a user-defined number of cell accesses.  This helps to
 * find places in the C code where references are dropped for extremely short
 * periods.
 */
void
scm_assert_cell_valid (SCM cell)
{
  static unsigned int already_running = 0;

  if (scm_debug_cell_accesses_p && !already_running)
    {
      already_running = 1;  /* set to avoid recursion */

      if (!scm_cellp (cell))
        {
          fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n",
                   (unsigned long) SCM_UNPACK (cell));
          abort ();
        }
      else if (!scm_gc_running_p)
        {
          /* Dirk::FIXME:: During garbage collection there occur references to
             free cells.  This is all right during conservative marking, but
             should not happen otherwise (I think).  The case of free cells
             accessed during conservative marking is handled in function
             scm_mark_locations.  However, there still occur accesses to free
             cells during gc.  I don't understand why this happens.  If it is
             a bug and gets fixed, the following test should also work while
             gc is running.
           */
          if (SCM_FREE_CELL_P (cell))
            {
              fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n",
                       (unsigned long) SCM_UNPACK (cell));
              abort ();
            }

          /* If desired, perform additional garbage collections after a
           * user-defined number of cell accesses.
           */
          if (debug_cells_gc_interval)
            {
              static unsigned int counter = 0;

              if (counter != 0)
                {
                  --counter;
                }
              else
                {
                  counter = debug_cells_gc_interval;
                  scm_igc ("scm_assert_cell_valid");
                }
            }
        }
      already_running = 0;  /* re-enable */
    }
}


SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
            "but no additional calls to garbage collection are issued.\n"
            "If @var{flag} is a number, cell access checking is enabled,\n"
            "with an additional garbage collection after the given\n"
            "number of cell accesses.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (SCM_FALSEP (flag)) {
    scm_debug_cell_accesses_p = 0;
  } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
    debug_cells_gc_interval = 0;
    scm_debug_cell_accesses_p = 1;
  } else if (SCM_INUMP (flag)) {
    long int f = SCM_INUM (flag);
    if (f <= 0) SCM_OUT_OF_RANGE (1, flag);
    debug_cells_gc_interval = f;
    scm_debug_cell_accesses_p = 1;
  } else {
    SCM_WRONG_TYPE_ARG (1, flag);
  }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
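
/* Usage sketch (illustrative, not part of the original source): in a build
 * compiled with SCM_DEBUG_CELL_ACCESSES == 1, checking with an extra GC
 * every 1000 cell accesses can be enabled from Scheme with
 *
 *   (set-debug-cell-accesses! 1000)
 *
 * and disabled again with (set-debug-cell-accesses! #f).
 */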

#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

\f

/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation.  The heap
 * is the area out of which scm_cons and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine.  The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of heap.  If this much heap is
 * allocated initially the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed.  SCM_HEAP_SEG_SIZE must fit into type size_t.  This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more heap
 * is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */

/*
 * A heap size of 45000 cells and a 40% min yield give quick startup and no
 * extra heap allocation.  Higher values for min yield may lead to large
 * heaps, especially if a program varies its maximum consumption between
 * the different freelists.
 */

#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
size_t scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
                                        / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
int scm_default_min_yield_1 = 40;

#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
size_t scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
                                        / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application
 */
int scm_default_min_yield_2 = 40;

size_t scm_default_max_segment_size = 2097000L;  /* a little less (adm) than 2 Mb */

#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
#  define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
#  define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make heap grow with factor 1.5 */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
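
/* Worked example (illustrative, not part of the original source):
 * SCM_DATA_CELLS2CARDS is a ceiling division.  If a card held, say, 100 data
 * cells, then SCM_DATA_CELLS2CARDS (2000L) = (2000 + 99) / 100 = 20 cards,
 * and SCM_CLUSTER_SIZE_1 would be 20 * 100 = 2000 cells.  Likewise for
 * SCM_EXPHEAP: with a current heap of 90000 cells, the heap grows by
 * 90000 / 2 = 45000 cells, i.e. by a factor of 1.5 overall.
 */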

/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
   aligned inner bounds for allocated storage */

#ifdef PROT386
/* in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif  /* UNICOS */
#endif  /* PROT386 */

#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)

#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
#define CLUSTER_SIZE_IN_BYTES(freelist) \
  (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
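
/* Illustrative example (not part of the original source): on a machine where
 * sizeof (scm_cell) == 8, CELL_UP (p, 1) rounds p up to the next multiple of
 * 8, e.g. CELL_UP (0x1005, 1) == 0x1008, while CELL_DN (0x1005, 1) == 0x1000.
 * For span == 2 (double cells) the same macros round to multiples of 16.
 */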

\f
/* scm_freelists
 */

typedef struct scm_t_freelist {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  unsigned int cluster_size;
  /* indicates that we should grow heap instead of GC:ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  unsigned long collected;
  /* number of collected cells during penultimate GC */
  unsigned long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  unsigned long heap_size;
} scm_t_freelist;

SCM scm_freelist = SCM_EOL;
scm_t_freelist scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_t_freelist scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0, 0
};
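
/* For readability (illustrative comment, not from the original source), the
 * positional initializers above correspond to these fields:
 *
 *   { .cells = SCM_EOL, .left_to_collect = 0, .clusters_allocated = 0,
 *     .clusters = SCM_EOL, .clustertail = 0,
 *     .cluster_size = SCM_CLUSTER_SIZE_1, .grow_heap_p = 0, .min_yield = 0,
 *     .min_yield_fraction = 0, .span = 1, .collected = 0, .collected_1 = 0,
 *     .heap_size = 0 }
 *
 * i.e. the two masters differ only in cluster_size and span (1 vs. 2).
 */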

/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no
 * allocation is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* During collection, this accumulates structures which are to be freed.
 */
SCM scm_structs_to_free;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
unsigned long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0;  /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;
static unsigned long t_before_sweep;
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_sweep_time_taken = 0;
unsigned long scm_gc_times = 0;
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;
double scm_gc_cells_swept_acc = 0.;

SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");

typedef struct scm_t_heap_seg_data
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_t_freelist *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_t_heap_seg_data;



static size_t init_heap_seg (SCM_CELLPTR, size_t, scm_t_freelist *);

typedef enum { return_on_error, abort_on_error } policy_on_error;
static void alloc_some_heap (scm_t_freelist *, policy_on_error);


#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))

#define BVEC_GROW_SIZE 256
#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))

/* mark space allocation */

typedef struct scm_t_mark_space
{
  scm_t_c_bvec_limb *bvec_space;
  struct scm_t_mark_space *next;
} scm_t_mark_space;

static scm_t_mark_space *current_mark_space;
static scm_t_mark_space **mark_space_ptr;
static ptrdiff_t current_mark_space_offset;
static scm_t_mark_space *mark_space_head;

static scm_t_c_bvec_limb *
get_bvec (void)
#define FUNC_NAME "get_bvec"
{
  scm_t_c_bvec_limb *res;

  if (!current_mark_space)
    {
      SCM_SYSCALL (current_mark_space = (scm_t_mark_space *) malloc (sizeof (scm_t_mark_space)));
      if (!current_mark_space)
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space->bvec_space = NULL;
      current_mark_space->next = NULL;

      *mark_space_ptr = current_mark_space;
      mark_space_ptr = &(current_mark_space->next);

      return get_bvec ();
    }

  if (!(current_mark_space->bvec_space))
    {
      SCM_SYSCALL (current_mark_space->bvec_space =
                   (scm_t_c_bvec_limb *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
      if (!(current_mark_space->bvec_space))
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space_offset = 0;

      return get_bvec ();
    }

  if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
    {
      current_mark_space = NULL;

      return get_bvec ();
    }

  res = current_mark_space->bvec_space + current_mark_space_offset;
  current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;

  return res;
}
#undef FUNC_NAME


static void
clear_mark_space (void)
{
  scm_t_mark_space *ms;

  for (ms = mark_space_head; ms; ms = ms->next)
    memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
}


\f
/* Debugging functions.  */

#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)

static void
map_free_list (scm_t_freelist *master, SCM freelist)
{
  long last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
    {
      long int this_seg = heap_segment (f);

      if (this_seg == -1)
        {
          fprintf (stderr,
                   "map_free_list: can't find segment containing cell %lx\n",
                   (unsigned long int) SCM_UNPACK (f));
          abort ();
        }
      else if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, "  %5ld %d-cells in segment %ld\n",
                     (long) count, master->span, (long) last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, "  %5ld %d-cells in segment %ld\n",
             (long) count, master->span, (long) last_seg);
}

SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{map-free-list} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  size_t i;

  fprintf (stderr, "%ld segments total (%d:%ld",
           (long) scm_n_heap_segs,
           scm_heap_table[0].span,
           (long) (scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]));

  for (i = 1; i != scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%ld",
             scm_heap_table[i].span,
             (long) (scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]));
  fprintf (stderr, ")\n");
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

static long last_cluster;
static long last_size;

static long
free_list_length (char *title, long i, SCM freelist)
{
  SCM ls;
  long n = 0;
  for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
    if (SCM_FREE_CELL_P (ls))
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %ld\n", title, (long) n);
        abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
        {
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%ld\n", (long) last_size);
          else
            fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %ld", title, (long) i);
      else
        fprintf (stderr, "%s\t%ld\n", title, (long) n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}

static void
free_list_lengths (char *title, scm_t_freelist *master, SCM freelist)
{
  SCM clusters;
  long i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%ld\n", (long) last_size);
  else
    fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
  fprintf (stderr, "\ntotal %ld objects\n\n", (long) n);
}

SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{free-list-length} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif

#ifdef GUILE_DEBUG_FREELIST

/* Non-zero if freelist debugging is in effect.  Set this via
   `gc-set-debug-check-freelist!'.  */
static int scm_debug_check_freelist = 0;

/* Number of calls to SCM_NEWCELL since startup.  */
static unsigned long scm_newcell_count;
static unsigned long scm_newcell2_count;

/* Search freelist for anything that isn't marked as a free cell.
   Abort if we find something.  */
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
  long i = 0;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
    if (!SCM_FREE_CELL_P (f))
      {
        fprintf (stderr, "Bad cell in freelist on newcell %ld: %ld'th elt\n",
                 (long) scm_newcell_count, (long) i);
        abort ();
      }
}

SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#t}, check the freelist for consistency\n"
            "on each cell allocation.  This procedure only exists when the\n"
            "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* [cmm] I did a double-take when I read this code the first time.
     well, FWIW. */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
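
/* Usage sketch (illustrative, not part of the original source): in a build
 * configured with GUILE_DEBUG_FREELIST, per-allocation freelist checking can
 * be toggled from Scheme with
 *
 *   (gc-set-debug-check-freelist! #t)
 */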


SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist))
    {
      new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
      SCM_GC_SET_ALLOCATED (new);
    }
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
      SCM_GC_SET_ALLOCATED (new);
    }

  return new;
}

SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist2))
    {
      new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
      SCM_GC_SET_ALLOCATED (new);
    }
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
      SCM_GC_SET_ALLOCATED (new);
    }

  return new;
}

#endif /* GUILE_DEBUG_FREELIST */

\f

static unsigned long
master_cells_allocated (scm_t_freelist *master)
{
  /* the '- 1' below is to ignore the cluster spine cells. */
  long objects = master->clusters_allocated * (master->cluster_size - 1);
  if (SCM_NULLP (master->clusters))
    objects -= master->left_to_collect;
  return master->span * objects;
}

static unsigned long
freelist_length (SCM freelist)
{
  long n;
  for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
    ++n;
  return n;
}

static unsigned long
compute_cells_allocated (void)
{
  return (scm_cells_allocated
          + master_cells_allocated (&scm_master_freelist)
          + master_cells_allocated (&scm_master_freelist2)
          - scm_master_freelist.span * freelist_length (scm_freelist)
          - scm_master_freelist2.span * freelist_length (scm_freelist2));
}

/* {Scheme Interface to GC}
 */

SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.")
#define FUNC_NAME s_scm_gc_stats
{
  long i;
  long n;
  SCM heap_segs;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_scm_gc_sweep_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;

  SCM_DEFER_INTS;

  ++scm_block_gc;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long) scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long) scm_heap_table[i].bounds[0])),
                          heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc;

  answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                       scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                       scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                       scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                       scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                       scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
                       scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
                       scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
                       scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
                       scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
                       scm_cons (sym_heap_segments, heap_segs),
                       SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
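
/* Usage sketch (illustrative, not part of the original source): from Scheme,
 * individual figures can be picked out of the alist returned by gc-stats,
 * e.g.
 *
 *   (assq-ref (gc-stats) 'cells-allocated)
 *   (assq-ref (gc-stats) 'gc-time-taken)
 */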


static void
gc_start_stats (const char *what SCM_UNUSED)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}


static void
gc_end_stats (void)
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);
  scm_gc_sweep_time_taken += (t - t_before_sweep);
  ++scm_gc_times;

  scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
  scm_gc_cells_swept_acc += scm_gc_cells_swept;
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that is uniquely associated with @var{obj}\n"
            "for the lifetime of @var{obj}.")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


\f
/* {C Interface For When GC is Triggered}
 */

static void
adjust_min_yield (scm_t_freelist *freelist)
{
  /* min yield is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   * by computing an average over multiple GC:s.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick largest of last two yields. */
      long delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
                    - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %ld, delta = %ld\n",
               (long) scm_cells_allocated,
               (long) delta);
#endif
      if (delta > 0)
        freelist->min_yield += delta;
    }
}
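
/* Worked example (illustrative, not part of the original source): with a
 * total heap of 100000 cells and min_yield_fraction == 40, the target yield
 * is 100000 * 40 / 100 = 40000 cells.  If the larger of the last two yields
 * was only 30000 cells, delta is 10000 > 0, so min_yield is raised by 10000.
 * A sweep that then collects fewer than min_yield cells sets grow_heap_p
 * (see gc_sweep_freelist_finish below), and the next allocation grows the
 * heap instead of triggering another GC.
 */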


/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_t_freelist *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p || scm_block_gc)
            {
              /* In order to reduce gc frequency, try to allocate a new heap
               * segment first, even if gc might find some free cells.  If we
               * can't obtain a new heap segment, we will try gc later.
               */
              master->grow_heap_p = 0;
              alloc_some_heap (master, return_on_error);
            }
          if (SCM_NULLP (master->clusters))
            {
              /* The heap was not grown, either because it wasn't scheduled to
               * grow, or because there was not enough memory available.  In
               * both cases we have to try gc to get some free cells.
               */
#ifdef DEBUGINFO
              fprintf (stderr, "allocated = %ld, ",
                       (long) (scm_cells_allocated
                               + master_cells_allocated (&scm_master_freelist)
                               + master_cells_allocated (&scm_master_freelist2)));
#endif
              scm_igc ("cells");
              adjust_min_yield (master);
              if (SCM_NULLP (master->clusters))
                {
                  /* gc could not free any cells.  Now, we _must_ allocate a
                   * new heap segment, because there is no other possibility
                   * to provide a new cell for the caller.
                   */
                  alloc_some_heap (master, abort_on_error);
                }
            }
        }
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));

#ifdef GUILE_DEBUG_FREELIST
  scm_check_freelist (cell);
#endif

  --scm_ints_disabled;
  *freelist = SCM_FREE_CELL_CDR (cell);
  return cell;
}


#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 */

SCM
scm_alloc_cluster (scm_t_freelist *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif


scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;
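
/* A minimal sketch (not part of the original source) of hanging a C function
 * on one of these hooks.  It assumes the scm_c_hook_add interface and the
 * scm_t_c_hook_function signature from libguile/hooks.h:
 *
 *   static void *
 *   report_gc (void *hook_data, void *func_data, void *data)
 *   {
 *     fprintf (stderr, "gc done\n");
 *     return NULL;
 *   }
 *
 *   ...and somewhere during initialization:
 *
 *   scm_c_hook_add (&scm_after_gc_c_hook, report_gc, NULL, 0);
 */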


void
scm_igc (const char *what)
{
  long j;

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
  /* During the critical section, only the current thread may run. */
  SCM_CRITICAL_SECTION_START;

  /* fprintf (stderr, "gc: %s\n", what); */

  if (!scm_stack_base || scm_block_gc)
    {
      SCM_CRITICAL_SECTION_END;  /* don't leave the critical section open */
      --scm_gc_running_p;
      return;
    }

  gc_start_stats (what);

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  /* flush dead entries from the continuation stack */
  {
    long x;
    long bound;
    SCM *elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    while (x < bound)
      {
        elts[x] = SCM_BOOL_F;
        ++x;
      }
  }

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  clear_mark_space ();

#ifndef USE_THREADS

  /* Mark objects on the C stack. */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      ((size_t) (sizeof (SCM_STACKITEM) - 1 +
                                 sizeof scm_save_regs_gc_mark)
                       / sizeof (SCM_STACKITEM)));

  {
    unsigned long stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* mark the registered roots */
  {
    size_t i;
    for (i = 0; i < SCM_VECTOR_LENGTH (scm_gc_registered_roots); ++i) {
      SCM l = SCM_VELTS (scm_gc_registered_roots)[i];
      for (; !SCM_NULLP (l); l = SCM_CDR (l)) {
        SCM *p = (SCM *) (scm_num2long (SCM_CAAR (l), 0, NULL));
        scm_gc_mark (*p);
      }
    }
  }

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  t_before_sweep = scm_c_get_internal_run_time ();
  scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  gc_end_stats ();

  SCM_CRITICAL_SECTION_END;
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  --scm_gc_running_p;
}

\f

/* {Mark/Sweep}
 */

#define MARK scm_gc_mark
#define FNAME "scm_gc_mark"

#endif /*!MARK_DEPENDENCIES*/

/* Mark an object precisely.
 */
void
MARK (SCM p)
#define FUNC_NAME FNAME
{
  register long i;
  register SCM ptr;
  scm_t_bits cell_type;

#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
  /* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
#endif
  ptr = p;

#ifdef MARK_DEPENDENCIES
  goto gc_mark_loop_first_time;
#endif

/* A simple hack for debugging.  Choose the second branch to get a
   meaningful backtrace for crashes inside the GC.
*/
#if 1
#define goto_gc_mark_loop goto gc_mark_loop
#define goto_gc_mark_nimp goto gc_mark_nimp
#else
#define goto_gc_mark_loop RECURSE(ptr); return
#define goto_gc_mark_nimp RECURSE(ptr); return
#endif

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:

#ifdef MARK_DEPENDENCIES
  if (SCM_EQ_P (ptr, p))
    return;

  scm_gc_mark (ptr);
  return;

gc_mark_loop_first_time:
#endif

#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
  /* We are in debug mode.  Check the ptr exhaustively. */
  if (!scm_cellp (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#else
  /* In non-debug mode, do at least some cheap testing. */
  if (!SCM_CELLP (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#endif

#ifndef MARK_DEPENDENCIES

  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

#endif

  cell_type = SCM_GC_CELL_TYPE (ptr);
  switch (SCM_ITAG7 (cell_type))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CAR (ptr);
          goto_gc_mark_nimp;
        }
      RECURSE (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);
      goto_gc_mark_nimp;
    case scm_tcs_cons_imcar:
      ptr = SCM_CDR (ptr);
      goto_gc_mark_loop;
    case scm_tc7_pws:
      RECURSE (SCM_SETTER (ptr));
      ptr = SCM_PROCEDURE (ptr);
      goto_gc_mark_loop;
    case scm_tcs_cons_gloc:
      {
        /* Dirk:FIXME:: The following code is super ugly:  ptr may be a
         * struct or a gloc.  If it is a gloc, the cell word #0 of ptr
         * is the address of a scm_tc16_variable smob.  If it is a
         * struct, the cell word #0 of ptr is a pointer to a struct
         * vtable data region.  (The fact that these are accessed in
         * the same way restricts the possibilities for changing the
         * data layout of structs or heap cells.)  To discriminate
         * between the two, it is guaranteed that the
         * scm_vtable_index_vcell element of the prospective vtable is
         * always zero.  For a gloc, this location has the CDR of the
         * variable smob, which is guaranteed to be non-zero.
         */
        scm_t_bits word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
        scm_t_bits *vtable_data = (scm_t_bits *) word0;  /* access as struct */
        if (vtable_data[scm_vtable_index_vcell] != 0)
          {
            /* ptr is a gloc */
            SCM gloc_car = SCM_PACK (word0);
            RECURSE (gloc_car);
            ptr = SCM_CDR (ptr);
            goto_gc_mark_loop;
          }
        else
          {
            /* ptr is a struct */
            SCM layout = SCM_PACK (vtable_data[scm_vtable_index_layout]);
            long len = SCM_SYMBOL_LENGTH (layout);
            char *fields_desc = SCM_SYMBOL_CHARS (layout);
            scm_t_bits *struct_data = (scm_t_bits *) SCM_STRUCT_DATA (ptr);

            if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
              {
                RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
                RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
              }
            if (len)
              {
                long x;

                for (x = 0; x < len - 2; x += 2, ++struct_data)
                  if (fields_desc[x] == 'p')
                    RECURSE (SCM_PACK (*struct_data));
                if (fields_desc[x] == 'p')
                  {
                    if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                      for (x = *struct_data++; x; --x, ++struct_data)
                        RECURSE (SCM_PACK (*struct_data));
                    else
                      RECURSE (SCM_PACK (*struct_data));
                  }
              }
            /* mark vtable */
            ptr = SCM_PACK (vtable_data[scm_vtable_index_vtable]);
            goto_gc_mark_loop;
          }
      }
      break;
    case scm_tcs_closures:
      if (SCM_IMP (SCM_ENV (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto_gc_mark_nimp;
        }
      RECURSE (SCM_CLOSCAR (ptr));
      ptr = SCM_ENV (ptr);
      goto_gc_mark_nimp;
    case scm_tc7_vector:
      i = SCM_VECTOR_LENGTH (ptr);
      if (i == 0)
        break;
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          RECURSE (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto_gc_mark_loop;
#ifdef CCLO
    case scm_tc7_cclo:
      {
        size_t i = SCM_CCLO_LENGTH (ptr);
        size_t j;
        for (j = 1; j != i; ++j)
          {
            SCM obj = SCM_CCLO_REF (ptr, j);
            if (!SCM_IMP (obj))
              RECURSE (obj);
          }
        ptr = SCM_CCLO_REF (ptr, 0);
        goto_gc_mark_loop;
      }
#endif
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      break;

    case scm_tc7_substring:
      ptr = SCM_CDR (ptr);
      goto_gc_mark_loop;

    case scm_tc7_wvect:
      SCM_SET_WVECT_GC_CHAIN (ptr, scm_weak_vectors);
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          long x;
          long len;
          int weak_keys;
          int weak_values;

          len = SCM_VECTOR_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (   SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither
                   * of the key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist
                   * containing freelist cells is exported.
                   *
                   * So we only mark the spines of these alists after all
                   * other marking is done.  If somebody confuses us by
                   * constructing a weak vector with a circular alist then we
                   * are hosed, but at least we won't prematurely drop table
                   * entries.
                   */
                  if (!weak_keys)
                    RECURSE (SCM_CAR (kvpair));
                  if (!weak_values)
                    RECURSE (SCM_CDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                RECURSE (alist);
            }
        }
      break;

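      /* Summary of the weak hash vector cases above (illustrative comment,
       * not in the original source):
       *
       *   kind of weak hash vector   weak_keys  weak_values  marked here
       *   weak-key                       1          0        values only
       *   weak-value                     0          1        keys only
       *   doubly weak                    1          1        neither
       *
       * The alist spines themselves are deliberately left for the end of the
       * mark phase, as the comment inside the loop explains.
       */
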
    case scm_tc7_symbol:
      ptr = SCM_PROP_SLOTS (ptr);
      goto_gc_mark_loop;
    case scm_tc7_variable:
      ptr = SCM_CELL_OBJECT_1 (ptr);
      goto_gc_mark_loop;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
      if (!(i < scm_numptob))
        SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
      if (SCM_PTAB_ENTRY (ptr))
        RECURSE (SCM_FILENAME (ptr));
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto_gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* We have detected a free cell.  This can happen if non-object data
           * on the C stack points into guile's heap and is scanned during
           * conservative marking.  */
#if (SCM_DEBUG_CELL_ACCESSES == 0)
          /* If cell debugging is disabled, there is a second situation in
           * which a free cell can be encountered, namely if with preemptive
           * threading one thread has just obtained a fresh cell and was
           * preempted before the cell initialization was completed.  In this
           * case, some entries of the cell may already contain objects.
           * Thus, if cell debugging is disabled, free cells are scanned
           * conservatively.  */
          scm_gc_mark_cell_conservatively (ptr);
#else /* SCM_DEBUG_CELL_ACCESSES == 1 */
          /* With cell debugging enabled, a freshly obtained but not fully
           * initialized cell is guaranteed to be of type scm_tc16_allocated.
           * Thus, no conservative scanning for free cells is necessary, but
           * instead cells of type scm_tc16_allocated have to be scanned
           * conservatively.  This is done in the mark function of the
           * scm_tc16_allocated smob type.  */
#endif
          break;
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          break;
        default:
          i = SCM_SMOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
          if (!(i < scm_numsmob))
            SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto_gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
#undef RECURSE
}
#undef FUNC_NAME

#ifndef MARK_DEPENDENCIES

#undef MARK
#undef FNAME

/* And here we define `scm_gc_mark_dependencies', by including this
 * same file in itself.
 */
#define MARK scm_gc_mark_dependencies
#define FNAME "scm_gc_mark_dependencies"
#define MARK_DEPENDENCIES
#include "gc.c"
#undef MARK_DEPENDENCIES
#undef MARK
#undef FNAME


/* Determine whether the given value does actually represent a cell in some
 * heap segment.  If this is the case, the number of the heap segment is
 * returned.  Otherwise, -1 is returned.  Binary search is used in order to
 * determine the heap segment that contains the cell.  */
/* FIXME:  To be used within scm_gc_mark_cell_conservatively,
 * scm_mark_locations and scm_cellp this function should be an inline
 * function.  */
static long int
heap_segment (SCM obj)
{
  if (!SCM_CELLP (obj))
    return -1;
  else
    {
      SCM_CELLPTR ptr = SCM2PTR (obj);
      unsigned long int i = 0;
      unsigned long int j = scm_n_heap_segs - 1;

      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
        return -1;
      else if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
        return -1;
      else
        {
          while (i < j)
            {
              if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[1]))
                {
                  break;
                }
              else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                {
                  i = j;
                  break;
                }
              else
                {
                  unsigned long int k = (i + j) / 2;

                  if (k == i)
                    return -1;
                  else if (SCM_PTR_LT (ptr, scm_heap_table[k].bounds[1]))
                    {
                      j = k;
                      ++i;
                      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
                        return -1;
                    }
                  else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                    {
                      i = k;
                      --j;
                      if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
                        return -1;
                    }
                }
            }

          if (!DOUBLECELL_ALIGNED_P (obj) && scm_heap_table[i].span == 2)
            return -1;
          else if (SCM_GC_IN_CARD_HEADERP (ptr))
            return -1;
          else
            return i;
        }
    }
}
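
/* Worked example (illustrative, not part of the original source): with four
 * segments whose bounds are, say, [0x1000,0x2000), [0x3000,0x4000),
 * [0x5000,0x6000) and [0x7000,0x8000), a pointer 0x5800 first passes the
 * overall range check, then the loop narrows i and j until segment 2 is
 * found and returned -- unless the cell is misaligned for the segment's
 * span or points into a card header, in which case -1 is returned instead.
 */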


/* Mark the entries of a cell conservatively.  The given cell is known to be
 * on the heap.  Still we have to determine its heap segment in order to
 * figure out whether it is a single or a double cell.  Then, each of the
 * cell's elements itself is checked and potentially marked.  */
void
scm_gc_mark_cell_conservatively (SCM cell)
{
  long int cell_segment = heap_segment (cell);
  unsigned int span = scm_heap_table[cell_segment].span;
  unsigned int i;

  for (i = 1; i != span * 2; ++i)
    {
      SCM obj = SCM_CELL_OBJECT (cell, i);
      long int obj_segment = heap_segment (obj);
      if (obj_segment >= 0)
        scm_gc_mark (obj);
    }
}


/* Mark a region conservatively */
void
scm_mark_locations (SCM_STACKITEM x[], unsigned long n)
{
  unsigned long m;

  for (m = 0; m < n; ++m)
    {
      SCM obj = *(SCM *) &x[m];
      long int segment = heap_segment (obj);
      if (segment >= 0)
        scm_gc_mark (obj);
    }
}


/* The function scm_cellp determines whether an SCM value can be regarded as a
 * pointer to a cell on the heap.
 */
int
scm_cellp (SCM value)
{
  long int segment = heap_segment (value);
  return (segment >= 0);
}


static void
gc_sweep_freelist_start (scm_t_freelist *freelist)
{
  freelist->cells = SCM_EOL;
  freelist->left_to_collect = freelist->cluster_size;
  freelist->clusters_allocated = 0;
  freelist->clusters = SCM_EOL;
  freelist->clustertail = &freelist->clusters;
  freelist->collected_1 = freelist->collected;
  freelist->collected = 0;
}

static void
gc_sweep_freelist_finish (scm_t_freelist *freelist)
{
  long collected;
  *freelist->clustertail = freelist->cells;
  if (!SCM_NULLP (freelist->cells))
    {
      SCM c = freelist->cells;
      SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
      SCM_SET_CELL_WORD_1 (c, SCM_EOL);
      freelist->collected +=
        freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}

#define NEXT_DATA_CELL(ptr, span) \
  do { \
    scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
    (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
             CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
             : nxt__); \
  } while (0)
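
/* Illustrative note (not part of the original source): NEXT_DATA_CELL bumps
 * ptr to the next cell boundary for the given span; if that boundary lands
 * inside a card header, it instead skips past the SCM_GC_CARD_N_HEADER_CELLS
 * header cells to the first data cell of the following card.  The sweep loop
 * below uses it to step over card headers without treating them as objects.
 */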

void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_t_freelist *freelist;
  register unsigned long m;
  register int span;
  size_t i;
  size_t seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register long left_to_collect;
      register size_t j;

      /* Unmarked cells go onto the front of the freelist this heap
         segment points to.  Rather than updating the real freelist
         pointer as we go along, we accumulate the new head in
         nfreelist.  Then, if it turns out that the entire segment is
         free, we free (i.e., malloc's free) the whole segment, and
         simply don't assign nfreelist back into the real freelist.  */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;

      /* use only data cells in seg_size */
      seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;

      scm_gc_cells_swept += seg_size;

      for (j = seg_size + span; j -= span; ptr += span)
        {
          SCM scmptr;

          if (SCM_GC_IN_CARD_HEADERP (ptr))
            {
              SCM_CELLPTR nxt;

              /* cheat here */
              nxt = ptr;
              NEXT_DATA_CELL (nxt, span);
              j += span;

              ptr = nxt - span;
              continue;
            }

          scmptr = PTR2SCM (ptr);

          if (SCM_GCMARKP (scmptr))
            continue;

          switch SCM_TYP7 (scmptr)
            {
            case scm_tcs_cons_gloc:
              {
                /* Dirk:FIXME:: Again, super ugly code:  scmptr may be a
                 * struct or a gloc.  See the corresponding comment in
                 * scm_gc_mark.
                 */
                scm_t_bits word0 = (SCM_CELL_WORD_0 (scmptr)
                                    - scm_tc3_cons_gloc);
                /* access as struct */
                scm_t_bits *vtable_data = (scm_t_bits *) word0;
                if (vtable_data[scm_vtable_index_vcell] == 0)
                  {
                    /* Structs need to be freed in a special order.
                     * This is handled by GC C hooks in struct.c.
                     */
                    SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
                    scm_structs_to_free = scmptr;
                    continue;
                  }
                /* fall through so that scmptr gets collected */
              }
              break;
            case scm_tcs_cons_imcar:
            case scm_tcs_cons_nimcar:
            case scm_tcs_closures:
            case scm_tc7_pws:
              break;
            case scm_tc7_wvect:
            case scm_tc7_vector:
              {
                unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += length * sizeof (scm_t_bits);
                    scm_must_free (SCM_VECTOR_BASE (scmptr));
                  }
                break;
              }
#ifdef CCLO
            case scm_tc7_cclo:
              m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
              scm_must_free (SCM_CCLO_BASE (scmptr));
              break;
#endif
#ifdef HAVE_ARRAYS
            case scm_tc7_bvect:
              {
                unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
                    scm_must_free (SCM_BITVECTOR_BASE (scmptr));
                  }
              }
              break;
            case scm_tc7_byvect:
            case scm_tc7_ivect:
            case scm_tc7_uvect:
            case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
            case scm_tc7_llvect:
#endif
            case scm_tc7_fvect:
            case scm_tc7_dvect:
            case scm_tc7_cvect:
              m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
              scm_must_free (SCM_UVECTOR_BASE (scmptr));
              break;
#endif
            case scm_tc7_substring:
              break;
            case scm_tc7_string:
              m += SCM_STRING_LENGTH (scmptr) + 1;
              scm_must_free (SCM_STRING_CHARS (scmptr));
              break;
            case scm_tc7_symbol:
              m += SCM_SYMBOL_LENGTH (scmptr) + 1;
              scm_must_free (SCM_SYMBOL_CHARS (scmptr));
              break;
            case scm_tcs_subrs:
              /* the various "subrs" (primitives) are never freed */
              continue;
            case scm_tc7_port:
              if (SCM_OPENP (scmptr))
                {
                  int k = SCM_PTOBNUM (scmptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
                  if (!(k < scm_numptob))
                    SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
                  /* Keep "revealed" ports alive.  */
                  if (scm_revealed_count (scmptr) > 0)
                    continue;
                  /* Yes, I really do mean scm_ptobs[k].free rather than
                     scm_ptobs[k].close:  .close is for an explicit
                     CLOSE-PORT by the user.  */
                  m += (scm_ptobs[k].free) (scmptr);
                  SCM_SETSTREAM (scmptr, 0);
                  scm_remove_from_port_table (scmptr);
                  scm_gc_ports_collected++;
                  SCM_CLR_PORT_OPEN_FLAG (scmptr);
                }
              break;
            case scm_tc7_smob:
              switch SCM_TYP16 (scmptr)
                {
                case scm_tc_free_cell:
                case scm_tc16_real:
                  break;
#ifdef SCM_BIGDIG
                case scm_tc16_big:
                  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
                  scm_must_free (SCM_BDIGITS (scmptr));
                  break;
#endif /* def SCM_BIGDIG */
                case scm_tc16_complex:
                  m += sizeof (scm_t_complex);
                  scm_must_free (SCM_COMPLEX_MEM (scmptr));
                  break;
                default:
                  {
                    int k;
                    k = SCM_SMOBNUM (scmptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
                    if (!(k < scm_numsmob))
                      SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
                    if (scm_smobs[k].free)
                      m += (scm_smobs[k].free) (scmptr);
                    break;
                  }
                }
              break;
1884 default:
1885 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1886 }
1887
1888 if (!--left_to_collect)
1889 {
1890 SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
1891 *freelist->clustertail = scmptr;
1892 freelist->clustertail = SCM_CDRLOC (scmptr);
1893
1894 nfreelist = SCM_EOL;
1895 freelist->collected += span * freelist->cluster_size;
1896 left_to_collect = freelist->cluster_size;
1897 }
1898 else
1899 {
1900 /* Stick the new cell on the front of nfreelist. It's
1901 critical that we mark this cell as freed; otherwise, the
1902 conservative collector might trace it as some other type
1903 of object. */
1904 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
1905 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
1906 nfreelist = scmptr;
1907 }
1908 }
1909
1910 #ifdef GC_FREE_SEGMENTS
1911 if (n == seg_size)
1912 {
1913 register long j;
1914
1915 freelist->heap_size -= seg_size;
1916 free ((char *) scm_heap_table[i].bounds[0]);
1917 scm_heap_table[i].bounds[0] = 0;
1918 for (j = i + 1; j < scm_n_heap_segs; j++)
1919 scm_heap_table[j - 1] = scm_heap_table[j];
1920 scm_n_heap_segs -= 1;
1921 i--; /* We need to scan the segment just moved. */
1922 }
1923 else
1924 #endif /* ifdef GC_FREE_SEGMENTS */
1925 {
1926 /* Update the real freelist pointer to point to the head of
1927 the list of free cells we've built for this segment. */
1928 freelist->cells = nfreelist;
1929 freelist->left_to_collect = left_to_collect;
1930 }
1931
1932 #ifdef GUILE_DEBUG_FREELIST
1933 scm_map_free_list ();
1934 #endif
1935 }
1936
1937 gc_sweep_freelist_finish (&scm_master_freelist);
1938 gc_sweep_freelist_finish (&scm_master_freelist2);
1939
1940 /* When we move to POSIX threads, private freelists should probably
1941 be GC-protected instead. */
1942 scm_freelist = SCM_EOL;
1943 scm_freelist2 = SCM_EOL;
1944
1945 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
1946 scm_gc_yield -= scm_cells_allocated;
1947
1948 if (scm_mallocated < m)
1949 /* The byte count of allocated objects has underflowed. This is
1950 probably because you forgot to report the sizes of objects you
1951 have allocated, by calling scm_done_malloc or some such. When
1952 the GC freed them, it subtracted their size from
1953 scm_mallocated, which underflowed. */
1954 abort ();
1955
1956 scm_mallocated -= m;
1957 scm_gc_malloc_collected = m;
1958 }
1959 #undef FUNC_NAME
1960
1961
1962 \f
1963 /* {Front end to malloc}
1964 *
1965 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1966 * scm_done_free
1967 *
1968 * These functions provide services comparable to malloc, realloc, and
1969 * free. They should be used when allocating memory that will be under
1970 * control of the garbage collector, i.e., if the memory may be freed
1971 * during garbage collection.
1972 */
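/* For illustration, a minimal sketch of the intended pattern (the
 * "image" smob and its fields are hypothetical):
 *
 *   struct image *img =
 *     (struct image *) scm_must_malloc (sizeof (struct image), "image");
 *   ...
 *   scm_must_free (img);   // e.g. from the smob's free function, which
 *                          // then returns sizeof (struct image)
 */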
1973
1974 /* scm_must_malloc
1975 * Return newly malloced storage or throw an error.
1976 *
1977 * The parameter WHAT is a string for error reporting.
1978 * If this allocation would push scm_mallocated past the
1979 * threshold scm_mtrigger, or if the first call to malloc fails,
1980 * garbage collect -- on the presumption that some objects
1981 * using malloced storage may be collected.
1982 *
1983 * The limit scm_mtrigger may be raised by this allocation.
1984 */
1985 void *
1986 scm_must_malloc (size_t size, const char *what)
1987 {
1988 void *ptr;
1989 unsigned long nm = scm_mallocated + size;
1990
1991 if (nm < size)
1992 /* The byte count of allocated objects has overflowed. This is
1993 probably because you forgot to report the correct size of freed
1994 memory in some of your smob free methods. */
1995 abort ();
1996
1997 if (nm <= scm_mtrigger)
1998 {
1999 SCM_SYSCALL (ptr = malloc (size));
2000 if (NULL != ptr)
2001 {
2002 scm_mallocated = nm;
2003 #ifdef GUILE_DEBUG_MALLOC
2004 scm_malloc_register (ptr, what);
2005 #endif
2006 return ptr;
2007 }
2008 }
2009
2010 scm_igc (what);
2011
2012 nm = scm_mallocated + size;
2013
2014 if (nm < size)
2015 /* The byte count of allocated objects has overflowed. This is
2016 probably because you forgot to report the correct size of freed
2017 memory in some of your smob free methods. */
2018 abort ();
2019
2020 SCM_SYSCALL (ptr = malloc (size));
2021 if (NULL != ptr)
2022 {
2023 scm_mallocated = nm;
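/* If this allocation came within SCM_MTRIGGER_HYSTERESIS bytes of
   the trigger (or passed it), raise the trigger so that the very
   next allocation does not force another GC. */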
2024 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2025 if (nm > scm_mtrigger)
2026 scm_mtrigger = nm + nm / 2;
2027 else
2028 scm_mtrigger += scm_mtrigger / 2;
2029 }
2030 #ifdef GUILE_DEBUG_MALLOC
2031 scm_malloc_register (ptr, what);
2032 #endif
2033
2034 return ptr;
2035 }
2036
2037 scm_memory_error (what);
2038 }
2039
2040
2041 /* scm_must_realloc
2042 * is similar to scm_must_malloc.
2043 */
2044 void *
2045 scm_must_realloc (void *where,
2046 size_t old_size,
2047 size_t size,
2048 const char *what)
2049 {
2050 void *ptr;
2051 unsigned long nm;
2052
2053 if (size <= old_size)
2054 return where;
2055
2056 nm = scm_mallocated + size - old_size;
2057
2058 if (nm < (size - old_size))
2059 /* The byte count of allocated objects has overflowed. This is
2060 probably because you forgot to report the correct size of freed
2061 memory in some of your smob free methods. */
2062 abort ();
2063
2064 if (nm <= scm_mtrigger)
2065 {
2066 SCM_SYSCALL (ptr = realloc (where, size));
2067 if (NULL != ptr)
2068 {
2069 scm_mallocated = nm;
2070 #ifdef GUILE_DEBUG_MALLOC
2071 scm_malloc_reregister (where, ptr, what);
2072 #endif
2073 return ptr;
2074 }
2075 }
2076
2077 scm_igc (what);
2078
2079 nm = scm_mallocated + size - old_size;
2080
2081 if (nm < (size - old_size))
2082 /* The byte count of allocated objects has overflowed. This is
2083 probably because you forgot to report the correct size of freed
2084 memory in some of your smob free methods. */
2085 abort ();
2086
2087 SCM_SYSCALL (ptr = realloc (where, size));
2088 if (NULL != ptr)
2089 {
2090 scm_mallocated = nm;
2091 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2092 if (nm > scm_mtrigger)
2093 scm_mtrigger = nm + nm / 2;
2094 else
2095 scm_mtrigger += scm_mtrigger / 2;
2096 }
2097 #ifdef GUILE_DEBUG_MALLOC
2098 scm_malloc_reregister (where, ptr, what);
2099 #endif
2100 return ptr;
2101 }
2102
2103 scm_memory_error (what);
2104 }
2105
2106 char *
2107 scm_must_strndup (const char *str, size_t length)
2108 {
2109 char * dst = scm_must_malloc (length + 1, "scm_must_strndup");
2110 memcpy (dst, str, length);
2111 dst[length] = 0;
2112 return dst;
2113 }
2114
2115 char *
2116 scm_must_strdup (const char *str)
2117 {
2118 return scm_must_strndup (str, strlen (str));
2119 }
2120
2121 void
2122 scm_must_free (void *obj)
2123 #define FUNC_NAME "scm_must_free"
2124 {
2125 #ifdef GUILE_DEBUG_MALLOC
2126 scm_malloc_unregister (obj);
2127 #endif
2128 if (obj)
2129 free (obj);
2130 else
2131 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
2132 }
2133 #undef FUNC_NAME
2134
2135
2136 /* Announce that there has been some malloc done that will be freed
2137 * during gc. A typical use is for a smob that uses some malloced
2138 * memory but cannot get it from scm_must_malloc (for whatever
2139 * reason). When a new object of this smob is created, you call
2140 * scm_done_malloc with the size of the object. When your smob free
2141 * function is called, be sure to include this size in the return
2142 * value.
2143 *
2144 * If you can't actually free the memory in the smob free function,
2145 * for whatever reason (like reference counting), you still can (and
2146 * should) report the amount of memory freed when you actually free it.
2147 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
2148 * eh? Or even better, call scm_done_free. */
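/* A sketch of that pattern (foreign_lib_alloc / foreign_lib_free are
 * hypothetical):
 *
 *   data = foreign_lib_alloc (len);
 *   scm_done_malloc (len);          // announce the foreign allocation
 *   ...
 *   // in the smob's free function:
 *   foreign_lib_free (data);
 *   return len;                     // report the freed bytes
 */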
2149
2150 void
2151 scm_done_malloc (long size)
2152 {
2153 if (size < 0) {
2154 if (scm_mallocated < (unsigned long) -size)
2155 /* The byte count of allocated objects has underflowed. This is
2156 probably because you forgot to report the sizes of objects you
2157 have allocated, by calling scm_done_malloc or some such. When
2158 the GC freed them, it subtracted their size from
2159 scm_mallocated, which underflowed. */
2160 abort ();
2161 } else {
2162 unsigned long nm = scm_mallocated + size;
2163 if (nm < size)
2164 /* The byte count of allocated objects has overflowed. This is
2165 probably because you forgot to report the correct size of freed
2166 memory in some of your smob free methods. */
2167 abort ();
2168 }
2169
2170 scm_mallocated += size;
2171
2172 if (scm_mallocated > scm_mtrigger)
2173 {
2174 scm_igc ("foreign mallocs");
2175 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2176 {
2177 if (scm_mallocated > scm_mtrigger)
2178 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2179 else
2180 scm_mtrigger += scm_mtrigger / 2;
2181 }
2182 }
2183 }
2184
2185 void
2186 scm_done_free (long size)
2187 {
2188 if (size >= 0) {
2189 if (scm_mallocated < size)
2190 /* The byte count of allocated objects has underflowed. This is
2191 probably because you forgot to report the sizes of objects you
2192 have allocated, by calling scm_done_malloc or some such. When
2193 the GC freed them, it subtracted their size from
2194 scm_mallocated, which underflowed. */
2195 abort ();
2196 } else {
2197 unsigned long nm = scm_mallocated - size;
2198 if (nm < scm_mallocated)
2199 /* The byte count of allocated objects has overflowed. This is
2200 probably because you forgot to report the correct size of freed
2201 memory in some of your smob free methods. */
2202 abort ();
2203 }
2204
2205 scm_mallocated -= size;
2206 }
2207
2208
2209 \f
2210 /* {Heap Segments}
2211 *
2212 * Each heap segment is an array of objects of a particular size.
2213 * Every segment has an associated (possibly shared) freelist.
2214 * A table of segment records is kept that records the upper and
2215 * lower extents of the segment; this is used during the conservative
2216 * phase of gc to identify probable gc roots (because they point
2217 * into valid segments at reasonable offsets). */
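/* In other words (a simplified sketch; the real test also accounts
 * for cell alignment within a segment), a candidate pointer P found
 * during the conservative scan is treated as a probable root iff,
 * for some segment i,
 *
 *   scm_heap_table[i].bounds[0] <= P && P < scm_heap_table[i].bounds[1]
 */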
2218
2219 /* scm_expmem
2220 * is true if the first segment was smaller than INIT_HEAP_SEG.
2221 * If scm_expmem is set to one, subsequent segment allocations will
2222 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2223 */
2224 int scm_expmem = 0;
2225
2226 size_t scm_max_segment_size;
2227
2228 /* scm_heap_org
2229 * is the lowest base address of any heap segment.
2230 */
2231 SCM_CELLPTR scm_heap_org;
2232
2233 scm_t_heap_seg_data * scm_heap_table = 0;
2234 static size_t heap_segment_table_size = 0;
2235 size_t scm_n_heap_segs = 0;
2236
2237 /* init_heap_seg
2238 * initializes a new heap segment and returns its size in bytes, or 0 on failure.
2239 *
2240 * The segment origin and segment size in bytes are input parameters.
2241 * The freelist is both input and output.
2242 *
2243 * This function presumes that the scm_heap_table has already been expanded
2244 * to accommodate a new segment record and that the markbit space was reserved
2245 * for all the cards in this segment.
2246 */
2247
2248 #define INIT_CARD(card, span) \
2249 do { \
2250 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2251 if ((span) == 2) \
2252 SCM_GC_SET_CARD_DOUBLECELL (card); \
2253 } while (0)
2254
2255 static size_t
2256 init_heap_seg (SCM_CELLPTR seg_org, size_t size, scm_t_freelist *freelist)
2257 {
2258 register SCM_CELLPTR ptr;
2259 SCM_CELLPTR seg_end;
2260 size_t new_seg_index;
2261 ptrdiff_t n_new_cells;
2262 int span = freelist->span;
2263
2264 if (seg_org == NULL)
2265 return 0;
2266
2267 /* Align the begin ptr up.
2268 */
2269 ptr = SCM_GC_CARD_UP (seg_org);
2270
2271 /* Compute the ceiling on valid object pointers w/in this segment.
2272 */
2273 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
2274
2275 /* Find the right place and insert the segment record.
2276 */
2277 new_seg_index = 0;
2278 while (new_seg_index < scm_n_heap_segs
2279 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org))
2280 new_seg_index++;
2281
2282 {
2283 int i;
2284 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2285 scm_heap_table[i] = scm_heap_table[i - 1];
2286 }
2287
2288 ++scm_n_heap_segs;
2289
2290 scm_heap_table[new_seg_index].span = span;
2291 scm_heap_table[new_seg_index].freelist = freelist;
2292 scm_heap_table[new_seg_index].bounds[0] = ptr;
2293 scm_heap_table[new_seg_index].bounds[1] = seg_end;
2294
2295 /*n_new_cells*/
2296 n_new_cells = seg_end - ptr;
2297
2298 freelist->heap_size += n_new_cells;
2299
2300 /* Partition objects in this segment into clusters */
2301 {
2302 SCM clusters;
2303 SCM *clusterp = &clusters;
2304
2305 NEXT_DATA_CELL (ptr, span);
2306 while (ptr < seg_end)
2307 {
2308 scm_cell *nxt = ptr;
2309 scm_cell *prv = NULL;
2310 scm_cell *last_card = NULL;
2311 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
2312 NEXT_DATA_CELL(nxt, span);
2313
2314 /* Allocate cluster spine
2315 */
2316 *clusterp = PTR2SCM (ptr);
2317 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
2318 clusterp = SCM_CDRLOC (*clusterp);
2319 ptr = nxt;
2320
2321 while (n_data_cells--)
2322 {
2323 scm_cell *card = SCM_GC_CELL_CARD (ptr);
2324 SCM scmptr = PTR2SCM (ptr);
2325 nxt = ptr;
2326 NEXT_DATA_CELL (nxt, span);
2327 prv = ptr;
2328
2329 if (card != last_card)
2330 {
2331 INIT_CARD (card, span);
2332 last_card = card;
2333 }
2334
2335 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
2336 SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));
2337
2338 ptr = nxt;
2339 }
2340
2341 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
2342 }
2343
2344 /* sanity check */
2345 {
2346 scm_cell *ref = seg_end;
2347 NEXT_DATA_CELL (ref, span);
2348 if (ref != ptr)
2349 /* [cmm] looks like the segment size doesn't divide cleanly by
2350 cluster size. bad cmm! */
2351 abort();
2352 }
2353
2354 /* Patch up the last cluster pointer in the segment
2355 * to join it to the input freelist.
2356 */
2357 *clusterp = freelist->clusters;
2358 freelist->clusters = clusters;
2359 }
2360
2361 #ifdef DEBUGINFO
2362 fprintf (stderr, "H");
2363 #endif
2364 return size;
2365 }
2366
2367 static size_t
2368 round_to_cluster_size (scm_t_freelist *freelist, size_t len)
2369 {
2370 size_t cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2371
2372 return
2373 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2374 + ALIGNMENT_SLACK (freelist);
2375 }
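/* For example, if CLUSTER_SIZE_IN_BYTES (freelist) were 4096, a
 * request of len == 5000 would be rounded to 2 * 4096 == 8192 bytes,
 * plus the freelist's alignment slack. */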
2376
2377 static void
2378 alloc_some_heap (scm_t_freelist *freelist, policy_on_error error_policy)
2379 #define FUNC_NAME "alloc_some_heap"
2380 {
2381 SCM_CELLPTR ptr;
2382 size_t len;
2383
2384 if (scm_gc_heap_lock)
2385 {
2386 /* Critical code sections (such as the garbage collector) aren't
2387 * supposed to add heap segments.
2388 */
2389 fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
2390 abort ();
2391 }
2392
2393 if (scm_n_heap_segs == heap_segment_table_size)
2394 {
2395 /* We have to expand the heap segment table to have room for the new
2396 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2397 * init_heap_seg only if the allocation of the segment itself succeeds.
2398 */
2399 size_t new_table_size = scm_n_heap_segs + 1;
2400 size_t size = new_table_size * sizeof (scm_t_heap_seg_data);
2401 scm_t_heap_seg_data *new_heap_table;
2402
2403 SCM_SYSCALL (new_heap_table = ((scm_t_heap_seg_data *)
2404 realloc ((char *)scm_heap_table, size)));
2405 if (!new_heap_table)
2406 {
2407 if (error_policy == abort_on_error)
2408 {
2409 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2410 abort ();
2411 }
2412 else
2413 {
2414 return;
2415 }
2416 }
2417 else
2418 {
2419 scm_heap_table = new_heap_table;
2420 heap_segment_table_size = new_table_size;
2421 }
2422 }
2423
2424 /* Pick a size for the new heap segment.
2425 * The rule for picking the size of a segment is explained in
2426 * gc.h
2427 */
2428 {
2429 /* Ensure that the new segment is predicted to be large enough.
2430 *
2431 * New yield should at least equal GC fraction of new heap size, i.e.
2432 *
2433 * y + dh > f * (h + dh)
2434 *
2435 * y : yield
2436 * f : min yield fraction
2437 * h : heap size
2438 * dh : size of new heap segment
2439 *
2440 * This gives dh > (f * h - y) / (1 - f)
2441 */
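/* Note: in the formula above, f denotes a fraction, while
 * min_yield_fraction below is a percentage; scaled by 100 the bound
 * becomes dh > (f * h - 100 * y) / (100 - f). Dividing by (99 - f)
 * instead of (100 - f) appears to bias the estimate toward a
 * slightly larger segment. */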
2442 int f = freelist->min_yield_fraction;
2443 unsigned long h = SCM_HEAP_SIZE;
2444 size_t min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
2445 len = SCM_EXPHEAP (freelist->heap_size);
2446 #ifdef DEBUGINFO
2447 fprintf (stderr, "(%ld < %ld)", (long) len, (long) min_cells);
2448 #endif
2449 if (len < min_cells)
2450 len = min_cells + freelist->cluster_size;
2451 len *= sizeof (scm_cell);
2452 /* force new sampling */
2453 freelist->collected = LONG_MAX;
2454 }
2455
2456 if (len > scm_max_segment_size)
2457 len = scm_max_segment_size;
2458
2459 {
2460 size_t smallest;
2461
2462 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2463
2464 if (len < smallest)
2465 len = smallest;
2466
2467 /* Allocate with decaying ambition. */
2468 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2469 && (len >= smallest))
2470 {
2471 size_t rounded_len = round_to_cluster_size (freelist, len);
2472 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2473 if (ptr)
2474 {
2475 init_heap_seg (ptr, rounded_len, freelist);
2476 return;
2477 }
2478 len /= 2;
2479 }
2480 }
2481
2482 if (error_policy == abort_on_error)
2483 {
2484 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2485 abort ();
2486 }
2487 }
2488 #undef FUNC_NAME
2489
2490 \f
2491 /* {GC Protection Helper Functions}
2492 */
2493
2494
2495 /*
2496 * If within a function you need to protect one or more scheme objects from
2497 * garbage collection, pass them as parameters to one of the
2498 * scm_remember_upto_here* functions below. These functions don't do
2499 * anything, but since the compiler does not know that they are actually
2500 * no-ops, it will generate code that calls these functions with the given
2501 * parameters. Therefore, you can be sure that the compiler will keep those
2502 * scheme values alive (on the stack or in a register) up to the point where
2503 * scm_remember_upto_here* is called. In other words, place the call to
2504 * scm_remember_upto_here* _after_ the last code in your function that
2505 * depends on the scheme object's existence.
2506 *
2507 * Example: We want to make sure that the string object str does not get
2508 * garbage collected during the execution of 'some_function', because
2509 * otherwise the characters belonging to str would be freed and
2510 * 'some_function' might access freed memory. To make sure that the compiler
2511 * keeps str alive on the stack or in a register such that it is visible to
2512 * the conservative gc, we add the call to scm_remember_upto_here_1 _after_
2513 * the call to 'some_function'. Note that this would not be necessary if str
2514 * were used anyway after the call to 'some_function'.
2515 * char *chars = SCM_STRING_CHARS (str);
2516 * some_function (chars);
2517 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2518 */
2519
2520 void
2521 scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
2522 {
2523 /* Empty. Protects a single object from garbage collection. */
2524 }
2525
2526 void
2527 scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
2528 {
2529 /* Empty. Protects two objects from garbage collection. */
2530 }
2531
2532 void
2533 scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
2534 {
2535 /* Empty. Protects any number of objects from garbage collection. */
2536 }
2537
2538
2539 #if (SCM_DEBUG_DEPRECATED == 0)
2540
2541 void
2542 scm_remember (SCM *ptr)
2543 {
2544 scm_c_issue_deprecation_warning ("`scm_remember' is deprecated. "
2545 "Use the `scm_remember_upto_here*' family of functions instead.");
2546 }
2547
2548 SCM
2549 scm_protect_object (SCM obj)
2550 {
2551 scm_c_issue_deprecation_warning ("`scm_protect_object' is deprecated. "
2552 "Use `scm_gc_protect_object' instead.");
2553 return scm_gc_protect_object (obj);
2554 }
2555
2556 SCM
2557 scm_unprotect_object (SCM obj)
2558 {
2559 scm_c_issue_deprecation_warning ("`scm_unprotect_object' is deprecated. "
2560 "Use `scm_gc_unprotect_object' instead.");
2561 return scm_gc_unprotect_object (obj);
2562 }
2563
2564 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2565
2566 /*
2567 These crazy functions prevent garbage collection of the
2568 arguments after the first one: because they are used at the
2569 last line of the caller's code block, the compiler must keep
2570 them live throughout that function.
2571
2572 It'd be better to have a nice compiler hint to
2573 aid the conservative stack-scanning GC. --03/09/00 gjb */
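/* A sketch of the intended caller-side use: return RESULT while
 * keeping OBJ live (and thus visible to the conservative GC) up to
 * the return:
 *
 *   return scm_return_first (result, obj);
 */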
2574 SCM
2575 scm_return_first (SCM elt, ...)
2576 {
2577 return elt;
2578 }
2579
2580 int
2581 scm_return_first_int (int i, ...)
2582 {
2583 return i;
2584 }
2585
2586
2587 SCM
2588 scm_permanent_object (SCM obj)
2589 {
2590 SCM_REDEFER_INTS;
2591 scm_permobjs = scm_cons (obj, scm_permobjs);
2592 SCM_REALLOW_INTS;
2593 return obj;
2594 }
2595
2596
2597 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2598 other references are dropped, until the object is unprotected by calling
2599 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
2600 i.e., it is possible to protect the same object several times, but it is
2601 necessary to unprotect the object the same number of times to actually get
2602 the object unprotected. It is an error to unprotect an object more often
2603 than it has been protected before. The function scm_gc_protect_object
2604 returns OBJ.
2605 */
2606
2607 /* Implementation note: For every object X, there is a counter which
2608 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
2609 */
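/* For example (a sketch of the nesting behaviour):
 *
 *   scm_gc_protect_object (obj);    // count: 1
 *   scm_gc_protect_object (obj);    // count: 2
 *   scm_gc_unprotect_object (obj);  // count: 1, still protected
 *   scm_gc_unprotect_object (obj);  // count: 0, collectable again
 */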
2610
2611 SCM
2612 scm_gc_protect_object (SCM obj)
2613 {
2614 SCM handle;
2615
2616 /* This critical section barrier will be replaced by a mutex. */
2617 SCM_REDEFER_INTS;
2618
2619 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2620 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
2621
2622 SCM_REALLOW_INTS;
2623
2624 return obj;
2625 }
2626
2627
2628 /* Remove any protection for OBJ established by a prior call to
2629 scm_gc_protect_object. This function returns OBJ.
2630
2631 See scm_gc_protect_object for more information. */
2632 SCM
2633 scm_gc_unprotect_object (SCM obj)
2634 {
2635 SCM handle;
2636
2637 /* This critical section barrier will be replaced by a mutex. */
2638 SCM_REDEFER_INTS;
2639
2640 handle = scm_hashq_get_handle (scm_protects, obj);
2641
2642 if (SCM_FALSEP (handle))
2643 {
2644 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2645 abort ();
2646 }
2647 else
2648 {
2649 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
2650 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
2651 scm_hashq_remove_x (scm_protects, obj);
2652 else
2653 SCM_SETCDR (handle, count);
2654 }
2655
2656 SCM_REALLOW_INTS;
2657
2658 return obj;
2659 }
2660
2661 void
2662 scm_gc_register_root (SCM *p)
2663 {
2664 SCM handle;
2665 SCM key = scm_long2num ((long) p);
2666
2667 /* This critical section barrier will be replaced by a mutex. */
2668 SCM_REDEFER_INTS;
2669
2670 handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0));
2671 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
2672
2673 SCM_REALLOW_INTS;
2674 }
2675
2676 void
2677 scm_gc_unregister_root (SCM *p)
2678 {
2679 SCM handle;
2680 SCM key = scm_long2num ((long) p);
2681
2682 /* This critical section barrier will be replaced by a mutex. */
2683 SCM_REDEFER_INTS;
2684
2685 handle = scm_hashv_get_handle (scm_gc_registered_roots, key);
2686
2687 if (SCM_FALSEP (handle))
2688 {
2689 fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
2690 abort ();
2691 }
2692 else
2693 {
2694 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
2695 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
2696 scm_hashv_remove_x (scm_gc_registered_roots, key);
2697 else
2698 SCM_SETCDR (handle, count);
2699 }
2700
2701 SCM_REALLOW_INTS;
2702 }
2703
2704 void
2705 scm_gc_register_roots (SCM *b, unsigned long n)
2706 {
2707 SCM *p = b;
2708 for (; p < b + n; ++p)
2709 scm_gc_register_root (p);
2710 }
2711
2712 void
2713 scm_gc_unregister_roots (SCM *b, unsigned long n)
2714 {
2715 SCM *p = b;
2716 for (; p < b + n; ++p)
2717 scm_gc_unregister_root (p);
2718 }
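/* A sketch of typical use (the variable name is illustrative): an SCM
 * slot kept in malloc'd memory is invisible to the conservative scan
 * of the stack, so it has to be registered explicitly:
 *
 *   SCM *slot = (SCM *) scm_must_malloc (sizeof (SCM), "root slot");
 *   *slot = SCM_BOOL_F;
 *   scm_gc_register_root (slot);
 *   ...
 *   scm_gc_unregister_root (slot);
 *   scm_must_free (slot);
 */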
2719
2720 int terminating;
2721
2722 /* called on process termination. */
2723 #ifdef HAVE_ATEXIT
2724 static void
2725 cleanup (void)
2726 #else
2727 #ifdef HAVE_ON_EXIT
2728 extern int on_exit (void (*procp) (), int arg);
2729
2730 static void
2731 cleanup (int status, void *arg)
2732 #else
2733 #error Do not know how to set up a cleanup handler on your system.
2734 #endif
2735 #endif
2736 {
2737 terminating = 1;
2738 scm_flush_all_ports ();
2739 }
2740
2741 \f
2742 static int
2743 make_initial_segment (size_t init_heap_size, scm_t_freelist *freelist)
2744 {
2745 size_t rounded_size = round_to_cluster_size (freelist, init_heap_size);
2746
2747 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2748 rounded_size,
2749 freelist))
2750 {
2751 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2752 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2753 rounded_size,
2754 freelist))
2755 return 1;
2756 }
2757 else
2758 scm_expmem = 1;
2759
2760 if (freelist->min_yield_fraction)
2761 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
2762 / 100);
2763 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
2764
2765 return 0;
2766 }
2767
2768 \f
2769 static void
2770 init_freelist (scm_t_freelist *freelist,
2771 int span,
2772 long cluster_size,
2773 int min_yield)
2774 {
2775 freelist->clusters = SCM_EOL;
2776 freelist->cluster_size = cluster_size + 1;
2777 freelist->left_to_collect = 0;
2778 freelist->clusters_allocated = 0;
2779 freelist->min_yield = 0;
2780 freelist->min_yield_fraction = min_yield;
2781 freelist->span = span;
2782 freelist->collected = 0;
2783 freelist->collected_1 = 0;
2784 freelist->heap_size = 0;
2785 }
2786
2787
2788 /* Get an integer from an environment variable. */
2789 static int
2790 scm_i_getenv_int (const char *var, int def)
2791 {
2792 char *end, *val = getenv (var);
2793 long res;
2794 if (!val)
2795 return def;
2796 res = strtol (val, &end, 10);
2797 if (end == val)
2798 return def;
2799 return res;
2800 }
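/* The GC parameters read below in scm_init_storage can thus be tuned
 * from the environment, e.g. (values illustrative; the segment sizes
 * are in bytes):
 *
 *   GUILE_INIT_SEGMENT_SIZE_1=1048576 GUILE_MIN_YIELD_1=5 guile
 */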
2801
2802
2803 int
2804 scm_init_storage ()
2805 {
2806 unsigned long gc_trigger_1;
2807 unsigned long gc_trigger_2;
2808 size_t init_heap_size_1;
2809 size_t init_heap_size_2;
2810 size_t j;
2811
2812 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2813 scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
2814 scm_set_smob_mark (scm_tc16_allocated, allocated_mark);
2815 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
2816
2817 j = SCM_NUM_PROTECTS;
2818 while (j)
2819 scm_sys_protects[--j] = SCM_BOOL_F;
2820 scm_block_gc = 1;
2821
2822 scm_freelist = SCM_EOL;
2823 scm_freelist2 = SCM_EOL;
2824 gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
2825 init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
2826 gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
2827 init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
2828 scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);
2829
2830 scm_expmem = 0;
2831
2832 j = SCM_HEAP_SEG_SIZE;
2833 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2834 scm_heap_table = ((scm_t_heap_seg_data *)
2835 scm_must_malloc (sizeof (scm_t_heap_seg_data) * 2, "hplims"));
2836 heap_segment_table_size = 2;
2837
2838 mark_space_ptr = &mark_space_head;
2839
2840 init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
2841 init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
2842 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2843 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2844 return 1;
2845
2846 /* scm_heap_table[0].bounds[0] can change. Do not remove scm_heap_org. */
2847 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2848
2849 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2850 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2851 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2852 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2853 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2854
2855 /* Initialise the table of ports. */
2856 scm_t_portable = (scm_t_port **)
2857 malloc (sizeof (scm_t_port *) * scm_t_portable_room);
2858 if (!scm_t_portable)
2859 return 1;
2860
2861 #ifdef HAVE_ATEXIT
2862 atexit (cleanup);
2863 #else
2864 #ifdef HAVE_ON_EXIT
2865 on_exit (cleanup, 0);
2866 #endif
2867 #endif
2868
2869 scm_stand_in_procs = SCM_EOL;
2870 scm_permobjs = SCM_EOL;
2871 scm_protects = scm_c_make_hash_table (31);
2872 scm_gc_registered_roots = scm_c_make_hash_table (31);
2873
2874 return 0;
2875 }
2876
2877 \f
2878
2879 SCM scm_after_gc_hook;
2880
2881 static SCM gc_async;
2882
2883 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2884 * is run after the gc, as soon as the asynchronous events are handled by the
2885 * evaluator.
2886 */
2887 static SCM
2888 gc_async_thunk (void)
2889 {
2890 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2891 return SCM_UNSPECIFIED;
2892 }
2893
2894
2895 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2896 * the garbage collection. The only purpose of this function is to mark the
2897 * gc_async (which will eventually lead to the execution of the
2898 * gc_async_thunk).
2899 */
2900 static void *
2901 mark_gc_async (void * hook_data SCM_UNUSED,
2902 void *func_data SCM_UNUSED,
2903 void *data SCM_UNUSED)
2904 {
2905 /* If cell access debugging is enabled, the user may choose to perform
2906 * additional garbage collections after an arbitrary number of cell
2907 * accesses. We don't want the scheme level after-gc-hook to be performed
2908 * for each of these garbage collections for the following reason: The
2909 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
2910 * after-gc-hook were performed with every gc, and if the gc were performed
2911 * after a very small number of cell accesses, then the number of cell
2912 * accesses during the execution of the after-gc-hook will suffice to cause
2913 * the execution of the next gc. Then, guile would keep executing the
2914 * after-gc-hook over and over again, and would never come to do other
2915 * things.
2916 *
2917 * To overcome this problem, if cell access debugging with additional
2918 * garbage collections is enabled, the after-gc-hook is never run by the
2919 * garbage collector. When running guile with cell access debugging and
2920 * execution of the after-gc-hook is desired, it is necessary to run the
2921 * hook explicitly from user code. This has the effect that, from the
2922 * scheme level point of view, garbage collection seems to be performed
2923 * with a much lower frequency than it actually is. Obviously,
2924 * this will not work for code that depends on a fixed one to one
2925 * relationship between the execution counts of the C level garbage
2926 * collection hooks and the execution count of the scheme level
2927 * after-gc-hook.
2928 */
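/* (In that case user code can trigger the hook itself, e.g. via
 * scm_c_run_hook (scm_after_gc_hook, SCM_EOL), as gc_async_thunk
 * above does.) */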
2929 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2930 if (debug_cells_gc_interval == 0)
2931 scm_system_async_mark (gc_async);
2932 #else
2933 scm_system_async_mark (gc_async);
2934 #endif
2935
2936 return NULL;
2937 }
2938
2939
2940 void
2941 scm_init_gc ()
2942 {
2943 SCM after_gc_thunk;
2944
2945 scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
2946 scm_c_define ("after-gc-hook", scm_after_gc_hook);
2947
2948 after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
2949 gc_async_thunk);
2950 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
2951
2952 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2953
2954 #ifndef SCM_MAGIC_SNARFER
2955 #include "libguile/gc.x"
2956 #endif
2957 }
2958
2959 #endif /*MARK_DEPENDENCIES*/
2960
2961 /*
2962 Local Variables:
2963 c-file-style: "gnu"
2964 End:
2965 */