* gc.c, gc.h (scm_map_free_list): Define also if GUILE_DEBUG is
[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 \f
46 #include <stdio.h>
47 #include "_scm.h"
48 #include "stime.h"
49 #include "stackchk.h"
50 #include "struct.h"
51 #include "genio.h"
52 #include "weaks.h"
53 #include "guardians.h"
54 #include "smob.h"
55 #include "unif.h"
56 #include "async.h"
57
58 #include "validate.h"
59 #include "gc.h"
60
61 #ifdef HAVE_MALLOC_H
62 #include <malloc.h>
63 #endif
64
65 #ifdef HAVE_UNISTD_H
66 #include <unistd.h>
67 #endif
68
69 #ifdef __STDC__
70 #include <stdarg.h>
71 #define var_start(x, y) va_start(x, y)
72 #else
73 #include <varargs.h>
74 #define var_start(x, y) va_start(x)
75 #endif
76
77 \f
78 /* {heap tuning parameters}
79 *
80 * These are parameters for controlling memory allocation. The heap
81 * is the area out of which scm_cons, and object headers are allocated.
82 *
83 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
84 * 64 bit machine. The units of the _SIZE parameters are bytes.
85 * Cons pairs and object headers occupy one heap cell.
86 *
87 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
88 * allocated initially the heap will grow by half its current size
89 * each subsequent time more heap is needed.
90 *
91 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
92 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
93 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
94 * is in scm_init_storage() and alloc_some_heap() in sys.c
95 *
96 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
97 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
98 *
99 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
100 * is needed.
101 *
102 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
103 * trigger a GC.
104 *
105 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
106 * reclaimed by a GC triggered by must_malloc. If less than this is
107 * reclaimed, the trigger threshold is raised. [I don't know what a
108 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
109  * work around an oscillation that caused almost constant GC.]
110 */
111
112 #define SCM_INIT_HEAP_SIZE (40000L * sizeof (scm_cell))
113 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
114 #ifdef _QC
115 # define SCM_HEAP_SEG_SIZE 32768L
116 #else
117 # ifdef sequent
118 # define SCM_HEAP_SEG_SIZE (7000L*sizeof(scm_cell))
119 # else
120 # define SCM_HEAP_SEG_SIZE (16384L*sizeof(scm_cell))
121 # endif
122 #endif
123 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
124 #define SCM_INIT_MALLOC_LIMIT 100000
125 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
126
127 #define SCM_GC_TRIGGER 10000
128 #define SCM_GC_TRIGGER2 10000
129
130 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
131 bounds for allocated storage */
132
133 #ifdef PROT386
134 /*in 386 protected mode we must only adjust the offset */
135 # define CELL_UP(p) MK_FP(FP_SEG(p), ~7&(FP_OFF(p)+7))
136 # define CELL_DN(p) MK_FP(FP_SEG(p), ~7&FP_OFF(p))
137 #else
138 # ifdef _UNICOS
139 # define CELL_UP(p) (SCM_CELLPTR)(~1L & ((long)(p)+1L))
140 # define CELL_DN(p) (SCM_CELLPTR)(~1L & (long)(p))
141 # else
142 # define CELL_UP(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & ((long)(p)+sizeof(scm_cell)-1L))
143 # define CELL_DN(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & (long)(p))
144 # endif /* UNICOS */
145 #endif /* PROT386 */
146
147
148 \f
149 /* scm_freelists
150 */
151
152 #ifdef GUILE_NEW_GC_SCHEME
153 SCM scm_freelist = SCM_EOL;
154 scm_freelist_t scm_master_freelist = {
155 SCM_EOL, 0, SCM_EOL, SCM_EOL, 0, 0, 1, 0, 0
156 };
157 SCM scm_freelist2 = SCM_EOL;
158 scm_freelist_t scm_master_freelist2 = {
159 SCM_EOL, 0, SCM_EOL, SCM_EOL, 0, 0, 2, 0, 0
160 };
161 #else
162 scm_freelist_t scm_freelist = { SCM_EOL, 1, 0, 0 };
163 scm_freelist_t scm_freelist2 = { SCM_EOL, 2, 0, 0 };
164 #endif
165
166 /* scm_mtrigger
167 * is the number of bytes of must_malloc allocation needed to trigger gc.
168 */
169 unsigned long scm_mtrigger;
170
171
172 /* scm_gc_heap_lock
173 * If set, don't expand the heap. Set only during gc, during which no allocation
174 * is supposed to take place anyway.
175 */
176 int scm_gc_heap_lock = 0;
177
178 /* GC Blocking
179 * Don't pause for collection if this is set -- just
180 * expand the heap.
181 */
182
183 int scm_block_gc = 1;
184
185 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
186 * collection (GC) more space is allocated for the heap.
187 */
188 #define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
189
190 /* During collection, this accumulates objects holding
191 * weak references.
192 */
193 SCM scm_weak_vectors;
194
195 /* GC Statistics Keeping
196 */
197 unsigned long scm_cells_allocated = 0;
198 long scm_mallocated = 0;
199 /* unsigned long scm_gc_cells_collected; */
200 unsigned long scm_gc_malloc_collected;
201 unsigned long scm_gc_ports_collected;
202 unsigned long scm_gc_rt;
203 unsigned long scm_gc_time_taken = 0;
204
205 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
206 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
207 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
208 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
209 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
210 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
211
212
213 struct scm_heap_seg_data
214 {
215 /* lower and upper bounds of the segment */
216 SCM_CELLPTR bounds[2];
217
218 /* address of the head-of-freelist pointer for this segment's cells.
219 All segments usually point to the same one, scm_freelist. */
220 scm_freelist_t *freelistp;
221
222 /* number of SCM words per object in this segment */
223 int span;
224
225 /* If SEG_DATA->valid is non-zero, the conservative marking
226 functions will apply SEG_DATA->valid to the purported pointer and
227 SEG_DATA, and mark the object iff the function returns non-zero.
228 At the moment, I don't think anyone uses this. */
229 int (*valid) ();
230 };
231
232
233
234
235 static void scm_mark_weak_vector_spines (void);
236 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
237 static void alloc_some_heap (scm_freelist_t *);
238
239
240 \f
241 /* Debugging functions. */
242
243 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
244
245 /* Return the number of the heap segment containing CELL. */
246 static int
247 which_seg (SCM cell)
248 {
249 int i;
250
251 for (i = 0; i < scm_n_heap_segs; i++)
252 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
253 && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
254 return i;
255 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
256 SCM_UNPACK (cell));
257 abort ();
258 }
259
260
261 #ifdef GUILE_NEW_GC_SCHEME
262 static void
263 map_free_list (scm_freelist_t *master, SCM freelist)
264 {
265 int last_seg = -1, count = 0;
266 SCM f;
267
268 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
269 {
270 int this_seg = which_seg (f);
271
272 if (this_seg != last_seg)
273 {
274 if (last_seg != -1)
275 fprintf (stderr, " %5d %d-cells in segment %d\n",
276 count, master->span, last_seg);
277 last_seg = this_seg;
278 count = 0;
279 }
280 count++;
281 }
282 if (last_seg != -1)
283 fprintf (stderr, " %5d %d-cells in segment %d\n",
284 count, master->span, last_seg);
285 }
286 #else
287 static void
288 map_free_list (scm_freelist_t *freelistp)
289 {
290 int last_seg = -1, count = 0;
291 SCM f;
292
293 for (f = freelistp->cells; SCM_NIMP (f); f = SCM_CDR (f))
294 {
295 int this_seg = which_seg (f);
296
297 if (this_seg != last_seg)
298 {
299 if (last_seg != -1)
300 fprintf (stderr, " %5d %d-cells in segment %d\n",
301 count, freelistp->span, last_seg);
302 last_seg = this_seg;
303 count = 0;
304 }
305 count++;
306 }
307 if (last_seg != -1)
308 fprintf (stderr, " %5d %d-cells in segment %d\n",
309 count, freelistp->span, last_seg);
310 }
311 #endif
312
313 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
314 (),
315 "Print debugging information about the free-list.\n"
316 "`map-free-list' is only included in GUILE_DEBUG_FREELIST builds of Guile.")
317 #define FUNC_NAME s_scm_map_free_list
318 {
319 fprintf (stderr, "%d segments total\n", scm_n_heap_segs);
320 #ifdef GUILE_NEW_GC_SCHEME
321 map_free_list (&scm_master_freelist, scm_freelist);
322 map_free_list (&scm_master_freelist2, scm_freelist2);
323 #else
324 map_free_list (&scm_freelist);
325 map_free_list (&scm_freelist2);
326 #endif
327 fflush (stderr);
328
329 return SCM_UNSPECIFIED;
330 }
331 #undef FUNC_NAME
332
333 #endif
334
335 #ifdef GUILE_DEBUG_FREELIST
336
337 /* Number of calls to SCM_NEWCELL since startup. */
338 static unsigned long scm_newcell_count;
339 static unsigned long scm_newcell2_count;
340
341 /* Search freelist for anything that isn't marked as a free cell.
342 Abort if we find something. */
343 #ifdef GUILE_NEW_GC_SCHEME
344 static void
345 scm_check_freelist (SCM freelist)
346 {
347 SCM f;
348 int i = 0;
349
350 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
351 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
352 {
353 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
354 scm_newcell_count, i);
355 fflush (stderr);
356 abort ();
357 }
358 }
359 #else
360 static void
361 scm_check_freelist (scm_freelist_t *freelistp)
362 {
363 SCM f;
364 int i = 0;
365
366 for (f = freelistp->cells; SCM_NIMP (f); f = SCM_CDR (f), i++)
367 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
368 {
369 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
370 scm_newcell_count, i);
371 fflush (stderr);
372 abort ();
373 }
374 }
375 #endif
376
377 static int scm_debug_check_freelist = 0;
378
379 SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
380 (SCM flag),
381 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
382 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
383 "compile-time flag was selected.\n")
384 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
385 {
386 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
387 return SCM_UNSPECIFIED;
388 }
389 #undef FUNC_NAME
390
391
392 #ifdef GUILE_NEW_GC_SCHEME
393
394 SCM
395 scm_debug_newcell (void)
396 {
397 SCM new;
398
399 scm_newcell_count++;
400 if (scm_debug_check_freelist)
401 {
402 scm_check_freelist (scm_freelist);
403 scm_gc();
404 }
405
406 /* The rest of this is supposed to be identical to the SCM_NEWCELL
407 macro. */
408 if (SCM_IMP (scm_freelist))
409 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
410 else
411 {
412 new = scm_freelist;
413 scm_freelist = SCM_CDR (scm_freelist);
414 SCM_SETCAR (new, scm_tc16_allocated);
415 }
416
417 return new;
418 }
419
420 SCM
421 scm_debug_newcell2 (void)
422 {
423 SCM new;
424
425 scm_newcell2_count++;
426 if (scm_debug_check_freelist)
427 {
428 scm_check_freelist (scm_freelist2);
429 scm_gc ();
430 }
431
432 /* The rest of this is supposed to be identical to the SCM_NEWCELL
433 macro. */
434 if (SCM_IMP (scm_freelist2))
435 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
436 else
437 {
438 new = scm_freelist2;
439 scm_freelist2 = SCM_CDR (scm_freelist2);
440 SCM_SETCAR (new, scm_tc16_allocated);
441 }
442
443 return new;
444 }
445
446 #else /* GUILE_NEW_GC_SCHEME */
447
448 SCM
449 scm_debug_newcell (void)
450 {
451 SCM new;
452
453 scm_newcell_count++;
454 if (scm_debug_check_freelist)
455 {
456 scm_check_freelist (&scm_freelist);
457 scm_gc();
458 }
459
460 /* The rest of this is supposed to be identical to the SCM_NEWCELL
461 macro. */
462 if (SCM_IMP (scm_freelist.cells))
463 new = scm_gc_for_newcell (&scm_freelist);
464 else
465 {
466 new = scm_freelist.cells;
467 scm_freelist.cells = SCM_CDR (scm_freelist.cells);
468 SCM_SETCAR (new, scm_tc16_allocated);
469 ++scm_cells_allocated;
470 }
471
472 return new;
473 }
474
475 SCM
476 scm_debug_newcell2 (void)
477 {
478 SCM new;
479
480 scm_newcell2_count++;
481 if (scm_debug_check_freelist) {
482 scm_check_freelist (&scm_freelist2);
483 scm_gc();
484 }
485
486 /* The rest of this is supposed to be identical to the SCM_NEWCELL2
487 macro. */
488 if (SCM_IMP (scm_freelist2.cells))
489 new = scm_gc_for_newcell (&scm_freelist2);
490 else
491 {
492 new = scm_freelist2.cells;
493 scm_freelist2.cells = SCM_CDR (scm_freelist2.cells);
494 SCM_SETCAR (new, scm_tc16_allocated);
495 scm_cells_allocated += 2;
496 }
497
498 return new;
499 }
500
501 #endif /* GUILE_NEW_GC_SCHEME */
502 #endif /* GUILE_DEBUG_FREELIST */
503
504 \f
505
506 /* {Scheme Interface to GC}
507 */
508
509 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
510 (),
511 "Returns an association list of statistics about Guile's current use of storage. ")
512 #define FUNC_NAME s_scm_gc_stats
513 {
514 int i;
515 int n;
516 SCM heap_segs;
517 long int local_scm_mtrigger;
518 long int local_scm_mallocated;
519 long int local_scm_heap_size;
520 long int local_scm_cells_allocated;
521 long int local_scm_gc_time_taken;
522 SCM answer;
523
524 SCM_DEFER_INTS;
525 scm_block_gc = 1;
526 retry:
527 heap_segs = SCM_EOL;
528 n = scm_n_heap_segs;
529 for (i = scm_n_heap_segs; i--; )
530 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
531 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
532 heap_segs);
533 if (scm_n_heap_segs != n)
534 goto retry;
535 scm_block_gc = 0;
536
537 /// ? ?? ?
538 local_scm_mtrigger = scm_mtrigger;
539 local_scm_mallocated = scm_mallocated;
540 #ifdef GUILE_NEW_GC_SCHEME
541 local_scm_heap_size = scm_master_freelist.heap_size; /*fixme*/
542 #else
543 local_scm_heap_size = scm_freelist.heap_size; /*fixme*/
544 #endif
545 local_scm_cells_allocated = scm_cells_allocated;
546 local_scm_gc_time_taken = scm_gc_time_taken;
547
548 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
549 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
550 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
551 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
552 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
553 scm_cons (sym_heap_segments, heap_segs),
554 SCM_UNDEFINED);
555 SCM_ALLOW_INTS;
556 return answer;
557 }
558 #undef FUNC_NAME
559
560
561 void
562 scm_gc_start (const char *what)
563 {
564 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
565 /* scm_gc_cells_collected = 0; */
566 scm_gc_malloc_collected = 0;
567 scm_gc_ports_collected = 0;
568 }
569
570 void
571 scm_gc_end ()
572 {
573 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
574 scm_gc_time_taken += scm_gc_rt;
575 scm_system_async_mark (scm_gc_async);
576 }
577
578
579 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
580 (SCM obj),
581 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
582 "returned by this function for @var{obj}")
583 #define FUNC_NAME s_scm_object_address
584 {
585 return scm_ulong2num ((unsigned long) obj);
586 }
587 #undef FUNC_NAME
588
589
590 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
591 (),
592 "Scans all of SCM objects and reclaims for further use those that are\n"
593 "no longer accessible.")
594 #define FUNC_NAME s_scm_gc
595 {
596 SCM_DEFER_INTS;
597 scm_igc ("call");
598 SCM_ALLOW_INTS;
599 return SCM_UNSPECIFIED;
600 }
601 #undef FUNC_NAME
602
603
604 \f
605 /* {C Interface For When GC is Triggered}
606 */
607
608 #ifdef GUILE_NEW_GC_SCHEME
609
610 /* When we get POSIX threads support, the master will be global and
611 common while the freelist will be individual for each thread. */
612
613 SCM
614 scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
615 {
616 SCM cell;
617 ++scm_ints_disabled;
618 if (master->triggeredp)
619 scm_igc ("cells");
620 else if (SCM_NULLP (master->clusters))
621 alloc_some_heap (master);
622 else if (SCM_NULLP (SCM_CDR (master->clusters)))
623 /* we are satisfied; GC instead of alloc next time around */
624 master->triggeredp = 1;
625 --scm_ints_disabled;
626 cell = SCM_CAR (master->clusters);
627 master->clusters = SCM_CDR (master->clusters);
628 *freelist = SCM_CDR (cell);
629 SCM_SETCAR (cell, scm_tc16_allocated);
630 return cell;
631 }
632
633 #else /* GUILE_NEW_GC_SCHEME */
634
635 void
636 scm_gc_for_alloc (scm_freelist_t *freelistp)
637 {
638 SCM_REDEFER_INTS;
639 scm_igc ("cells");
640 #ifdef GUILE_DEBUG_FREELIST
641 fprintf (stderr, "Collected: %d, min_yield: %d\n",
642 freelistp->collected, MIN_GC_YIELD (freelistp));
643 #endif
644 if ((freelistp->collected < MIN_GC_YIELD (freelistp))
645 || SCM_IMP (freelistp->cells))
646 alloc_some_heap (freelistp);
647 SCM_REALLOW_INTS;
648 }
649
650
651 SCM
652 scm_gc_for_newcell (scm_freelist_t *freelistp)
653 {
654 SCM fl;
655 scm_gc_for_alloc (freelistp);
656 fl = freelistp->cells;
657 freelistp->cells = SCM_CDR (fl);
658 SCM_SETCAR (fl, scm_tc16_allocated);
659 return fl;
660 }
661
662 #endif /* GUILE_NEW_GC_SCHEME */
663
664 void
665 scm_igc (const char *what)
666 {
667 int j;
668
669 #ifdef USE_THREADS
670 /* During the critical section, only the current thread may run. */
671 SCM_THREAD_CRITICAL_SECTION_START;
672 #endif
673
674 /* fprintf (stderr, "gc: %s\n", what); */
675
676 scm_gc_start (what);
677
678 if (!scm_stack_base || scm_block_gc)
679 {
680 scm_gc_end ();
681 return;
682 }
683
684 if (scm_mallocated < 0)
685 /* The byte count of allocated objects has underflowed. This is
686 probably because you forgot to report the sizes of objects you
687 have allocated, by calling scm_done_malloc or some such. When
688 the GC freed them, it subtracted their size from
689 scm_mallocated, which underflowed. */
690 abort ();
691
692 if (scm_gc_heap_lock)
693 /* We've invoked the collector while a GC is already in progress.
694 That should never happen. */
695 abort ();
696
697 ++scm_gc_heap_lock;
698
699 scm_weak_vectors = SCM_EOL;
700
701 scm_guardian_gc_init ();
702
703 /* unprotect any struct types with no instances */
704 #if 0
705 {
706 SCM type_list;
707 SCM * pos;
708
709 pos = &scm_type_obj_list;
710 type_list = scm_type_obj_list;
711 while (type_list != SCM_EOL)
712 if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
713 {
714 pos = SCM_CDRLOC (type_list);
715 type_list = SCM_CDR (type_list);
716 }
717 else
718 {
719 *pos = SCM_CDR (type_list);
720 type_list = SCM_CDR (type_list);
721 }
722 }
723 #endif
724
725 /* flush dead entries from the continuation stack */
726 {
727 int x;
728 int bound;
729 SCM * elts;
730 elts = SCM_VELTS (scm_continuation_stack);
731 bound = SCM_LENGTH (scm_continuation_stack);
732 x = SCM_INUM (scm_continuation_stack_ptr);
733 while (x < bound)
734 {
735 elts[x] = SCM_BOOL_F;
736 ++x;
737 }
738 }
739
740 #ifndef USE_THREADS
741
742 /* Protect from the C stack. This must be the first marking
743 * done because it provides information about what objects
744 * are "in-use" by the C code. "in-use" objects are those
745 * for which the values from SCM_LENGTH and SCM_CHARS must remain
746 * usable. This requirement is stricter than a liveness
747 * requirement -- in particular, it constrains the implementation
748 * of scm_vector_set_length_x.
749 */
750 SCM_FLUSH_REGISTER_WINDOWS;
751 /* This assumes that all registers are saved into the jmp_buf */
752 setjmp (scm_save_regs_gc_mark);
753 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
754 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
755 sizeof scm_save_regs_gc_mark)
756 / sizeof (SCM_STACKITEM)));
757
758 {
759 /* stack_len is long rather than scm_sizet in order to guarantee that
760 &stack_len is long aligned */
761 #ifdef SCM_STACK_GROWS_UP
762 #ifdef nosve
763 long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
764 #else
765 long stack_len = scm_stack_size (scm_stack_base);
766 #endif
767 scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
768 #else
769 #ifdef nosve
770 long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
771 #else
772 long stack_len = scm_stack_size (scm_stack_base);
773 #endif
774 scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
775 #endif
776 }
777
778 #else /* USE_THREADS */
779
780 /* Mark every thread's stack and registers */
781 scm_threads_mark_stacks ();
782
783 #endif /* USE_THREADS */
784
785 /* FIXME: insert a phase to un-protect string-data preserved
786 * in scm_vector_set_length_x.
787 */
788
789 j = SCM_NUM_PROTECTS;
790 while (j--)
791 scm_gc_mark (scm_sys_protects[j]);
792
793 /* FIXME: we should have a means to register C functions to be run
794 * in different phases of GC
795 */
796 scm_mark_subr_table ();
797
798 #ifndef USE_THREADS
799 scm_gc_mark (scm_root->handle);
800 #endif
801
802 scm_mark_weak_vector_spines ();
803
804 scm_guardian_zombify ();
805
806 scm_gc_sweep ();
807
808 --scm_gc_heap_lock;
809 scm_gc_end ();
810
811 #ifdef USE_THREADS
812 SCM_THREAD_CRITICAL_SECTION_END;
813 #endif
814 }
815
816 \f
817 /* {Mark/Sweep}
818 */
819
820
821
822 /* Mark an object precisely.
823 */
824 void
825 scm_gc_mark (SCM p)
826 {
827 register long i;
828 register SCM ptr;
829
830 ptr = p;
831
832 gc_mark_loop:
833 if (SCM_IMP (ptr))
834 return;
835
836 gc_mark_nimp:
837 if (SCM_NCELLP (ptr))
838 scm_wta (ptr, "rogue pointer in heap", NULL);
839
840 switch (SCM_TYP7 (ptr))
841 {
842 case scm_tcs_cons_nimcar:
843 if (SCM_GCMARKP (ptr))
844 break;
845 SCM_SETGCMARK (ptr);
846 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
847 {
848 ptr = SCM_CAR (ptr);
849 goto gc_mark_nimp;
850 }
851 scm_gc_mark (SCM_CAR (ptr));
852 ptr = SCM_GCCDR (ptr);
853 goto gc_mark_nimp;
854 case scm_tcs_cons_imcar:
855 if (SCM_GCMARKP (ptr))
856 break;
857 SCM_SETGCMARK (ptr);
858 ptr = SCM_GCCDR (ptr);
859 goto gc_mark_loop;
860 case scm_tc7_pws:
861 if (SCM_GCMARKP (ptr))
862 break;
863 SCM_SETGCMARK (ptr);
864 scm_gc_mark (SCM_CELL_WORD (ptr, 2));
865 ptr = SCM_GCCDR (ptr);
866 goto gc_mark_loop;
867 case scm_tcs_cons_gloc:
868 if (SCM_GCMARKP (ptr))
869 break;
870 SCM_SETGCMARK (ptr);
871 {
872 SCM vcell;
873 vcell = SCM_CAR (ptr) - 1L;
874 switch (SCM_UNPACK (SCM_CDR (vcell)))
875 {
876 default:
877 scm_gc_mark (vcell);
878 ptr = SCM_GCCDR (ptr);
879 goto gc_mark_loop;
880 case 1: /* ! */
881 case 0: /* ! */
882 {
883 SCM layout;
884 SCM * vtable_data;
885 int len;
886 char * fields_desc;
887 register SCM * mem;
888 register int x;
889
890 vtable_data = (SCM *)vcell;
891 layout = vtable_data[scm_vtable_index_layout];
892 len = SCM_LENGTH (layout);
893 fields_desc = SCM_CHARS (layout);
894 /* We're using SCM_GCCDR here like STRUCT_DATA, except
895 that it removes the mark */
896 mem = (SCM *)SCM_GCCDR (ptr);
897
898 if (SCM_UNPACK (vtable_data[scm_struct_i_flags]) & SCM_STRUCTF_ENTITY)
899 {
900 scm_gc_mark (mem[scm_struct_i_procedure]);
901 scm_gc_mark (mem[scm_struct_i_setter]);
902 }
903 if (len)
904 {
905 for (x = 0; x < len - 2; x += 2, ++mem)
906 if (fields_desc[x] == 'p')
907 scm_gc_mark (*mem);
908 if (fields_desc[x] == 'p')
909 {
910 int j;
911 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
912 for (j = (long int) *mem; x; --x)
913 scm_gc_mark (*++mem);
914 else
915 scm_gc_mark (*mem);
916 }
917 }
918 if (!SCM_CDR (vcell))
919 {
920 SCM_SETGCMARK (vcell);
921 ptr = vtable_data[scm_vtable_index_vtable];
922 goto gc_mark_loop;
923 }
924 }
925 }
926 }
927 break;
928 case scm_tcs_closures:
929 if (SCM_GCMARKP (ptr))
930 break;
931 SCM_SETGCMARK (ptr);
932 if (SCM_IMP (SCM_CDR (ptr)))
933 {
934 ptr = SCM_CLOSCAR (ptr);
935 goto gc_mark_nimp;
936 }
937 scm_gc_mark (SCM_CLOSCAR (ptr));
938 ptr = SCM_GCCDR (ptr);
939 goto gc_mark_nimp;
940 case scm_tc7_vector:
941 case scm_tc7_lvector:
942 #ifdef CCLO
943 case scm_tc7_cclo:
944 #endif
945 if (SCM_GC8MARKP (ptr))
946 break;
947 SCM_SETGC8MARK (ptr);
948 i = SCM_LENGTH (ptr);
949 if (i == 0)
950 break;
951 while (--i > 0)
952 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
953 scm_gc_mark (SCM_VELTS (ptr)[i]);
954 ptr = SCM_VELTS (ptr)[0];
955 goto gc_mark_loop;
956 case scm_tc7_contin:
957 if SCM_GC8MARKP
958 (ptr) break;
959 SCM_SETGC8MARK (ptr);
960 if (SCM_VELTS (ptr))
961 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
962 (scm_sizet)
963 (SCM_LENGTH (ptr) +
964 (sizeof (SCM_STACKITEM) + -1 +
965 sizeof (scm_contregs)) /
966 sizeof (SCM_STACKITEM)));
967 break;
968 #ifdef HAVE_ARRAYS
969 case scm_tc7_bvect:
970 case scm_tc7_byvect:
971 case scm_tc7_ivect:
972 case scm_tc7_uvect:
973 case scm_tc7_fvect:
974 case scm_tc7_dvect:
975 case scm_tc7_cvect:
976 case scm_tc7_svect:
977 #ifdef HAVE_LONG_LONGS
978 case scm_tc7_llvect:
979 #endif
980 #endif
981 case scm_tc7_string:
982 SCM_SETGC8MARK (ptr);
983 break;
984
985 case scm_tc7_substring:
986 if (SCM_GC8MARKP(ptr))
987 break;
988 SCM_SETGC8MARK (ptr);
989 ptr = SCM_CDR (ptr);
990 goto gc_mark_loop;
991
992 case scm_tc7_wvect:
993 if (SCM_GC8MARKP(ptr))
994 break;
995 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
996 scm_weak_vectors = ptr;
997 SCM_SETGC8MARK (ptr);
998 if (SCM_IS_WHVEC_ANY (ptr))
999 {
1000 int x;
1001 int len;
1002 int weak_keys;
1003 int weak_values;
1004
1005 len = SCM_LENGTH (ptr);
1006 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1007 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
1008
1009 for (x = 0; x < len; ++x)
1010 {
1011 SCM alist;
1012 alist = SCM_VELTS (ptr)[x];
1013
1014 /* mark everything on the alist except the keys or
1015 * values, according to weak_values and weak_keys. */
1016 while ( SCM_CONSP (alist)
1017 && !SCM_GCMARKP (alist)
1018 && SCM_CONSP (SCM_CAR (alist)))
1019 {
1020 SCM kvpair;
1021 SCM next_alist;
1022
1023 kvpair = SCM_CAR (alist);
1024 next_alist = SCM_CDR (alist);
1025 /*
1026 * Do not do this:
1027 * SCM_SETGCMARK (alist);
1028 * SCM_SETGCMARK (kvpair);
1029 *
1030 * It may be that either the key or value is protected by
1031 * an escaped reference to part of the spine of this alist.
1032 * If we mark the spine here, and only mark one or neither of the
1033 * key and value, they may never be properly marked.
1034 * This leads to a horrible situation in which an alist containing
1035 * freelist cells is exported.
1036 *
1037 * So only mark the spines of these arrays last of all marking.
1038 * If somebody confuses us by constructing a weak vector
1039 * with a circular alist then we are hosed, but at least we
1040 * won't prematurely drop table entries.
1041 */
1042 if (!weak_keys)
1043 scm_gc_mark (SCM_CAR (kvpair));
1044 if (!weak_values)
1045 scm_gc_mark (SCM_GCCDR (kvpair));
1046 alist = next_alist;
1047 }
1048 if (SCM_NIMP (alist))
1049 scm_gc_mark (alist);
1050 }
1051 }
1052 break;
1053
1054 case scm_tc7_msymbol:
1055 if (SCM_GC8MARKP(ptr))
1056 break;
1057 SCM_SETGC8MARK (ptr);
1058 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
1059 ptr = SCM_SYMBOL_PROPS (ptr);
1060 goto gc_mark_loop;
1061 case scm_tc7_ssymbol:
1062 if (SCM_GC8MARKP(ptr))
1063 break;
1064 SCM_SETGC8MARK (ptr);
1065 break;
1066 case scm_tcs_subrs:
1067 break;
1068 case scm_tc7_port:
1069 i = SCM_PTOBNUM (ptr);
1070 if (!(i < scm_numptob))
1071 goto def;
1072 if (SCM_GC8MARKP (ptr))
1073 break;
1074 SCM_SETGC8MARK (ptr);
1075 if (SCM_PTAB_ENTRY(ptr))
1076 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
1077 if (scm_ptobs[i].mark)
1078 {
1079 ptr = (scm_ptobs[i].mark) (ptr);
1080 goto gc_mark_loop;
1081 }
1082 else
1083 return;
1084 break;
1085 case scm_tc7_smob:
1086 if (SCM_GC8MARKP (ptr))
1087 break;
1088 SCM_SETGC8MARK (ptr);
1089 switch (SCM_GCTYP16 (ptr))
1090 { /* should be faster than going through scm_smobs */
1091 case scm_tc_free_cell:
1092 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1093 case scm_tc16_allocated:
1094 case scm_tc16_big:
1095 case scm_tc16_real:
1096 case scm_tc16_complex:
1097 break;
1098 default:
1099 i = SCM_SMOBNUM (ptr);
1100 if (!(i < scm_numsmob))
1101 goto def;
1102 if (scm_smobs[i].mark)
1103 {
1104 ptr = (scm_smobs[i].mark) (ptr);
1105 goto gc_mark_loop;
1106 }
1107 else
1108 return;
1109 }
1110 break;
1111 default:
1112 def:scm_wta (ptr, "unknown type in ", "gc_mark");
1113 }
1114 }
1115
1116
1117 /* Mark a Region Conservatively
1118 */
1119
1120 void
1121 scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
1122 {
1123 register long m = n;
1124 register int i, j;
1125 register SCM_CELLPTR ptr;
1126
1127 while (0 <= --m)
1128 if (SCM_CELLP (*(SCM **) (& x[m])))
1129 {
1130 ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & x[m]));
1131 i = 0;
1132 j = scm_n_heap_segs - 1;
1133 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1134 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1135 {
1136 while (i <= j)
1137 {
1138 int seg_id;
1139 seg_id = -1;
1140 if ( (i == j)
1141 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1142 seg_id = i;
1143 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1144 seg_id = j;
1145 else
1146 {
1147 int k;
1148 k = (i + j) / 2;
1149 if (k == i)
1150 break;
1151 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1152 {
1153 j = k;
1154 ++i;
1155 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1156 continue;
1157 else
1158 break;
1159 }
1160 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1161 {
1162 i = k;
1163 --j;
1164 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1165 continue;
1166 else
1167 break;
1168 }
1169 }
1170 if ( !scm_heap_table[seg_id].valid
1171 || scm_heap_table[seg_id].valid (ptr,
1172 &scm_heap_table[seg_id]))
1173 scm_gc_mark (*(SCM *) & x[m]);
1174 break;
1175 }
1176
1177 }
1178 }
1179 }
1180
1181
1182 /* The following is a C predicate which determines if an SCM value can be
1183 regarded as a pointer to a cell on the heap. The code is duplicated
1184 from scm_mark_locations. */
1185
1186
/* Return non-zero iff VALUE can be regarded as a pointer to a cell on
 * the heap.  The segment lookup is a byte-for-byte copy of the search
 * in scm_mark_locations above -- keep the two in sync.
 */
int
scm_cellp (SCM value)
{
  register int i, j;
  register SCM_CELLPTR ptr;

  if SCM_CELLP (*(SCM **) (& value))
    {
      ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & value));
      i = 0;
      j = scm_n_heap_segs - 1;
      /* Quick reject: outside the range covered by all segments.  */
      if (   SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
	  && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
	{
	  /* Binary search for the segment containing ptr; breaks out
	     with no answer when ptr lies in a gap between segments.  */
	  while (i <= j)
	    {
	      int seg_id;
	      seg_id = -1;
	      if (   (i == j)
		  || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
		seg_id = i;
	      else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
		seg_id = j;
	      else
		{
		  int k;
		  k = (i + j) / 2;
		  if (k == i)
		    break;
		  if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
		    {
		      j = k;
		      ++i;
		      if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
			continue;
		      else
			break;
		    }
		  else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
		    {
		      i = k;
		      --j;
		      if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
			continue;
		      else
			break;
		    }
		}
	      /* Found a segment; accept only if its validity hook
		 (when present) agrees.  */
	      if (   !scm_heap_table[seg_id].valid
		  || scm_heap_table[seg_id].valid (ptr,
						   &scm_heap_table[seg_id]))
		return 1;
	      break;
	    }

	}
    }
  return 0;
}
1246
1247
1248 static void
1249 scm_mark_weak_vector_spines ()
1250 {
1251 SCM w;
1252
1253 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
1254 {
1255 if (SCM_IS_WHVEC_ANY (w))
1256 {
1257 SCM *ptr;
1258 SCM obj;
1259 int j;
1260 int n;
1261
1262 obj = w;
1263 ptr = SCM_VELTS (w);
1264 n = SCM_LENGTH (w);
1265 for (j = 0; j < n; ++j)
1266 {
1267 SCM alist;
1268
1269 alist = ptr[j];
1270 while ( SCM_CONSP (alist)
1271 && !SCM_GCMARKP (alist)
1272 && SCM_CONSP (SCM_CAR (alist)))
1273 {
1274 SCM_SETGCMARK (alist);
1275 SCM_SETGCMARK (SCM_CAR (alist));
1276 alist = SCM_GCCDR (alist);
1277 }
1278 }
1279 }
1280 }
1281 }
1282
1283
1284
/* Sweep phase of the collector.  Walks every heap segment, returning
 * unmarked cells to their segment's freelist (or, under the new GC
 * scheme, grouping them into fixed-size clusters), clearing the mark
 * bits of surviving objects, and freeing any malloc'd storage hanging
 * off dead objects.  The running total of malloc'd bytes released is
 * accumulated in `m' and subtracted from scm_mallocated at the end.
 * Finally, dead entries in weak vectors / weak hash tables are dropped.
 */
void
scm_gc_sweep ()
{
  register SCM_CELLPTR ptr;
#ifdef SCM_POINTERS_MUNGED
  register SCM scmptr;
#else
  /* When pointers are not munged, `scmptr' is just an alias for the
     raw cell pointer.  */
#undef scmptr
#define scmptr (SCM)ptr
#endif
  register SCM nfreelist;
  register scm_freelist_t *hp_freelist;
  register long m;		/* bytes of malloc'd storage released */
  register int span;		/* cells per object in this segment */
  long i;
  scm_sizet seg_size;

  m = 0;

#ifdef GUILE_NEW_GC_SCHEME
  /* Reset all free list pointers.  We'll reconstruct them completely
     while scanning.  */
  for (i = 0; i < scm_n_heap_segs; i++)
    {
      scm_heap_table[i].freelistp->cells = SCM_EOL;
      scm_heap_table[i].freelistp->n_objects
	= scm_heap_table[i].freelistp->gc_trigger;
      scm_heap_table[i].freelistp->clusters = SCM_EOL;
      scm_heap_table[i].freelistp->clustertail
	= &scm_heap_table[i].freelistp->clusters;
      scm_heap_table[i].freelistp->triggeredp = 0;
    }
#else
  /* Reset all free list pointers.  We'll reconstruct them completely
     while scanning.  */
  for (i = 0; i < scm_n_heap_segs; i++)
    scm_heap_table[i].freelistp->cells = SCM_EOL;
#endif

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register scm_sizet n = 0;	/* cells freed in this segment */
      register scm_sizet j;
#ifdef GUILE_NEW_GC_SCHEME
      register scm_sizet n_objects;
#endif

      /* Unmarked cells go onto the front of the freelist this heap
	 segment points to.  Rather than updating the real freelist
	 pointer as we go along, we accumulate the new head in
	 nfreelist.  Then, if it turns out that the entire segment is
	 free, we free (i.e., malloc's free) the whole segment, and
	 simply don't assign nfreelist back into the real freelist.  */
      hp_freelist = scm_heap_table[i].freelistp;
      nfreelist = hp_freelist->cells;
#ifdef GUILE_NEW_GC_SCHEME
      n_objects = hp_freelist->n_objects;
#endif
      span = scm_heap_table[i].span;
      hp_freelist->collected = 0;

      ptr = CELL_UP (scm_heap_table[i].bounds[0]);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1]) - ptr;
      for (j = seg_size + span; j -= span; ptr += span)
	{
#ifdef SCM_POINTERS_MUNGED
	  scmptr = PTR2SCM (ptr);
#endif
	  /* Dispatch on the object's 7-bit type tag.  Marked objects
	     jump to cmrkcontinue/c8mrkcontinue (which clear the mark);
	     unmarked objects fall through to the freeing code below,
	     after releasing any attached malloc'd storage.  */
	  switch SCM_TYP7 (scmptr)
	    {
	    case scm_tcs_cons_gloc:
	      if (SCM_GCMARKP (scmptr))
		{
		  if (SCM_CDR (SCM_CAR (scmptr) - 1) == (SCM)1)
		    SCM_SETCDR (SCM_CAR (scmptr) - 1, (SCM) 0);
		  goto cmrkcontinue;
		}
	      {
		SCM vcell;
		vcell = SCM_CAR (scmptr) - 1L;

		/* NOTE(review): CDR values 0/1 here appear to flag a
		   struct whose layout owns a free function -- confirm
		   against struct.c before relying on this.  */
		if ((SCM_CDR (vcell) == 0) || (SCM_UNPACK (SCM_CDR (vcell)) == 1))
		  {
		    scm_struct_free_t free
		      = (scm_struct_free_t) ((SCM*) vcell)[scm_struct_i_free];
		    m += free ((SCM *) vcell, (SCM *) SCM_GCCDR (scmptr));
		  }
	      }
	      break;
	    case scm_tcs_cons_imcar:
	    case scm_tcs_cons_nimcar:
	    case scm_tcs_closures:
	    case scm_tc7_pws:
	      if (SCM_GCMARKP (scmptr))
		goto cmrkcontinue;
	      break;
	    case scm_tc7_wvect:
	      if (SCM_GC8MARKP (scmptr))
		{
		  goto c8mrkcontinue;
		}
	      else
		{
		  /* The element block starts 2 words before VELTS
		     (header words), hence the -2 and +2.  */
		  m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
		  scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
		  break;
		}

	    case scm_tc7_vector:
	    case scm_tc7_lvector:
#ifdef CCLO
	    case scm_tc7_cclo:
#endif
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;

	      m += (SCM_LENGTH (scmptr) * sizeof (SCM));
	      /* Shared exit for every case that just frees the CHARS
		 block after accounting its size in m.  */
	    freechars:
	      scm_must_free (SCM_CHARS (scmptr));
	      /*	SCM_SETCHARS(scmptr, 0);*/
	      break;
#ifdef HAVE_ARRAYS
	    case scm_tc7_bvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
	      goto freechars;
	    case scm_tc7_byvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
	      goto freechars;
	    case scm_tc7_ivect:
	    case scm_tc7_uvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
	      goto freechars;
	    case scm_tc7_svect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
	      goto freechars;
#ifdef HAVE_LONG_LONGS
	    case scm_tc7_llvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
	      goto freechars;
#endif
	    case scm_tc7_fvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
	      goto freechars;
	    case scm_tc7_dvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
	      goto freechars;
	    case scm_tc7_cvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
	      goto freechars;
#endif
	    case scm_tc7_substring:
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;
	      break;
	    case scm_tc7_string:
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) + 1;
	      goto freechars;
	    case scm_tc7_msymbol:
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;
	      m += (  SCM_LENGTH (scmptr)
		    + 1
		    + sizeof (SCM) * ((SCM *)SCM_CHARS (scmptr) - SCM_SLOTS(scmptr)));
	      scm_must_free ((char *)SCM_SLOTS (scmptr));
	      break;
	    case scm_tc7_contin:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
	      if (SCM_VELTS (scmptr))
		goto freechars;
	      /* NOTE(review): when VELTS is null this falls through to
		 the ssymbol case (whose mark test then fails and it
		 breaks) -- looks intentional but confirm.  */
	    case scm_tc7_ssymbol:
	      if SCM_GC8MARKP(scmptr)
		goto c8mrkcontinue;
	      break;
	    case scm_tcs_subrs:
	      /* Subrs are statically allocated and never collected.  */
	      continue;
	    case scm_tc7_port:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      if SCM_OPENP (scmptr)
		{
		  int k = SCM_PTOBNUM (scmptr);
		  if (!(k < scm_numptob))
		    goto sweeperr;
		  /* Keep "revealed" ports alive.  */
		  if (scm_revealed_count (scmptr) > 0)
		    continue;
		  /* Yes, I really do mean scm_ptobs[k].free */
		  /* rather than ftobs[k].close.  .close */
		  /* is for explicit CLOSE-PORT by user */
		  m += (scm_ptobs[k].free) (scmptr);
		  SCM_SETSTREAM (scmptr, 0);
		  scm_remove_from_port_table (scmptr);
		  scm_gc_ports_collected++;
		  SCM_SETAND_CAR (scmptr, ~SCM_OPN);
		}
	      break;
	    case scm_tc7_smob:
	      /* Smobs dispatch further on the 16-bit subtype.  */
	      switch SCM_GCTYP16 (scmptr)
		{
		case scm_tc_free_cell:
		case scm_tc16_real:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;
		  break;
#ifdef SCM_BIGDIG
		case scm_tc16_big:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;
		  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
		  goto freechars;
#endif /* def SCM_BIGDIG */
		case scm_tc16_complex:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;
		  m += 2 * sizeof (double);
		  goto freechars;
		default:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;

		  {
		    int k;
		    k = SCM_SMOBNUM (scmptr);
		    if (!(k < scm_numsmob))
		      goto sweeperr;
		    /* User-registered smob: delegate freeing to its
		       own free hook, which reports bytes released.  */
		    m += (scm_smobs[k].free) ((SCM) scmptr);
		    break;
		  }
		}
	      break;
	    default:
	    sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
	    }
#if 0
	  if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
	    exit (2);
#endif
#ifndef GUILE_NEW_GC_SCHEME
	  n += span;
#else
	  /* Cluster bookkeeping: every gc_trigger objects the chain
	     built so far is closed off into one cluster and a fresh
	     chain is started.  */
	  if (--n_objects < 0)
	    {
	      SCM_SETCAR (scmptr, nfreelist);
	      *hp_freelist->clustertail = scmptr;
	      hp_freelist->clustertail = SCM_CDRLOC (scmptr);

	      nfreelist = SCM_EOL;
	      n += span * (hp_freelist->gc_trigger - n_objects + 1);
	      n_objects = hp_freelist->gc_trigger;
	    }
	  else
#endif
	    {
	      /* Stick the new cell on the front of nfreelist.  It's
		 critical that we mark this cell as freed; otherwise, the
		 conservative collector might trace it as some other type
		 of object.  */
	      SCM_SETCAR (scmptr, scm_tc_free_cell);
	      SCM_SETCDR (scmptr, nfreelist);
	      nfreelist = scmptr;
	    }

	  continue;
	c8mrkcontinue:
	  SCM_CLRGC8MARK (scmptr);
	  continue;
	cmrkcontinue:
	  SCM_CLRGCMARK (scmptr);
	}
#ifdef GC_FREE_SEGMENTS
      /* The whole segment turned out to be garbage: give it back to
	 malloc and compact the segment table.  */
      if (n == seg_size)
	{
	  register long j;

	  hp_freelist->heap_size -= seg_size;
	  free ((char *) scm_heap_table[i].bounds[0]);
	  scm_heap_table[i].bounds[0] = 0;
	  for (j = i + 1; j < scm_n_heap_segs; j++)
	    scm_heap_table[j - 1] = scm_heap_table[j];
	  scm_n_heap_segs -= 1;
	  i--;    /* We need to scan the segment just moved.  */
	}
      else
#endif /* ifdef GC_FREE_SEGMENTS */
	{
	  /* Update the real freelist pointer to point to the head of
	     the list of free cells we've built for this segment.  */
	  hp_freelist->cells = nfreelist;
#ifdef GUILE_NEW_GC_SCHEME
	  hp_freelist->n_objects = n_objects;
#endif
	}

#ifdef GUILE_NEW_GC_SCHEME
      j = span * (hp_freelist->gc_trigger - n_objects);
      /* sum up---if this is last turn for this freelist */
      hp_freelist->collected += n + j;
      n -= j;	  /* compensate for the sum up */
#else
      hp_freelist->collected += n;
#endif
      scm_cells_allocated += hp_freelist->heap_size - hp_freelist->collected;

#ifdef GUILE_DEBUG_FREELIST
#ifdef GUILE_NEW_GC_SCHEME
      scm_check_freelist (hp_freelist == &scm_master_freelist
			  ? scm_freelist
			  : scm_freelist2);
#else
      scm_check_freelist (hp_freelist);
#endif
      scm_map_free_list ();
#endif
    }

#ifdef GUILE_NEW_GC_SCHEME
  /* Close off the partially-built final cluster of each freelist.  */
  for (i = 0; i < scm_n_heap_segs; i++)
    if (scm_heap_table[i].freelistp->clustertail != NULL)
      {
	scm_freelist_t *hp_freelist = scm_heap_table[i].freelistp;
	if (hp_freelist->gc_trigger - hp_freelist->n_objects > 1)
	  {
	    SCM c = hp_freelist->cells;
	    hp_freelist->n_objects = hp_freelist->gc_trigger;
	    SCM_SETCAR (c, SCM_CDR (c));
	    SCM_SETCDR (c, SCM_EOL);
	    *hp_freelist->clustertail = c;
	  }
	else
	  *hp_freelist->clustertail = SCM_EOL;
	hp_freelist->clustertail = NULL;
      }

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead.  */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
#endif

  /* Scan weak vectors.  Entries whose referent died this cycle are
     replaced by #f (plain weak vectors) or unlinked from the bucket
     alist (weak hash tables).  */
  {
    SCM *ptr, w;
    for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
      {
	if (!SCM_IS_WHVEC_ANY (w))
	  {
	    register long j, n;

	    ptr = SCM_VELTS (w);
	    n = SCM_LENGTH (w);
	    for (j = 0; j < n; ++j)
	      if (SCM_FREEP (ptr[j]))
		ptr[j] = SCM_BOOL_F;
	  }
	else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
	  {
	    SCM obj = w;
	    register long n = SCM_LENGTH (w);
	    register long j;

	    ptr = SCM_VELTS (w);

	    for (j = 0; j < n; ++j)
	      {
		SCM * fixup;
		SCM alist;
		int weak_keys;
		int weak_values;

		weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
		weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);

		/* fixup always points at the link holding the current
		   pair, so unlinking is a single store.  */
		fixup = ptr + j;
		alist = *fixup;

		while (   SCM_CONSP (alist)
		       && SCM_CONSP (SCM_CAR (alist)))
		  {
		    SCM key;
		    SCM value;

		    key = SCM_CAAR (alist);
		    value = SCM_CDAR (alist);
		    if (  (weak_keys && SCM_FREEP (key))
			|| (weak_values && SCM_FREEP (value)))
		      {
			*fixup = SCM_CDR (alist);
		      }
		    else
		      fixup = SCM_CDRLOC (alist);
		    alist = SCM_CDR (alist);
		  }
	      }
	  }
      }
  }
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
1704
1705
1706 \f
1707
1708 /* {Front end to malloc}
1709 *
1710 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1711 *
 * These functions provide services comparable to malloc, realloc, and
1713 * free. They are for allocating malloced parts of scheme objects.
1714 * The primary purpose of the front end is to impose calls to gc.
1715 */
1716
1717 /* scm_must_malloc
1718 * Return newly malloced storage or throw an error.
1719 *
1720 * The parameter WHAT is a string for error reporting.
1721 * If the threshold scm_mtrigger will be passed by this
1722 * allocation, or if the first call to malloc fails,
1723 * garbage collect -- on the presumption that some objects
1724 * using malloced storage may be collected.
1725 *
1726 * The limit scm_mtrigger may be raised by this allocation.
1727 */
1728 void *
1729 scm_must_malloc (scm_sizet size, const char *what)
1730 {
1731 void *ptr;
1732 unsigned long nm = scm_mallocated + size;
1733
1734 if (nm <= scm_mtrigger)
1735 {
1736 SCM_SYSCALL (ptr = malloc (size));
1737 if (NULL != ptr)
1738 {
1739 scm_mallocated = nm;
1740 return ptr;
1741 }
1742 }
1743
1744 scm_igc (what);
1745
1746 nm = scm_mallocated + size;
1747 SCM_SYSCALL (ptr = malloc (size));
1748 if (NULL != ptr)
1749 {
1750 scm_mallocated = nm;
1751 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1752 if (nm > scm_mtrigger)
1753 scm_mtrigger = nm + nm / 2;
1754 else
1755 scm_mtrigger += scm_mtrigger / 2;
1756 }
1757 return ptr;
1758 }
1759
1760 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1761 return 0; /* never reached */
1762 }
1763
1764
1765 /* scm_must_realloc
1766 * is similar to scm_must_malloc.
1767 */
1768 void *
1769 scm_must_realloc (void *where,
1770 scm_sizet old_size,
1771 scm_sizet size,
1772 const char *what)
1773 {
1774 void *ptr;
1775 scm_sizet nm = scm_mallocated + size - old_size;
1776
1777 if (nm <= scm_mtrigger)
1778 {
1779 SCM_SYSCALL (ptr = realloc (where, size));
1780 if (NULL != ptr)
1781 {
1782 scm_mallocated = nm;
1783 return ptr;
1784 }
1785 }
1786
1787 scm_igc (what);
1788
1789 nm = scm_mallocated + size - old_size;
1790 SCM_SYSCALL (ptr = realloc (where, size));
1791 if (NULL != ptr)
1792 {
1793 scm_mallocated = nm;
1794 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1795 if (nm > scm_mtrigger)
1796 scm_mtrigger = nm + nm / 2;
1797 else
1798 scm_mtrigger += scm_mtrigger / 2;
1799 }
1800 return ptr;
1801 }
1802
1803 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1804 return 0; /* never reached */
1805 }
1806
1807 void
1808 scm_must_free (void *obj)
1809 {
1810 if (obj)
1811 free (obj);
1812 else
1813 scm_wta (SCM_INUM0, "already free", "");
1814 }
1815
1816 /* Announce that there has been some malloc done that will be freed
1817 * during gc. A typical use is for a smob that uses some malloced
1818 * memory but can not get it from scm_must_malloc (for whatever
1819 * reason). When a new object of this smob is created you call
1820 * scm_done_malloc with the size of the object. When your smob free
1821 * function is called, be sure to include this size in the return
1822 * value. */
1823
1824 void
1825 scm_done_malloc (long size)
1826 {
1827 scm_mallocated += size;
1828
1829 if (scm_mallocated > scm_mtrigger)
1830 {
1831 scm_igc ("foreign mallocs");
1832 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1833 {
1834 if (scm_mallocated > scm_mtrigger)
1835 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1836 else
1837 scm_mtrigger += scm_mtrigger / 2;
1838 }
1839 }
1840 }
1841
1842
1843 \f
1844
1845 /* {Heap Segments}
1846 *
1847 * Each heap segment is an array of objects of a particular size.
1848 * Every segment has an associated (possibly shared) freelist.
1849 * A table of segment records is kept that records the upper and
1850 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
1852 * into valid segments at reasonable offsets). */
1853
/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

/* The table of heap segment records, kept sorted by base address
   (see init_heap_seg), and the number of entries currently in it.  */
struct scm_heap_seg_data * scm_heap_table = 0;
int scm_n_heap_segs = 0;
1868
1869 /* init_heap_seg
1870 * initializes a new heap segment and return the number of objects it contains.
1871 *
1872 * The segment origin, segment size in bytes, and the span of objects
1873 * in cells are input parameters. The freelist is both input and output.
1874 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record.
1877 */
1878
1879
/* Initialize the memory at SEG_ORG (SIZE bytes) as a heap segment for
 * FREELISTP, insert its record into scm_heap_table (kept sorted by
 * base address), and thread its cells onto the freelist -- either as
 * gc_trigger-sized clusters (new GC scheme) or as one flat list.
 * Returns the number of bytes actually used, or 0 if SEG_ORG is null.
 */
static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelistp)
{
  register SCM_CELLPTR ptr;
#ifdef SCM_POINTERS_MUNGED
  register SCM scmptr;
#else
  /* When pointers are not munged, `scmptr' aliases the raw pointer.  */
#undef scmptr
#define scmptr ptr
#endif
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelistp->span;	/* cells per object in this segment */

  if (seg_org == NULL)
    return 0;

  ptr = seg_org;

  /* Round the usable size down to a whole number of span-sized objects.  */
  size = (size / sizeof (scm_cell) / span) * span * sizeof (scm_cell);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = CELL_DN ((char *) ptr + size);

  /* Find the right place and insert the segment record.
   *
   */
  for (new_seg_index = 0;
       (   (new_seg_index < scm_n_heap_segs)
	&& SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    /* Shift later records up to open a slot at new_seg_index.  */
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].valid = 0;
  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelistp = freelistp;
  scm_heap_table[new_seg_index].bounds[0] = (SCM_CELLPTR)ptr;
  scm_heap_table[new_seg_index].bounds[1] = (SCM_CELLPTR)seg_end;


  /* Compute the least valid object pointer w/in this segment
   */
  ptr = CELL_UP (ptr);


  /*n_new_cells*/
  n_new_cells = seg_end - ptr;

#ifdef GUILE_NEW_GC_SCHEME

  freelistp->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters
   */
  {
    SCM clusters;
    SCM *clusterp = &clusters;
    int trigger = span * freelistp->gc_trigger;
    int n, c = 0;

    while (n_new_cells > span)
      {
	/* Each cluster consumes one leading cell (the cluster link)
	   plus up to `trigger' cells of free objects.  */
	if (n_new_cells > trigger)
	  n = span + trigger;
	else
	  n = n_new_cells;
	n_new_cells -= n;
	n -= span;
	c += span;

	*clusterp = PTR2SCM (ptr);
	SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
	clusterp = SCM_CDRLOC (*clusterp);

	ptr += span;
	seg_end = ptr + n;
	/* Chain the cluster's cells into a freelist of free cells.  */
	while (ptr < seg_end)
	  {
#ifdef SCM_POINTERS_MUNGED
	    scmptr = PTR2SCM (ptr);
#endif
	    SCM_SETCAR (scmptr, scm_tc_free_cell);
	    SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
	    ptr += span;
	  }
	SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
      }

    /* Correction for cluster cells + spill */
    freelistp->heap_size -= c + n_new_cells;

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelistp->clusters;
    freelistp->clusters = clusters;
  }

#else /* GUILE_NEW_GC_SCHEME */

  /* Prepend objects in this segment to the freelist.
   */
  while (ptr < seg_end)
    {
#ifdef SCM_POINTERS_MUNGED
      scmptr = PTR2SCM (ptr);
#endif
      SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
      SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
      ptr += span;
    }

  ptr -= span;

  /* Patch up the last freelist pointer in the segment
   * to join it to the input freelist.
   */
  SCM_SETCDR (PTR2SCM (ptr), freelistp->cells);
  freelistp->cells = PTR2SCM (CELL_UP (seg_org));

  freelistp->heap_size += n_new_cells;

#endif /* GUILE_NEW_GC_SCHEME */

  return size;
#ifdef scmptr
#undef scmptr
#endif
}
2019
2020
2021 static void
2022 alloc_some_heap (scm_freelist_t *freelistp)
2023 {
2024 struct scm_heap_seg_data * tmptable;
2025 SCM_CELLPTR ptr;
2026 scm_sizet len;
2027
2028 /* Critical code sections (such as the garbage collector)
2029 * aren't supposed to add heap segments.
2030 */
2031 if (scm_gc_heap_lock)
2032 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
2033
2034 /* Expand the heap tables to have room for the new segment.
2035 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
2036 * only if the allocation of the segment itself succeeds.
2037 */
2038 len = (1 + scm_n_heap_segs) * sizeof (struct scm_heap_seg_data);
2039
2040 SCM_SYSCALL (tmptable = ((struct scm_heap_seg_data *)
2041 realloc ((char *)scm_heap_table, len)));
2042 if (!tmptable)
2043 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
2044 else
2045 scm_heap_table = tmptable;
2046
2047
2048 /* Pick a size for the new heap segment.
2049 * The rule for picking the size of a segment is explained in
2050 * gc.h
2051 */
2052 if (scm_expmem)
2053 {
2054 len = (scm_sizet) SCM_EXPHEAP (freelistp->heap_size * sizeof (scm_cell));
2055 if ((scm_sizet) SCM_EXPHEAP (freelistp->heap_size * sizeof (scm_cell))
2056 != len)
2057 len = 0;
2058 }
2059 else
2060 len = SCM_HEAP_SEG_SIZE;
2061
2062 {
2063 scm_sizet smallest;
2064
2065 smallest = (freelistp->span * sizeof (scm_cell));
2066 if (len < smallest)
2067 len = (freelistp->span * sizeof (scm_cell));
2068
2069 /* Allocate with decaying ambition. */
2070 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2071 && (len >= smallest))
2072 {
2073 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (len));
2074 if (ptr)
2075 {
2076 init_heap_seg (ptr, len, freelistp);
2077 return;
2078 }
2079 len /= 2;
2080 }
2081 }
2082
2083 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
2084 }
2085
2086
2087
/* Walk every cell in every heap segment and, for cells whose car has
 * tag bits 001 (NOTE(review): presumably the gloc tag -- confirm
 * against tags.h), restore the car to NAME when the gloc's variable
 * cell matches NAME (or NAME is #t, meaning "all").  Returns NAME.
 */
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
	    "")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1,name);
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p  = (SCM_CELLPTR)scm_heap_table[x].bounds[0];
      pbound = (SCM_CELLPTR)scm_heap_table[x].bounds[1];
      while (p < pbound)
	{
	  SCM incar;
	  incar = p->car;
	  if (1 == (7 & (int)incar))
	    {
	      --incar;
	      /* CDR values 0 and 1 are excluded -- they appear to be
		 reserved markers (see the sweep code above).  */
	      if (   ((name == SCM_BOOL_T) || (SCM_CAR (incar) == name))
		  && (SCM_CDR (incar) != 0)
		  && (SCM_UNPACK (SCM_CDR (incar)) != 1))
		{
		  p->car = name;
		}
	    }
	  ++p;
	}
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME
2125
2126
2127 \f
2128 /* {GC Protection Helper Functions}
2129 */
2130
2131
/* Deliberate no-op: passing a pointer here forces the pointed-to SCM
   value to remain live at the call site, where the conservative stack
   scan can find it.  */
void
scm_remember (SCM *ptr)
{ /* empty */ }
2135
2136
2137 /*
2138 These crazy functions prevent garbage collection
2139 of arguments after the first argument by
2140 ensuring they remain live throughout the
2141 function because they are used in the last
2142 line of the code block.
2143 It'd be better to have a nice compiler hint to
2144 aid the conservative stack-scanning GC. --03/09/00 gjb */
/* Return ELT unchanged; the trailing varargs exist only to keep their
   values live until this call (see the comment above).  */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}
2150
/* Integer variant of scm_return_first: return I unchanged, keeping the
   ignored varargs live until this call.  */
int
scm_return_first_int (int i, ...)
{
  return i;
}
2156
2157
/* Protect OBJ from garbage collection permanently by consing it onto
   the scm_permobjs list (itself a GC root).  There is no way to undo
   this.  Returns OBJ.  */
SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
2166
2167
2168 /* Protect OBJ from the garbage collector. OBJ will not be freed,
2169 even if all other references are dropped, until someone applies
2170 scm_unprotect_object to it. This function returns OBJ.
2171
2172 Calls to scm_protect_object nest. For every object OBJ, there is a
2173 counter which scm_protect_object(OBJ) increments and
2174 scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
2175 an object's counter is greater than zero, the garbage collector
2176 will not free it.
2177
2178 Of course, that's not how it's implemented. scm_protect_object and
2179 scm_unprotect_object just maintain a list of references to things.
2180 Since the GC knows about this list, all objects it mentions stay
2181 alive. scm_protect_object adds its argument to the list;
2182 scm_unprotect_object removes the first occurrence of its argument
2183 to the list. */
SCM
scm_protect_object (SCM obj)
{
  /* As described in the comment above: protection is just membership
     in the scm_protects list, so nested protection works by having
     multiple entries for the same object.  */
  scm_protects = scm_cons (obj, scm_protects);

  return obj;
}
2191
2192
2193 /* Remove any protection for OBJ established by a prior call to
2194 scm_protect_object. This function returns OBJ.
2195
2196 See scm_protect_object for more information. */
2197 SCM
2198 scm_unprotect_object (SCM obj)
2199 {
2200 SCM *tail_ptr = &scm_protects;
2201
2202 while (SCM_CONSP (*tail_ptr))
2203 if (SCM_CAR (*tail_ptr) == obj)
2204 {
2205 *tail_ptr = SCM_CDR (*tail_ptr);
2206 break;
2207 }
2208 else
2209 tail_ptr = SCM_CDRLOC (*tail_ptr);
2210
2211 return obj;
2212 }
2213
/* Set to 1 once process shutdown has begun (read elsewhere, e.g. by
   port code, to skip work during termination).  */
int terminating;

/* called on process termination.  Registered via atexit or on_exit in
   scm_init_storage; flushes all buffered port output.  */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  scm_flush_all_ports ();
}
2234
2235 \f
2236 static int
2237 make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelistp)
2238 {
2239 if (0L == init_heap_size)
2240 init_heap_size = SCM_INIT_HEAP_SIZE;
2241 if (!init_heap_seg ((SCM_CELLPTR) malloc (init_heap_size),
2242 init_heap_size,
2243 freelistp))
2244 {
2245 init_heap_size = SCM_HEAP_SEG_SIZE;
2246 if (!init_heap_seg ((SCM_CELLPTR) malloc (init_heap_size),
2247 init_heap_size,
2248 freelistp))
2249 return 1;
2250 }
2251 else
2252 scm_expmem = 1;
2253
2254 return 0;
2255 }
2256
2257 \f
/* One-time initialization of the storage subsystem: clear the GC-root
 * protect slots, set up the (one or two) freelists, allocate the
 * segment table and initial heap segments, create the port table,
 * register the exit handler, and build the bootstrap Scheme objects
 * (symbol hashes, null string/vector, etc.).  Returns 0 on success,
 * non-zero if an initial allocation failed.
 */
#ifdef GUILE_NEW_GC_SCHEME
int
scm_init_storage (scm_sizet init_heap_size, int gc_trigger,
		  scm_sizet init_heap2_size, int gc_trigger2)
#else
int
scm_init_storage (scm_sizet init_heap_size, scm_sizet init_heap2_size)
#endif
{
  scm_sizet j;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;		/* GC stays blocked until boot finishes */

#ifdef GUILE_NEW_GC_SCHEME
  /* Single-cell freelist (span 1); 0 triggers fall back to defaults.  */
  scm_freelist = SCM_EOL;
  scm_master_freelist.clusters = SCM_EOL;
  scm_master_freelist.triggeredp = 0;
  scm_master_freelist.gc_trigger
    = gc_trigger ? gc_trigger : SCM_GC_TRIGGER;
  scm_master_freelist.span = 1;
  scm_master_freelist.collected = 0;
  scm_master_freelist.heap_size = 0;
#else
  scm_freelist.cells = SCM_EOL;
  scm_freelist.span = 1;
  scm_freelist.collected = 0;
  scm_freelist.heap_size = 0;
#endif

#ifdef GUILE_NEW_GC_SCHEME
  /* Double-cell freelist (span 2).  */
  scm_freelist2 = SCM_EOL;
  scm_master_freelist2.clusters = SCM_EOL;
  scm_master_freelist2.triggeredp = 0;
  scm_master_freelist2.gc_trigger
    = gc_trigger2 ? gc_trigger2 : SCM_GC_TRIGGER2;
  scm_master_freelist2.span = 2;
  scm_master_freelist2.collected = 0;
  scm_master_freelist2.heap_size = 0;
#else
  scm_freelist2.cells = SCM_EOL;
  scm_freelist2.span = 2;
  scm_freelist2.collected = 0;
  scm_freelist2.heap_size = 0;
#endif

  scm_expmem = 0;

  /* NOTE(review): this value of j appears unused before j is
     reassigned -- looks like a leftover dead store.  */
  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  /* Room for the two initial segment records.  */
  scm_heap_table = ((struct scm_heap_seg_data *)
		    scm_must_malloc (sizeof (struct scm_heap_seg_data) * 2, "hplims"));

#ifdef GUILE_NEW_GC_SCHEME
  if (make_initial_segment (init_heap_size, &scm_master_freelist) ||
      make_initial_segment (init_heap2_size, &scm_master_freelist2))
    return 1;
#else
  if (make_initial_segment (init_heap_size, &scm_freelist) ||
      make_initial_segment (init_heap2_size, &scm_freelist2))
    return 1;
#endif

  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0]);

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_weak_vectors = SCM_EOL;

  /* Initialise the list of ports.  */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  /* A self-referential list used as the "undefined" marker chain.  */
  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
  scm_symhash = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_weak_symhash = scm_make_weak_key_hash_table ((SCM) SCM_MAKINUM (scm_symhash_dim));
  scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = SCM_EOL;
  scm_asyncs = SCM_EOL;
  scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
  scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
#ifdef SCM_BIGDIG
  scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
#endif
  return 0;
}
2362 \f
2363
/* Register this module's Scheme bindings; the generated registration
   code lives in gc.x (produced by the snarfer from SCM_DEFINE forms).  */
void
scm_init_gc ()
{
#include "gc.x"
}