[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 \f
46 #include <stdio.h>
47 #include "_scm.h"
48 #include "stime.h"
49 #include "stackchk.h"
50 #include "struct.h"
51 #include "genio.h"
52 #include "weaks.h"
53 #include "guardians.h"
54 #include "smob.h"
55 #include "unif.h"
56 #include "async.h"
57
58 #include "validate.h"
59 #include "gc.h"
60
61 #ifdef HAVE_MALLOC_H
62 #include <malloc.h>
63 #endif
64
65 #ifdef HAVE_UNISTD_H
66 #include <unistd.h>
67 #endif
68
69 #ifdef __STDC__
70 #include <stdarg.h>
71 #define var_start(x, y) va_start(x, y)
72 #else
73 #include <varargs.h>
74 #define var_start(x, y) va_start(x)
75 #endif
76
77 \f
78 /* {heap tuning parameters}
79 *
80 * These are parameters for controlling memory allocation. The heap
81 * is the area out of which cons pairs (via scm_cons) and object headers are allocated.
82 *
83 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
84 * 64 bit machine. The units of the _SIZE parameters are bytes.
85 * Cons pairs and object headers occupy one heap cell.
86 *
87 * SCM_INIT_HEAP_SIZE is the initial size of the heap. If this much heap is
88 * allocated initially the heap will grow by half its current size
89 * each subsequent time more heap is needed.
90 *
91 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
92 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
93 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
94 * is in scm_init_storage() and alloc_some_heap() in sys.c
95 *
96 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
97 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
98 *
99 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
100 * is needed.
101 *
102 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
103 * trigger a GC.
104 *
105 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
106 * reclaimed by a GC triggered by must_malloc. If less than this is
107 * reclaimed, the trigger threshold is raised. [I don't know what a
108 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
109 * work around an oscillation that caused almost constant GC.]
110 */
111
112 #define SCM_INIT_HEAP_SIZE (40000L * sizeof (scm_cell))
113 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
114 #ifdef _QC
115 # define SCM_HEAP_SEG_SIZE 32768L
116 #else
117 # ifdef sequent
118 # define SCM_HEAP_SEG_SIZE (7000L*sizeof(scm_cell))
119 # else
120 # define SCM_HEAP_SEG_SIZE (16384L*sizeof(scm_cell))
121 # endif
122 #endif
123 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
124 #define SCM_INIT_MALLOC_LIMIT 100000
125 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
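/* An illustrative calculation (not additional configuration): with 8-byte
 * cells, SCM_INIT_HEAP_SIZE comes to 40000 * 8 = 320000 bytes.  If that
 * first allocation succeeds, each later segment request is half the current
 * heap (SCM_EXPHEAP(320000) = 160000 bytes for the second segment); if it
 * fails, the heap instead grows in fixed SCM_HEAP_SEG_SIZE steps, 16384
 * cells in the default case above. */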
126
127 #define SCM_GC_TRIGGER 10000
128 #define SCM_GC_TRIGGER2 10000
129
130 /* CELL_UP and CELL_DN are used by init_heap_seg to find scm_cell-aligned inner
131 bounds for allocated storage */
132
133 #ifdef PROT386
134 /* in 386 protected mode we must only adjust the offset */
135 # define CELL_UP(p) MK_FP(FP_SEG(p), ~7&(FP_OFF(p)+7))
136 # define CELL_DN(p) MK_FP(FP_SEG(p), ~7&FP_OFF(p))
137 #else
138 # ifdef _UNICOS
139 # define CELL_UP(p) (SCM_CELLPTR)(~1L & ((long)(p)+1L))
140 # define CELL_DN(p) (SCM_CELLPTR)(~1L & (long)(p))
141 # else
142 # define CELL_UP(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & ((long)(p)+sizeof(scm_cell)-1L))
143 # define CELL_DN(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & (long)(p))
144 # endif /* UNICOS */
145 #endif /* PROT386 */
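/* Illustrative example of the generic case above, assuming 8-byte cells:
 * CELL_UP rounds a pointer up to the next cell boundary and CELL_DN rounds
 * it down, so CELL_UP(0x1003) is (SCM_CELLPTR) 0x1008 and CELL_DN(0x1003)
 * is (SCM_CELLPTR) 0x1000, while already-aligned pointers come back
 * unchanged. */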
146
147
148 \f
149 /* scm_freelists
150 */
151
152 #ifdef GUILE_NEW_GC_SCHEME
153 SCM scm_freelist = SCM_EOL;
154 scm_freelist_t scm_master_freelist = {
155 SCM_EOL, 0, SCM_EOL, SCM_EOL, 0, 0, 1, 0, 0
156 };
157 SCM scm_freelist2 = SCM_EOL;
158 scm_freelist_t scm_master_freelist2 = {
159 SCM_EOL, 0, SCM_EOL, SCM_EOL, 0, 0, 2, 0, 0
160 };
161 #else
162 scm_freelist_t scm_freelist = { SCM_EOL, 1, 0, 0 };
163 scm_freelist_t scm_freelist2 = { SCM_EOL, 2, 0, 0 };
164 #endif
165
166 /* scm_mtrigger
167 * is the number of bytes of must_malloc allocation needed to trigger gc.
168 */
169 unsigned long scm_mtrigger;
170
171
172 /* scm_gc_heap_lock
173 * If set, don't expand the heap. Set only during gc, during which no allocation
174 * is supposed to take place anyway.
175 */
176 int scm_gc_heap_lock = 0;
177
178 /* GC Blocking
179 * Don't pause for collection if this is set -- just
180 * expand the heap.
181 */
182
183 int scm_block_gc = 1;
184
185 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
186 * collection (GC) more space is allocated for the heap.
187 */
188 #define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
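/* For instance (illustrative numbers): with a 100000-cell heap,
 * MIN_GC_YIELD is 25000 cells; a collection that recovers fewer than that
 * makes the allocator grow the heap (see scm_gc_for_alloc below) instead of
 * relying on the freelist alone. */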
189
190 /* During collection, this accumulates objects holding
191 * weak references.
192 */
193 SCM scm_weak_vectors;
194
195 /* GC Statistics Keeping
196 */
197 unsigned long scm_cells_allocated = 0;
198 long scm_mallocated = 0;
199 /* unsigned long scm_gc_cells_collected; */
200 unsigned long scm_gc_malloc_collected;
201 unsigned long scm_gc_ports_collected;
202 unsigned long scm_gc_rt;
203 unsigned long scm_gc_time_taken = 0;
204
205 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
206 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
207 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
208 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
209 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
210 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
211
212
213 struct scm_heap_seg_data
214 {
215 /* lower and upper bounds of the segment */
216 SCM_CELLPTR bounds[2];
217
218 /* address of the head-of-freelist pointer for this segment's cells.
219 All segments usually point to the same one, scm_freelist. */
220 scm_freelist_t *freelistp;
221
222 /* number of SCM words per object in this segment */
223 int span;
224
225 /* If SEG_DATA->valid is non-zero, the conservative marking
226 functions will apply SEG_DATA->valid to the purported pointer and
227 SEG_DATA, and mark the object iff the function returns non-zero.
228 At the moment, I don't think anyone uses this. */
229 int (*valid) ();
230 };
231
232
233
234
235 static void scm_mark_weak_vector_spines (void);
236 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
237 static void alloc_some_heap (scm_freelist_t *);
238
239
240 \f
241 /* Debugging functions. */
242
243 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
244
245 /* Return the number of the heap segment containing CELL. */
246 static int
247 which_seg (SCM cell)
248 {
249 int i;
250
251 for (i = 0; i < scm_n_heap_segs; i++)
252 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
253 && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
254 return i;
255 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
256 SCM_UNPACK (cell));
257 abort ();
258 }
259
260
261 #ifdef GUILE_NEW_GC_SCHEME
262 static void
263 map_free_list (scm_freelist_t *master, SCM freelist)
264 {
265 int last_seg = -1, count = 0;
266 SCM f;
267
268 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
269 {
270 int this_seg = which_seg (f);
271
272 if (this_seg != last_seg)
273 {
274 if (last_seg != -1)
275 fprintf (stderr, " %5d %d-cells in segment %d\n",
276 count, master->span, last_seg);
277 last_seg = this_seg;
278 count = 0;
279 }
280 count++;
281 }
282 if (last_seg != -1)
283 fprintf (stderr, " %5d %d-cells in segment %d\n",
284 count, master->span, last_seg);
285 }
286 #else
287 static void
288 map_free_list (scm_freelist_t *freelistp)
289 {
290 int last_seg = -1, count = 0;
291 SCM f;
292
293 for (f = freelistp->cells; SCM_NIMP (f); f = SCM_CDR (f))
294 {
295 int this_seg = which_seg (f);
296
297 if (this_seg != last_seg)
298 {
299 if (last_seg != -1)
300 fprintf (stderr, " %5d %d-cells in segment %d\n",
301 count, freelistp->span, last_seg);
302 last_seg = this_seg;
303 count = 0;
304 }
305 count++;
306 }
307 if (last_seg != -1)
308 fprintf (stderr, " %5d %d-cells in segment %d\n",
309 count, freelistp->span, last_seg);
310 }
311 #endif
312
313 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
314 (),
315 "Print debugging information about the free-list.\n"
316 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
317 #define FUNC_NAME s_scm_map_free_list
318 {
319 fprintf (stderr, "%d segments total\n", scm_n_heap_segs);
320 #ifdef GUILE_NEW_GC_SCHEME
321 map_free_list (&scm_master_freelist, scm_freelist);
322 map_free_list (&scm_master_freelist2, scm_freelist2);
323 #else
324 map_free_list (&scm_freelist);
325 map_free_list (&scm_freelist2);
326 #endif
327 fflush (stderr);
328
329 return SCM_UNSPECIFIED;
330 }
331 #undef FUNC_NAME
332
333 static int
334 free_list_length (char *title, int i, SCM freelist)
335 {
336 SCM ls;
337 int n = 0;
338 for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
339 if (SCM_UNPACK_CAR (ls) == scm_tc_free_cell)
340 ++n;
341 else
342 {
343 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
344 abort ();
345 }
346 if (i >= 0)
347 fprintf (stderr, "%s %d\t%d\n", title, i, n);
348 else
349 fprintf (stderr, "%s\t%d\n", title, n);
350 return n;
351 }
352
353 static void
354 free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
355 {
356 SCM clusters;
357 int i = 0, n = 0;
358 fprintf (stderr, "%s\n\n", title);
359 n += free_list_length ("free list", -1, freelist);
360 for (clusters = master->clusters;
361 SCM_NNULLP (clusters);
362 clusters = SCM_CDR (clusters))
363 n += free_list_length ("cluster", i++, SCM_CAR (clusters));
364 fprintf (stderr, "\ntotal %d cells\n\n", n);
365 }
366
367 SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
368 (),
369 "Print debugging information about the free-list.\n"
370 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
371 #define FUNC_NAME s_scm_free_list_length
372 {
373 #ifdef GUILE_NEW_GC_SCHEME
374 free_list_lengths ("1-words", &scm_master_freelist, scm_freelist);
375 free_list_lengths ("2-words", &scm_master_freelist2, scm_freelist2);
376 #endif
377 return SCM_UNSPECIFIED;
378 }
379 #undef FUNC_NAME
380
381 #endif
382
383 #ifdef GUILE_DEBUG_FREELIST
384
385 /* Number of calls to SCM_NEWCELL since startup. */
386 static unsigned long scm_newcell_count;
387 static unsigned long scm_newcell2_count;
388
389 /* Search freelist for anything that isn't marked as a free cell.
390 Abort if we find something. */
391 #ifdef GUILE_NEW_GC_SCHEME
392 static void
393 scm_check_freelist (SCM freelist)
394 {
395 SCM f;
396 int i = 0;
397
398 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
399 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
400 {
401 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
402 scm_newcell_count, i);
403 fflush (stderr);
404 abort ();
405 }
406 }
407 #else
408 static void
409 scm_check_freelist (scm_freelist_t *freelistp)
410 {
411 SCM f;
412 int i = 0;
413
414 for (f = freelistp->cells; SCM_NIMP (f); f = SCM_CDR (f), i++)
415 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
416 {
417 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
418 scm_newcell_count, i);
419 fflush (stderr);
420 abort ();
421 }
422 }
423 #endif
424
425 static int scm_debug_check_freelist = 0;
426
427 SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
428 (SCM flag),
429 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
430 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
431 "compile-time flag was selected.\n")
432 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
433 {
434 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
435 return SCM_UNSPECIFIED;
436 }
437 #undef FUNC_NAME
438
439
440 #ifdef GUILE_NEW_GC_SCHEME
441
442 SCM
443 scm_debug_newcell (void)
444 {
445 SCM new;
446
447 scm_newcell_count++;
448 if (scm_debug_check_freelist)
449 {
450 scm_check_freelist (scm_freelist);
451 scm_gc();
452 }
453
454 /* The rest of this is supposed to be identical to the SCM_NEWCELL
455 macro. */
456 if (SCM_IMP (scm_freelist))
457 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
458 else
459 {
460 new = scm_freelist;
461 scm_freelist = SCM_CDR (scm_freelist);
462 SCM_SETCAR (new, scm_tc16_allocated);
463 }
464
465 return new;
466 }
467
468 SCM
469 scm_debug_newcell2 (void)
470 {
471 SCM new;
472
473 scm_newcell2_count++;
474 if (scm_debug_check_freelist)
475 {
476 scm_check_freelist (scm_freelist2);
477 scm_gc ();
478 }
479
480 /* The rest of this is supposed to be identical to the SCM_NEWCELL
481 macro. */
482 if (SCM_IMP (scm_freelist2))
483 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
484 else
485 {
486 new = scm_freelist2;
487 scm_freelist2 = SCM_CDR (scm_freelist2);
488 SCM_SETCAR (new, scm_tc16_allocated);
489 }
490
491 return new;
492 }
493
494 #else /* GUILE_NEW_GC_SCHEME */
495
496 SCM
497 scm_debug_newcell (void)
498 {
499 SCM new;
500
501 scm_newcell_count++;
502 if (scm_debug_check_freelist)
503 {
504 scm_check_freelist (&scm_freelist);
505 scm_gc();
506 }
507
508 /* The rest of this is supposed to be identical to the SCM_NEWCELL
509 macro. */
510 if (SCM_IMP (scm_freelist.cells))
511 new = scm_gc_for_newcell (&scm_freelist);
512 else
513 {
514 new = scm_freelist.cells;
515 scm_freelist.cells = SCM_CDR (scm_freelist.cells);
516 SCM_SETCAR (new, scm_tc16_allocated);
517 ++scm_cells_allocated;
518 }
519
520 return new;
521 }
522
523 SCM
524 scm_debug_newcell2 (void)
525 {
526 SCM new;
527
528 scm_newcell2_count++;
529 if (scm_debug_check_freelist) {
530 scm_check_freelist (&scm_freelist2);
531 scm_gc();
532 }
533
534 /* The rest of this is supposed to be identical to the SCM_NEWCELL2
535 macro. */
536 if (SCM_IMP (scm_freelist2.cells))
537 new = scm_gc_for_newcell (&scm_freelist2);
538 else
539 {
540 new = scm_freelist2.cells;
541 scm_freelist2.cells = SCM_CDR (scm_freelist2.cells);
542 SCM_SETCAR (new, scm_tc16_allocated);
543 scm_cells_allocated += 2;
544 }
545
546 return new;
547 }
548
549 #endif /* GUILE_NEW_GC_SCHEME */
550 #endif /* GUILE_DEBUG_FREELIST */
551
552 \f
553
554 /* {Scheme Interface to GC}
555 */
556
557 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
558 (),
559 "Returns an association list of statistics about Guile's current use of storage. ")
560 #define FUNC_NAME s_scm_gc_stats
561 {
562 int i;
563 int n;
564 SCM heap_segs;
565 long int local_scm_mtrigger;
566 long int local_scm_mallocated;
567 long int local_scm_heap_size;
568 long int local_scm_cells_allocated;
569 long int local_scm_gc_time_taken;
570 SCM answer;
571
572 SCM_DEFER_INTS;
573 scm_block_gc = 1;
574 retry:
575 heap_segs = SCM_EOL;
576 n = scm_n_heap_segs;
577 for (i = scm_n_heap_segs; i--; )
578 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
579 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
580 heap_segs);
581 if (scm_n_heap_segs != n)
582 goto retry;
583 scm_block_gc = 0;
584
586 local_scm_mtrigger = scm_mtrigger;
587 local_scm_mallocated = scm_mallocated;
588 #ifdef GUILE_NEW_GC_SCHEME
589 local_scm_heap_size = scm_master_freelist.heap_size; /*fixme*/
590 #else
591 local_scm_heap_size = scm_freelist.heap_size; /*fixme*/
592 #endif
593 local_scm_cells_allocated = scm_cells_allocated;
594 local_scm_gc_time_taken = scm_gc_time_taken;
595
596 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
597 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
598 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
599 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
600 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
601 scm_cons (sym_heap_segments, heap_segs),
602 SCM_UNDEFINED);
603 SCM_ALLOW_INTS;
604 return answer;
605 }
606 #undef FUNC_NAME
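/* The association list built above has roughly this shape at the Scheme
 * level (the values are illustrative, not real measurements):
 *
 *   ((gc-time-taken . 35)
 *    (cells-allocated . 120000)
 *    (cell-heap-size . 160000)
 *    (bytes-malloced . 240000)
 *    (gc-malloc-threshold . 100000)
 *    (cell-heap-segments (<upper-bound> . <lower-bound>) ...))
 */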
607
608
609 void
610 scm_gc_start (const char *what)
611 {
612 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
613 /* scm_gc_cells_collected = 0; */
614 scm_gc_malloc_collected = 0;
615 scm_gc_ports_collected = 0;
616 }
617
618 void
619 scm_gc_end ()
620 {
621 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
622 scm_gc_time_taken += scm_gc_rt;
623 scm_system_async_mark (scm_gc_async);
624 }
625
626
627 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
628 (SCM obj),
629 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
630 "returned by this function for @var{obj}")
631 #define FUNC_NAME s_scm_object_address
632 {
633 return scm_ulong2num ((unsigned long) obj);
634 }
635 #undef FUNC_NAME
636
637
638 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
639 (),
640 "Scans all of SCM objects and reclaims for further use those that are\n"
641 "no longer accessible.")
642 #define FUNC_NAME s_scm_gc
643 {
644 SCM_DEFER_INTS;
645 scm_igc ("call");
646 SCM_ALLOW_INTS;
647 return SCM_UNSPECIFIED;
648 }
649 #undef FUNC_NAME
650
651
652 \f
653 /* {C Interface For When GC is Triggered}
654 */
655
656 #ifdef GUILE_NEW_GC_SCHEME
657
658 /* When we get POSIX threads support, the master freelist will be global
659 and shared, while each thread will have a freelist of its own. */
660
661 SCM
662 scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
663 {
664 SCM cell;
665 ++scm_ints_disabled;
666 if (master->triggeredp)
667 scm_igc ("cells");
668 else if (SCM_NULLP (master->clusters))
669 alloc_some_heap (master);
670 else if (SCM_NULLP (SCM_CDR (master->clusters)))
671 /* we are satisfied; GC instead of alloc next time around */
672 master->triggeredp = 1;
673 --scm_ints_disabled;
674 cell = SCM_CAR (master->clusters);
675 master->clusters = SCM_CDR (master->clusters);
676 *freelist = SCM_CDR (cell);
677 SCM_SETCAR (cell, scm_tc16_allocated);
678 return cell;
679 }
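/* In outline, the SCM_NEWCELL fast path that ends up here looks like the
 * sketch below (based on scm_debug_newcell above; the real macro lives in a
 * header, so this is illustrative only):
 *
 *   if (SCM_IMP (scm_freelist))
 *     obj = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
 *   else
 *     {
 *       obj = scm_freelist;
 *       scm_freelist = SCM_CDR (scm_freelist);
 *       SCM_SETCAR (obj, scm_tc16_allocated);
 *     }
 *
 * i.e. the local freelist is consumed until empty, and then a whole cluster
 * is handed over from the master freelist (or the heap is grown, or a GC is
 * run) by scm_gc_for_newcell. */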
680
681 #else /* GUILE_NEW_GC_SCHEME */
682
683 void
684 scm_gc_for_alloc (scm_freelist_t *freelistp)
685 {
686 SCM_REDEFER_INTS;
687 scm_igc ("cells");
688 #ifdef GUILE_DEBUG_FREELIST
689 fprintf (stderr, "Collected: %d, min_yield: %d\n",
690 freelistp->collected, MIN_GC_YIELD (freelistp));
691 #endif
692 if ((freelistp->collected < MIN_GC_YIELD (freelistp))
693 || SCM_IMP (freelistp->cells))
694 alloc_some_heap (freelistp);
695 SCM_REALLOW_INTS;
696 }
697
698
699 SCM
700 scm_gc_for_newcell (scm_freelist_t *freelistp)
701 {
702 SCM fl;
703 scm_gc_for_alloc (freelistp);
704 fl = freelistp->cells;
705 freelistp->cells = SCM_CDR (fl);
706 SCM_SETCAR (fl, scm_tc16_allocated);
707 return fl;
708 }
709
710 #endif /* GUILE_NEW_GC_SCHEME */
711
712 void
713 scm_igc (const char *what)
714 {
715 int j;
716
717 #ifdef USE_THREADS
718 /* During the critical section, only the current thread may run. */
719 SCM_THREAD_CRITICAL_SECTION_START;
720 #endif
721
722 /* fprintf (stderr, "gc: %s\n", what); */
723
724 scm_gc_start (what);
725
726 if (!scm_stack_base || scm_block_gc)
727 {
728 scm_gc_end ();
729 return;
730 }
731
732 if (scm_mallocated < 0)
733 /* The byte count of allocated objects has underflowed. This is
734 probably because you forgot to report the sizes of objects you
735 have allocated, by calling scm_done_malloc or some such. When
736 the GC freed them, it subtracted their size from
737 scm_mallocated, which underflowed. */
738 abort ();
739
740 if (scm_gc_heap_lock)
741 /* We've invoked the collector while a GC is already in progress.
742 That should never happen. */
743 abort ();
744
745 ++scm_gc_heap_lock;
746
747 scm_weak_vectors = SCM_EOL;
748
749 scm_guardian_gc_init ();
750
751 /* unprotect any struct types with no instances */
752 #if 0
753 {
754 SCM type_list;
755 SCM * pos;
756
757 pos = &scm_type_obj_list;
758 type_list = scm_type_obj_list;
759 while (type_list != SCM_EOL)
760 if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
761 {
762 pos = SCM_CDRLOC (type_list);
763 type_list = SCM_CDR (type_list);
764 }
765 else
766 {
767 *pos = SCM_CDR (type_list);
768 type_list = SCM_CDR (type_list);
769 }
770 }
771 #endif
772
773 /* flush dead entries from the continuation stack */
774 {
775 int x;
776 int bound;
777 SCM * elts;
778 elts = SCM_VELTS (scm_continuation_stack);
779 bound = SCM_LENGTH (scm_continuation_stack);
780 x = SCM_INUM (scm_continuation_stack_ptr);
781 while (x < bound)
782 {
783 elts[x] = SCM_BOOL_F;
784 ++x;
785 }
786 }
787
788 #ifndef USE_THREADS
789
790 /* Protect from the C stack. This must be the first marking
791 * done because it provides information about what objects
792 * are "in-use" by the C code. "in-use" objects are those
793 * for which the values from SCM_LENGTH and SCM_CHARS must remain
794 * usable. This requirement is stricter than a liveness
795 * requirement -- in particular, it constrains the implementation
796 * of scm_vector_set_length_x.
797 */
798 SCM_FLUSH_REGISTER_WINDOWS;
799 /* This assumes that all registers are saved into the jmp_buf */
800 setjmp (scm_save_regs_gc_mark);
801 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
802 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
803 sizeof scm_save_regs_gc_mark)
804 / sizeof (SCM_STACKITEM)));
805
806 {
807 /* stack_len is long rather than scm_sizet in order to guarantee that
808 &stack_len is long aligned */
809 #ifdef SCM_STACK_GROWS_UP
810 #ifdef nosve
811 long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
812 #else
813 long stack_len = scm_stack_size (scm_stack_base);
814 #endif
815 scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
816 #else
817 #ifdef nosve
818 long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
819 #else
820 long stack_len = scm_stack_size (scm_stack_base);
821 #endif
822 scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
823 #endif
824 }
825
826 #else /* USE_THREADS */
827
828 /* Mark every thread's stack and registers */
829 scm_threads_mark_stacks ();
830
831 #endif /* USE_THREADS */
832
833 /* FIXME: insert a phase to un-protect string-data preserved
834 * in scm_vector_set_length_x.
835 */
836
837 j = SCM_NUM_PROTECTS;
838 while (j--)
839 scm_gc_mark (scm_sys_protects[j]);
840
841 /* FIXME: we should have a means to register C functions to be run
842 * in different phases of GC
843 */
844 scm_mark_subr_table ();
845
846 #ifndef USE_THREADS
847 scm_gc_mark (scm_root->handle);
848 #endif
849
850 scm_mark_weak_vector_spines ();
851
852 scm_guardian_zombify ();
853
854 scm_gc_sweep ();
855
856 --scm_gc_heap_lock;
857 scm_gc_end ();
858
859 #ifdef USE_THREADS
860 SCM_THREAD_CRITICAL_SECTION_END;
861 #endif
862 }
863
864 \f
865 /* {Mark/Sweep}
866 */
867
868
869
870 /* Mark an object precisely.
871 */
872 void
873 scm_gc_mark (SCM p)
874 {
875 register long i;
876 register SCM ptr;
877
878 ptr = p;
879
880 gc_mark_loop:
881 if (SCM_IMP (ptr))
882 return;
883
884 gc_mark_nimp:
885 if (SCM_NCELLP (ptr))
886 scm_wta (ptr, "rogue pointer in heap", NULL);
887
888 switch (SCM_TYP7 (ptr))
889 {
890 case scm_tcs_cons_nimcar:
891 if (SCM_GCMARKP (ptr))
892 break;
893 SCM_SETGCMARK (ptr);
894 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
895 {
896 ptr = SCM_CAR (ptr);
897 goto gc_mark_nimp;
898 }
899 scm_gc_mark (SCM_CAR (ptr));
900 ptr = SCM_GCCDR (ptr);
901 goto gc_mark_nimp;
902 case scm_tcs_cons_imcar:
903 if (SCM_GCMARKP (ptr))
904 break;
905 SCM_SETGCMARK (ptr);
906 ptr = SCM_GCCDR (ptr);
907 goto gc_mark_loop;
908 case scm_tc7_pws:
909 if (SCM_GCMARKP (ptr))
910 break;
911 SCM_SETGCMARK (ptr);
912 scm_gc_mark (SCM_CELL_WORD (ptr, 2));
913 ptr = SCM_GCCDR (ptr);
914 goto gc_mark_loop;
915 case scm_tcs_cons_gloc:
916 if (SCM_GCMARKP (ptr))
917 break;
918 SCM_SETGCMARK (ptr);
919 {
920 SCM vcell;
921 vcell = SCM_CAR (ptr) - 1L;
922 switch (SCM_UNPACK (SCM_CDR (vcell)))
923 {
924 default:
925 scm_gc_mark (vcell);
926 ptr = SCM_GCCDR (ptr);
927 goto gc_mark_loop;
928 case 1: /* ! */
929 case 0: /* ! */
930 {
931 SCM layout;
932 SCM * vtable_data;
933 int len;
934 char * fields_desc;
935 register SCM * mem;
936 register int x;
937
938 vtable_data = (SCM *)vcell;
939 layout = vtable_data[scm_vtable_index_layout];
940 len = SCM_LENGTH (layout);
941 fields_desc = SCM_CHARS (layout);
942 /* We're using SCM_GCCDR here like STRUCT_DATA, except
943 that it removes the mark */
944 mem = (SCM *)SCM_GCCDR (ptr);
945
946 if (SCM_UNPACK (vtable_data[scm_struct_i_flags]) & SCM_STRUCTF_ENTITY)
947 {
948 scm_gc_mark (mem[scm_struct_i_procedure]);
949 scm_gc_mark (mem[scm_struct_i_setter]);
950 }
951 if (len)
952 {
953 for (x = 0; x < len - 2; x += 2, ++mem)
954 if (fields_desc[x] == 'p')
955 scm_gc_mark (*mem);
956 if (fields_desc[x] == 'p')
957 {
958 int j;
959 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
960 for (j = (long int) *mem; x; --x)
961 scm_gc_mark (*++mem);
962 else
963 scm_gc_mark (*mem);
964 }
965 }
966 if (!SCM_CDR (vcell))
967 {
968 SCM_SETGCMARK (vcell);
969 ptr = vtable_data[scm_vtable_index_vtable];
970 goto gc_mark_loop;
971 }
972 }
973 }
974 }
975 break;
976 case scm_tcs_closures:
977 if (SCM_GCMARKP (ptr))
978 break;
979 SCM_SETGCMARK (ptr);
980 if (SCM_IMP (SCM_CDR (ptr)))
981 {
982 ptr = SCM_CLOSCAR (ptr);
983 goto gc_mark_nimp;
984 }
985 scm_gc_mark (SCM_CLOSCAR (ptr));
986 ptr = SCM_GCCDR (ptr);
987 goto gc_mark_nimp;
988 case scm_tc7_vector:
989 case scm_tc7_lvector:
990 #ifdef CCLO
991 case scm_tc7_cclo:
992 #endif
993 if (SCM_GC8MARKP (ptr))
994 break;
995 SCM_SETGC8MARK (ptr);
996 i = SCM_LENGTH (ptr);
997 if (i == 0)
998 break;
999 while (--i > 0)
1000 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1001 scm_gc_mark (SCM_VELTS (ptr)[i]);
1002 ptr = SCM_VELTS (ptr)[0];
1003 goto gc_mark_loop;
1004 case scm_tc7_contin:
1005 if SCM_GC8MARKP
1006 (ptr) break;
1007 SCM_SETGC8MARK (ptr);
1008 if (SCM_VELTS (ptr))
1009 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
1010 (scm_sizet)
1011 (SCM_LENGTH (ptr) +
1012 (sizeof (SCM_STACKITEM) + -1 +
1013 sizeof (scm_contregs)) /
1014 sizeof (SCM_STACKITEM)));
1015 break;
1016 #ifdef HAVE_ARRAYS
1017 case scm_tc7_bvect:
1018 case scm_tc7_byvect:
1019 case scm_tc7_ivect:
1020 case scm_tc7_uvect:
1021 case scm_tc7_fvect:
1022 case scm_tc7_dvect:
1023 case scm_tc7_cvect:
1024 case scm_tc7_svect:
1025 #ifdef HAVE_LONG_LONGS
1026 case scm_tc7_llvect:
1027 #endif
1028 #endif
1029 case scm_tc7_string:
1030 SCM_SETGC8MARK (ptr);
1031 break;
1032
1033 case scm_tc7_substring:
1034 if (SCM_GC8MARKP(ptr))
1035 break;
1036 SCM_SETGC8MARK (ptr);
1037 ptr = SCM_CDR (ptr);
1038 goto gc_mark_loop;
1039
1040 case scm_tc7_wvect:
1041 if (SCM_GC8MARKP(ptr))
1042 break;
1043 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1044 scm_weak_vectors = ptr;
1045 SCM_SETGC8MARK (ptr);
1046 if (SCM_IS_WHVEC_ANY (ptr))
1047 {
1048 int x;
1049 int len;
1050 int weak_keys;
1051 int weak_values;
1052
1053 len = SCM_LENGTH (ptr);
1054 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1055 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
1056
1057 for (x = 0; x < len; ++x)
1058 {
1059 SCM alist;
1060 alist = SCM_VELTS (ptr)[x];
1061
1062 /* mark everything on the alist except the keys or
1063 * values, according to weak_values and weak_keys. */
1064 while ( SCM_CONSP (alist)
1065 && !SCM_GCMARKP (alist)
1066 && SCM_CONSP (SCM_CAR (alist)))
1067 {
1068 SCM kvpair;
1069 SCM next_alist;
1070
1071 kvpair = SCM_CAR (alist);
1072 next_alist = SCM_CDR (alist);
1073 /*
1074 * Do not do this:
1075 * SCM_SETGCMARK (alist);
1076 * SCM_SETGCMARK (kvpair);
1077 *
1078 * It may be that either the key or value is protected by
1079 * an escaped reference to part of the spine of this alist.
1080 * If we mark the spine here, and only mark one or neither of the
1081 * key and value, they may never be properly marked.
1082 * This leads to a horrible situation in which an alist containing
1083 * freelist cells is exported.
1084 *
1085 * So only mark the spines of these arrays last of all marking.
1086 * If somebody confuses us by constructing a weak vector
1087 * with a circular alist then we are hosed, but at least we
1088 * won't prematurely drop table entries.
1089 */
1090 if (!weak_keys)
1091 scm_gc_mark (SCM_CAR (kvpair));
1092 if (!weak_values)
1093 scm_gc_mark (SCM_GCCDR (kvpair));
1094 alist = next_alist;
1095 }
1096 if (SCM_NIMP (alist))
1097 scm_gc_mark (alist);
1098 }
1099 }
1100 break;
1101
1102 case scm_tc7_msymbol:
1103 if (SCM_GC8MARKP(ptr))
1104 break;
1105 SCM_SETGC8MARK (ptr);
1106 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
1107 ptr = SCM_SYMBOL_PROPS (ptr);
1108 goto gc_mark_loop;
1109 case scm_tc7_ssymbol:
1110 if (SCM_GC8MARKP(ptr))
1111 break;
1112 SCM_SETGC8MARK (ptr);
1113 break;
1114 case scm_tcs_subrs:
1115 break;
1116 case scm_tc7_port:
1117 i = SCM_PTOBNUM (ptr);
1118 if (!(i < scm_numptob))
1119 goto def;
1120 if (SCM_GC8MARKP (ptr))
1121 break;
1122 SCM_SETGC8MARK (ptr);
1123 if (SCM_PTAB_ENTRY(ptr))
1124 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
1125 if (scm_ptobs[i].mark)
1126 {
1127 ptr = (scm_ptobs[i].mark) (ptr);
1128 goto gc_mark_loop;
1129 }
1130 else
1131 return;
1132 break;
1133 case scm_tc7_smob:
1134 if (SCM_GC8MARKP (ptr))
1135 break;
1136 SCM_SETGC8MARK (ptr);
1137 switch (SCM_GCTYP16 (ptr))
1138 { /* should be faster than going through scm_smobs */
1139 case scm_tc_free_cell:
1140 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1141 case scm_tc16_allocated:
1142 case scm_tc16_big:
1143 case scm_tc16_real:
1144 case scm_tc16_complex:
1145 break;
1146 default:
1147 i = SCM_SMOBNUM (ptr);
1148 if (!(i < scm_numsmob))
1149 goto def;
1150 if (scm_smobs[i].mark)
1151 {
1152 ptr = (scm_smobs[i].mark) (ptr);
1153 goto gc_mark_loop;
1154 }
1155 else
1156 return;
1157 }
1158 break;
1159 default:
1160 def:scm_wta (ptr, "unknown type in ", "gc_mark");
1161 }
1162 }
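/* The smob case above defines the protocol for user mark functions: mark
 * any sub-objects with scm_gc_mark and return one remaining SCM (or an
 * immediate), which is then tail-marked via gc_mark_loop.  A minimal
 * sketch; the "image" smob and its fields are purely hypothetical:
 *
 *   static SCM
 *   mark_image (SCM image_smob)
 *   {
 *     struct image *image = (struct image *) SCM_CDR (image_smob);
 *     scm_gc_mark (image->name);        (mark ordinary sub-objects here)
 *     return image->update_func;        (returned value is marked by the caller)
 *   }
 */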
1163
1164
1165 /* Mark a Region Conservatively
1166 */
1167
1168 void
1169 scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
1170 {
1171 register long m = n;
1172 register int i, j;
1173 register SCM_CELLPTR ptr;
1174
1175 while (0 <= --m)
1176 if (SCM_CELLP (*(SCM **) (& x[m])))
1177 {
1178 ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & x[m]));
1179 i = 0;
1180 j = scm_n_heap_segs - 1;
1181 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1182 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1183 {
1184 while (i <= j)
1185 {
1186 int seg_id;
1187 seg_id = -1;
1188 if ( (i == j)
1189 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1190 seg_id = i;
1191 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1192 seg_id = j;
1193 else
1194 {
1195 int k;
1196 k = (i + j) / 2;
1197 if (k == i)
1198 break;
1199 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1200 {
1201 j = k;
1202 ++i;
1203 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1204 continue;
1205 else
1206 break;
1207 }
1208 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1209 {
1210 i = k;
1211 --j;
1212 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1213 continue;
1214 else
1215 break;
1216 }
1217 }
1218 if ( !scm_heap_table[seg_id].valid
1219 || scm_heap_table[seg_id].valid (ptr,
1220 &scm_heap_table[seg_id]))
1221 scm_gc_mark (*(SCM *) & x[m]);
1222 break;
1223 }
1224
1225 }
1226 }
1227 }
1228
1229
1230 /* The following is a C predicate which determines if an SCM value can be
1231 regarded as a pointer to a cell on the heap. The code is duplicated
1232 from scm_mark_locations. */
1233
1234
1235 int
1236 scm_cellp (SCM value)
1237 {
1238 register int i, j;
1239 register SCM_CELLPTR ptr;
1240
1241 if SCM_CELLP (*(SCM **) (& value))
1242 {
1243 ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & value));
1244 i = 0;
1245 j = scm_n_heap_segs - 1;
1246 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1247 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1248 {
1249 while (i <= j)
1250 {
1251 int seg_id;
1252 seg_id = -1;
1253 if ( (i == j)
1254 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1255 seg_id = i;
1256 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1257 seg_id = j;
1258 else
1259 {
1260 int k;
1261 k = (i + j) / 2;
1262 if (k == i)
1263 break;
1264 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1265 {
1266 j = k;
1267 ++i;
1268 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1269 continue;
1270 else
1271 break;
1272 }
1273 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1274 {
1275 i = k;
1276 --j;
1277 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1278 continue;
1279 else
1280 break;
1281 }
1282 }
1283 if ( !scm_heap_table[seg_id].valid
1284 || scm_heap_table[seg_id].valid (ptr,
1285 &scm_heap_table[seg_id]))
1286 return 1;
1287 break;
1288 }
1289
1290 }
1291 }
1292 return 0;
1293 }
1294
1295
1296 static void
1297 scm_mark_weak_vector_spines ()
1298 {
1299 SCM w;
1300
1301 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
1302 {
1303 if (SCM_IS_WHVEC_ANY (w))
1304 {
1305 SCM *ptr;
1306 SCM obj;
1307 int j;
1308 int n;
1309
1310 obj = w;
1311 ptr = SCM_VELTS (w);
1312 n = SCM_LENGTH (w);
1313 for (j = 0; j < n; ++j)
1314 {
1315 SCM alist;
1316
1317 alist = ptr[j];
1318 while ( SCM_CONSP (alist)
1319 && !SCM_GCMARKP (alist)
1320 && SCM_CONSP (SCM_CAR (alist)))
1321 {
1322 SCM_SETGCMARK (alist);
1323 SCM_SETGCMARK (SCM_CAR (alist));
1324 alist = SCM_GCCDR (alist);
1325 }
1326 }
1327 }
1328 }
1329 }
1330
1331
1332
1333 void
1334 scm_gc_sweep ()
1335 {
1336 register SCM_CELLPTR ptr;
1337 #ifdef SCM_POINTERS_MUNGED
1338 register SCM scmptr;
1339 #else
1340 #undef scmptr
1341 #define scmptr (SCM)ptr
1342 #endif
1343 register SCM nfreelist;
1344 register scm_freelist_t *hp_freelist;
1345 register long m;
1346 register int span;
1347 long i;
1348 scm_sizet seg_size;
1349
1350 m = 0;
1351
1352 #ifdef GUILE_NEW_GC_SCHEME
1353 /* Reset all free list pointers. We'll reconstruct them completely
1354 while scanning. */
1355 for (i = 0; i < scm_n_heap_segs; i++)
1356 {
1357 scm_heap_table[i].freelistp->cells = SCM_EOL;
1358 scm_heap_table[i].freelistp->n_objects
1359 = scm_heap_table[i].freelistp->gc_trigger;
1360 scm_heap_table[i].freelistp->clusters = SCM_EOL;
1361 scm_heap_table[i].freelistp->clustertail
1362 = &scm_heap_table[i].freelistp->clusters;
1363 scm_heap_table[i].freelistp->triggeredp = 0;
1364 }
1365 #else
1366 /* Reset all free list pointers. We'll reconstruct them completely
1367 while scanning. */
1368 for (i = 0; i < scm_n_heap_segs; i++)
1369 scm_heap_table[i].freelistp->cells = SCM_EOL;
1370 #endif
1371
1372 for (i = 0; i < scm_n_heap_segs; i++)
1373 {
1374 register scm_sizet n = 0;
1375 register scm_sizet j;
1376 #ifdef GUILE_NEW_GC_SCHEME
1377 register int n_objects;
1378 #endif
1379
1380 /* Unmarked cells go onto the front of the freelist this heap
1381 segment points to. Rather than updating the real freelist
1382 pointer as we go along, we accumulate the new head in
1383 nfreelist. Then, if it turns out that the entire segment is
1384 free, we free (i.e., malloc's free) the whole segment, and
1385 simply don't assign nfreelist back into the real freelist. */
1386 hp_freelist = scm_heap_table[i].freelistp;
1387 nfreelist = hp_freelist->cells;
1388 #ifdef GUILE_NEW_GC_SCHEME
1389 n_objects = hp_freelist->n_objects;
1390 #endif
1391 span = scm_heap_table[i].span;
1392 hp_freelist->collected = 0;
1393
1394 ptr = CELL_UP (scm_heap_table[i].bounds[0]);
1395 seg_size = CELL_DN (scm_heap_table[i].bounds[1]) - ptr;
1396 for (j = seg_size + span; j -= span; ptr += span)
1397 {
1398 #ifdef SCM_POINTERS_MUNGED
1399 scmptr = PTR2SCM (ptr);
1400 #endif
1401 switch SCM_TYP7 (scmptr)
1402 {
1403 case scm_tcs_cons_gloc:
1404 if (SCM_GCMARKP (scmptr))
1405 {
1406 if (SCM_CDR (SCM_CAR (scmptr) - 1) == (SCM)1)
1407 SCM_SETCDR (SCM_CAR (scmptr) - 1, (SCM) 0);
1408 goto cmrkcontinue;
1409 }
1410 {
1411 SCM vcell;
1412 vcell = SCM_CAR (scmptr) - 1L;
1413
1414 if ((SCM_CDR (vcell) == 0) || (SCM_UNPACK (SCM_CDR (vcell)) == 1))
1415 {
1416 scm_struct_free_t free
1417 = (scm_struct_free_t) ((SCM*) vcell)[scm_struct_i_free];
1418 m += free ((SCM *) vcell, (SCM *) SCM_GCCDR (scmptr));
1419 }
1420 }
1421 break;
1422 case scm_tcs_cons_imcar:
1423 case scm_tcs_cons_nimcar:
1424 case scm_tcs_closures:
1425 case scm_tc7_pws:
1426 if (SCM_GCMARKP (scmptr))
1427 goto cmrkcontinue;
1428 break;
1429 case scm_tc7_wvect:
1430 if (SCM_GC8MARKP (scmptr))
1431 {
1432 goto c8mrkcontinue;
1433 }
1434 else
1435 {
1436 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1437 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
1438 break;
1439 }
1440
1441 case scm_tc7_vector:
1442 case scm_tc7_lvector:
1443 #ifdef CCLO
1444 case scm_tc7_cclo:
1445 #endif
1446 if (SCM_GC8MARKP (scmptr))
1447 goto c8mrkcontinue;
1448
1449 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1450 freechars:
1451 scm_must_free (SCM_CHARS (scmptr));
1452 /* SCM_SETCHARS(scmptr, 0);*/
1453 break;
1454 #ifdef HAVE_ARRAYS
1455 case scm_tc7_bvect:
1456 if SCM_GC8MARKP (scmptr)
1457 goto c8mrkcontinue;
1458 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1459 goto freechars;
1460 case scm_tc7_byvect:
1461 if SCM_GC8MARKP (scmptr)
1462 goto c8mrkcontinue;
1463 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1464 goto freechars;
1465 case scm_tc7_ivect:
1466 case scm_tc7_uvect:
1467 if SCM_GC8MARKP (scmptr)
1468 goto c8mrkcontinue;
1469 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1470 goto freechars;
1471 case scm_tc7_svect:
1472 if SCM_GC8MARKP (scmptr)
1473 goto c8mrkcontinue;
1474 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1475 goto freechars;
1476 #ifdef HAVE_LONG_LONGS
1477 case scm_tc7_llvect:
1478 if SCM_GC8MARKP (scmptr)
1479 goto c8mrkcontinue;
1480 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1481 goto freechars;
1482 #endif
1483 case scm_tc7_fvect:
1484 if SCM_GC8MARKP (scmptr)
1485 goto c8mrkcontinue;
1486 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1487 goto freechars;
1488 case scm_tc7_dvect:
1489 if SCM_GC8MARKP (scmptr)
1490 goto c8mrkcontinue;
1491 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1492 goto freechars;
1493 case scm_tc7_cvect:
1494 if SCM_GC8MARKP (scmptr)
1495 goto c8mrkcontinue;
1496 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1497 goto freechars;
1498 #endif
1499 case scm_tc7_substring:
1500 if (SCM_GC8MARKP (scmptr))
1501 goto c8mrkcontinue;
1502 break;
1503 case scm_tc7_string:
1504 if (SCM_GC8MARKP (scmptr))
1505 goto c8mrkcontinue;
1506 m += SCM_HUGE_LENGTH (scmptr) + 1;
1507 goto freechars;
1508 case scm_tc7_msymbol:
1509 if (SCM_GC8MARKP (scmptr))
1510 goto c8mrkcontinue;
1511 m += ( SCM_LENGTH (scmptr)
1512 + 1
1513 + sizeof (SCM) * ((SCM *)SCM_CHARS (scmptr) - SCM_SLOTS(scmptr)));
1514 scm_must_free ((char *)SCM_SLOTS (scmptr));
1515 break;
1516 case scm_tc7_contin:
1517 if SCM_GC8MARKP (scmptr)
1518 goto c8mrkcontinue;
1519 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
1520 if (SCM_VELTS (scmptr))
1521 goto freechars;
1522 case scm_tc7_ssymbol:
1523 if SCM_GC8MARKP(scmptr)
1524 goto c8mrkcontinue;
1525 break;
1526 case scm_tcs_subrs:
1527 continue;
1528 case scm_tc7_port:
1529 if SCM_GC8MARKP (scmptr)
1530 goto c8mrkcontinue;
1531 if SCM_OPENP (scmptr)
1532 {
1533 int k = SCM_PTOBNUM (scmptr);
1534 if (!(k < scm_numptob))
1535 goto sweeperr;
1536 /* Keep "revealed" ports alive. */
1537 if (scm_revealed_count (scmptr) > 0)
1538 continue;
1539 /* Yes, I really do mean scm_ptobs[k].free */
1540 /* rather than scm_ptobs[k].close. .close */
1541 /* is for explicit CLOSE-PORT by user */
1542 m += (scm_ptobs[k].free) (scmptr);
1543 SCM_SETSTREAM (scmptr, 0);
1544 scm_remove_from_port_table (scmptr);
1545 scm_gc_ports_collected++;
1546 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
1547 }
1548 break;
1549 case scm_tc7_smob:
1550 switch SCM_GCTYP16 (scmptr)
1551 {
1552 case scm_tc_free_cell:
1553 case scm_tc16_real:
1554 if SCM_GC8MARKP (scmptr)
1555 goto c8mrkcontinue;
1556 break;
1557 #ifdef SCM_BIGDIG
1558 case scm_tc16_big:
1559 if SCM_GC8MARKP (scmptr)
1560 goto c8mrkcontinue;
1561 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1562 goto freechars;
1563 #endif /* def SCM_BIGDIG */
1564 case scm_tc16_complex:
1565 if SCM_GC8MARKP (scmptr)
1566 goto c8mrkcontinue;
1567 m += 2 * sizeof (double);
1568 goto freechars;
1569 default:
1570 if SCM_GC8MARKP (scmptr)
1571 goto c8mrkcontinue;
1572
1573 {
1574 int k;
1575 k = SCM_SMOBNUM (scmptr);
1576 if (!(k < scm_numsmob))
1577 goto sweeperr;
1578 m += (scm_smobs[k].free) ((SCM) scmptr);
1579 break;
1580 }
1581 }
1582 break;
1583 default:
1584 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1585 }
1586 #if 0
1587 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1588 exit (2);
1589 #endif
1590 #ifndef GUILE_NEW_GC_SCHEME
1591 n += span;
1592 #else
1593 if (--n_objects < 0)
1594 {
1595 SCM_SETCAR (scmptr, nfreelist);
1596 *hp_freelist->clustertail = scmptr;
1597 hp_freelist->clustertail = SCM_CDRLOC (scmptr);
1598
1599 nfreelist = SCM_EOL;
1600 n += span * (hp_freelist->gc_trigger - n_objects);
1601 n_objects = hp_freelist->gc_trigger;
1602 }
1603 else
1604 #endif
1605 {
1606 /* Stick the new cell on the front of nfreelist. It's
1607 critical that we mark this cell as freed; otherwise, the
1608 conservative collector might trace it as some other type
1609 of object. */
1610 SCM_SETCAR (scmptr, scm_tc_free_cell);
1611 SCM_SETCDR (scmptr, nfreelist);
1612 nfreelist = scmptr;
1613 }
1614
1615 continue;
1616 c8mrkcontinue:
1617 SCM_CLRGC8MARK (scmptr);
1618 continue;
1619 cmrkcontinue:
1620 SCM_CLRGCMARK (scmptr);
1621 }
1622 #ifdef GC_FREE_SEGMENTS
1623 if (n == seg_size)
1624 {
1625 register long j;
1626
1627 hp_freelist->heap_size -= seg_size;
1628 free ((char *) scm_heap_table[i].bounds[0]);
1629 scm_heap_table[i].bounds[0] = 0;
1630 for (j = i + 1; j < scm_n_heap_segs; j++)
1631 scm_heap_table[j - 1] = scm_heap_table[j];
1632 scm_n_heap_segs -= 1;
1633 i--; /* We need to scan the segment just moved. */
1634 }
1635 else
1636 #endif /* ifdef GC_FREE_SEGMENTS */
1637 {
1638 /* Update the real freelist pointer to point to the head of
1639 the list of free cells we've built for this segment. */
1640 hp_freelist->cells = nfreelist;
1641 #ifdef GUILE_NEW_GC_SCHEME
1642 hp_freelist->n_objects = n_objects;
1643 #endif
1644 }
1645
1646 #ifdef GUILE_NEW_GC_SCHEME
1647 j = span * (hp_freelist->gc_trigger - n_objects);
1648 /* sum up---if this is the last turn for this freelist */
1649 hp_freelist->collected += n + j;
1650 n -= j; /* compensate for the sum up */
1651 #else
1652 hp_freelist->collected += n;
1653 #endif
1654 scm_cells_allocated += hp_freelist->heap_size - hp_freelist->collected;
1655
1656 #ifdef GUILE_DEBUG_FREELIST
1657 #ifdef GUILE_NEW_GC_SCHEME
1658 scm_check_freelist (hp_freelist == &scm_master_freelist
1659 ? scm_freelist
1660 : scm_freelist2);
1661 #else
1662 scm_check_freelist (hp_freelist);
1663 #endif
1664 scm_map_free_list ();
1665 #endif
1666 }
1667
1668 #ifdef GUILE_NEW_GC_SCHEME
1669 for (i = 0; i < scm_n_heap_segs; i++)
1670 if (scm_heap_table[i].freelistp->clustertail != NULL)
1671 {
1672 scm_freelist_t *hp_freelist = scm_heap_table[i].freelistp;
1673 if (hp_freelist->gc_trigger - hp_freelist->n_objects > 1)
1674 {
1675 SCM c = hp_freelist->cells;
1676 hp_freelist->n_objects = hp_freelist->gc_trigger;
1677 SCM_SETCAR (c, SCM_CDR (c));
1678 SCM_SETCDR (c, SCM_EOL);
1679 *hp_freelist->clustertail = c;
1680 }
1681 else
1682 *hp_freelist->clustertail = SCM_EOL;
1683 hp_freelist->clustertail = NULL;
1684 }
1685
1686 /* When we move to POSIX threads private freelists should probably
1687 be GC-protected instead. */
1688 scm_freelist = SCM_EOL;
1689 scm_freelist2 = SCM_EOL;
1690 #endif
1691
1692 /* Scan weak vectors. */
1693 {
1694 SCM *ptr, w;
1695 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
1696 {
1697 if (!SCM_IS_WHVEC_ANY (w))
1698 {
1699 register long j, n;
1700
1701 ptr = SCM_VELTS (w);
1702 n = SCM_LENGTH (w);
1703 for (j = 0; j < n; ++j)
1704 if (SCM_FREEP (ptr[j]))
1705 ptr[j] = SCM_BOOL_F;
1706 }
1707 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1708 {
1709 SCM obj = w;
1710 register long n = SCM_LENGTH (w);
1711 register long j;
1712
1713 ptr = SCM_VELTS (w);
1714
1715 for (j = 0; j < n; ++j)
1716 {
1717 SCM * fixup;
1718 SCM alist;
1719 int weak_keys;
1720 int weak_values;
1721
1722 weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
1723 weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);
1724
1725 fixup = ptr + j;
1726 alist = *fixup;
1727
1728 while ( SCM_CONSP (alist)
1729 && SCM_CONSP (SCM_CAR (alist)))
1730 {
1731 SCM key;
1732 SCM value;
1733
1734 key = SCM_CAAR (alist);
1735 value = SCM_CDAR (alist);
1736 if ( (weak_keys && SCM_FREEP (key))
1737 || (weak_values && SCM_FREEP (value)))
1738 {
1739 *fixup = SCM_CDR (alist);
1740 }
1741 else
1742 fixup = SCM_CDRLOC (alist);
1743 alist = SCM_CDR (alist);
1744 }
1745 }
1746 }
1747 }
1748 }
1749 scm_mallocated -= m;
1750 scm_gc_malloc_collected = m;
1751 }
1752
1753
1754 \f
1755
1756 /* {Front end to malloc}
1757 *
1758 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1759 *
1760 * These functions provide services comparable to malloc, realloc, and
1761 * free. They are for allocating malloced parts of scheme objects.
1762 * The primary purpose of the front end is to impose calls to gc.
1763 */
1764
1765 /* scm_must_malloc
1766 * Return newly malloced storage or throw an error.
1767 *
1768 * The parameter WHAT is a string for error reporting.
1769 * If the threshold scm_mtrigger will be passed by this
1770 * allocation, or if the first call to malloc fails,
1771 * garbage collect -- on the presumption that some objects
1772 * using malloced storage may be collected.
1773 *
1774 * The limit scm_mtrigger may be raised by this allocation.
1775 */
1776 void *
1777 scm_must_malloc (scm_sizet size, const char *what)
1778 {
1779 void *ptr;
1780 unsigned long nm = scm_mallocated + size;
1781
1782 if (nm <= scm_mtrigger)
1783 {
1784 SCM_SYSCALL (ptr = malloc (size));
1785 if (NULL != ptr)
1786 {
1787 scm_mallocated = nm;
1788 return ptr;
1789 }
1790 }
1791
1792 scm_igc (what);
1793
1794 nm = scm_mallocated + size;
1795 SCM_SYSCALL (ptr = malloc (size));
1796 if (NULL != ptr)
1797 {
1798 scm_mallocated = nm;
1799 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1800 if (nm > scm_mtrigger)
1801 scm_mtrigger = nm + nm / 2;
1802 else
1803 scm_mtrigger += scm_mtrigger / 2;
1804 }
1805 return ptr;
1806 }
1807
1808 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1809 return 0; /* never reached */
1810 }
1811
1812
1813 /* scm_must_realloc
1814 * is similar to scm_must_malloc.
1815 */
1816 void *
1817 scm_must_realloc (void *where,
1818 scm_sizet old_size,
1819 scm_sizet size,
1820 const char *what)
1821 {
1822 void *ptr;
1823 scm_sizet nm = scm_mallocated + size - old_size;
1824
1825 if (nm <= scm_mtrigger)
1826 {
1827 SCM_SYSCALL (ptr = realloc (where, size));
1828 if (NULL != ptr)
1829 {
1830 scm_mallocated = nm;
1831 return ptr;
1832 }
1833 }
1834
1835 scm_igc (what);
1836
1837 nm = scm_mallocated + size - old_size;
1838 SCM_SYSCALL (ptr = realloc (where, size));
1839 if (NULL != ptr)
1840 {
1841 scm_mallocated = nm;
1842 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1843 if (nm > scm_mtrigger)
1844 scm_mtrigger = nm + nm / 2;
1845 else
1846 scm_mtrigger += scm_mtrigger / 2;
1847 }
1848 return ptr;
1849 }
1850
1851 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1852 return 0; /* never reached */
1853 }
1854
1855 void
1856 scm_must_free (void *obj)
1857 {
1858 if (obj)
1859 free (obj);
1860 else
1861 scm_wta (SCM_INUM0, "already free", "");
1862 }
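/* Typical usage of this front end from object-implementation code (an
 * illustrative sketch; the buffer and the length variables are
 * hypothetical):
 *
 *   char *buf = (char *) scm_must_malloc (len + 1, "my-buffer");
 *   ...
 *   buf = (char *) scm_must_realloc (buf, len + 1, new_len + 1, "my-buffer");
 *   ...
 *   scm_must_free (buf);
 *
 * Both allocators add the requested size to scm_mallocated and may run the
 * collector first when that total would pass scm_mtrigger. */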
1863
1864 /* Announce that there has been some malloc done that will be freed
1865 * during gc. A typical use is for a smob that uses some malloced
1866 * memory but can not get it from scm_must_malloc (for whatever
1867 * reason). When a new object of this smob is created you call
1868 * scm_done_malloc with the size of the object. When your smob free
1869 * function is called, be sure to include this size in the return
1870 * value. */
1871
1872 void
1873 scm_done_malloc (long size)
1874 {
1875 scm_mallocated += size;
1876
1877 if (scm_mallocated > scm_mtrigger)
1878 {
1879 scm_igc ("foreign mallocs");
1880 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1881 {
1882 if (scm_mallocated > scm_mtrigger)
1883 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1884 else
1885 scm_mtrigger += scm_mtrigger / 2;
1886 }
1887 }
1888 }
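/* A sketch of the pattern described above, for a hypothetical smob whose
 * body is allocated with plain malloc rather than scm_must_malloc (the
 * "image" type and its free function are made up for illustration):
 *
 *   image = (struct image *) malloc (sizeof (struct image));
 *   scm_done_malloc (sizeof (struct image));
 *
 * and later, in the smob's free function:
 *
 *   static scm_sizet
 *   free_image (SCM image_smob)
 *   {
 *     struct image *image = (struct image *) SCM_CDR (image_smob);
 *     free (image);
 *     return sizeof (struct image);    (report the size back to the GC)
 *   }
 */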
1889
1890
1891 \f
1892
1893 /* {Heap Segments}
1894 *
1895 * Each heap segment is an array of objects of a particular size.
1896 * Every segment has an associated (possibly shared) freelist.
1897 * A table of segment records is kept that records the upper and
1898 * lower extents of the segment; this is used during the conservative
1899 * phase of gc to identify probable gc roots (because they point
1900 * into valid segments at reasonable offsets). */
1901
1902 /* scm_expmem
1903 * is true if the first segment could be allocated at its full initial size, SCM_INIT_HEAP_SIZE.
1904 * If scm_expmem is set to one, subsequent segment allocations will
1905 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1906 */
1907 int scm_expmem = 0;
1908
1909 /* scm_heap_org
1910 * is the lowest base address of any heap segment.
1911 */
1912 SCM_CELLPTR scm_heap_org;
1913
1914 struct scm_heap_seg_data * scm_heap_table = 0;
1915 int scm_n_heap_segs = 0;
1916
1917 /* init_heap_seg
1918 * initializes a new heap segment and returns its size in bytes.
1919 *
1920 * The segment origin, segment size in bytes, and the span of objects
1921 * in cells are input parameters. The freelist is both input and output.
1922 *
1923 * This function presumes that the scm_heap_table has already been expanded
1924 * to accommodate a new segment record.
1925 */
1926
1927
1928 static scm_sizet
1929 init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelistp)
1930 {
1931 register SCM_CELLPTR ptr;
1932 #ifdef SCM_POINTERS_MUNGED
1933 register SCM scmptr;
1934 #else
1935 #undef scmptr
1936 #define scmptr ptr
1937 #endif
1938 SCM_CELLPTR seg_end;
1939 int new_seg_index;
1940 int n_new_cells;
1941 int span = freelistp->span;
1942
1943 if (seg_org == NULL)
1944 return 0;
1945
1946 ptr = seg_org;
1947
1948 size = (size / sizeof (scm_cell) / span) * span * sizeof (scm_cell);
1949
1950 /* Compute the ceiling on valid object pointers w/in this segment.
1951 */
1952 seg_end = CELL_DN ((char *) ptr + size);
1953
1954 /* Find the right place and insert the segment record.
1955 *
1956 */
1957 for (new_seg_index = 0;
1958 ( (new_seg_index < scm_n_heap_segs)
1959 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
1960 new_seg_index++)
1961 ;
1962
1963 {
1964 int i;
1965 for (i = scm_n_heap_segs; i > new_seg_index; --i)
1966 scm_heap_table[i] = scm_heap_table[i - 1];
1967 }
1968
1969 ++scm_n_heap_segs;
1970
1971 scm_heap_table[new_seg_index].valid = 0;
1972 scm_heap_table[new_seg_index].span = span;
1973 scm_heap_table[new_seg_index].freelistp = freelistp;
1974 scm_heap_table[new_seg_index].bounds[0] = (SCM_CELLPTR)ptr;
1975 scm_heap_table[new_seg_index].bounds[1] = (SCM_CELLPTR)seg_end;
1976
1977
1978 /* Compute the least valid object pointer w/in this segment
1979 */
1980 ptr = CELL_UP (ptr);
1981
1982
1983 /*n_new_cells*/
1984 n_new_cells = seg_end - ptr;
1985
1986 #ifdef GUILE_NEW_GC_SCHEME
1987
1988 freelistp->heap_size += n_new_cells;
1989
1990 /* Partition objects in this segment into clusters
1991 */
1992 {
1993 SCM clusters;
1994 SCM *clusterp = &clusters;
1995 int trigger = span * freelistp->gc_trigger;
1996 int n, c = 0;
1997
1998 while (n_new_cells > span)
1999 {
2000 if (n_new_cells > trigger)
2001 n = span + trigger;
2002 else
2003 n = n_new_cells;
2004 n_new_cells -= n;
2005 n -= span;
2006 c += span;
2007
2008 *clusterp = PTR2SCM (ptr);
2009 SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
2010 clusterp = SCM_CDRLOC (*clusterp);
2011
2012 ptr += span;
2013 seg_end = ptr + n;
2014 while (ptr < seg_end)
2015 {
2016 #ifdef SCM_POINTERS_MUNGED
2017 scmptr = PTR2SCM (ptr);
2018 #endif
2019 SCM_SETCAR (scmptr, scm_tc_free_cell);
2020 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
2021 ptr += span;
2022 }
2023 SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
2024 }
2025
2026 /* Correction for cluster cells + spill */
2027 freelistp->heap_size -= c + n_new_cells;
2028
2029 /* Patch up the last cluster pointer in the segment
2030 * to join it to the input freelist.
2031 */
2032 *clusterp = freelistp->clusters;
2033 freelistp->clusters = clusters;
2034 }
2035
2036 #else /* GUILE_NEW_GC_SCHEME */
2037
2038 /* Prepend objects in this segment to the freelist.
2039 */
2040 while (ptr < seg_end)
2041 {
2042 #ifdef SCM_POINTERS_MUNGED
2043 scmptr = PTR2SCM (ptr);
2044 #endif
2045 SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
2046 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
2047 ptr += span;
2048 }
2049
2050 ptr -= span;
2051
2052 /* Patch up the last freelist pointer in the segment
2053 * to join it to the input freelist.
2054 */
2055 SCM_SETCDR (PTR2SCM (ptr), freelistp->cells);
2056 freelistp->cells = PTR2SCM (CELL_UP (seg_org));
2057
2058 freelistp->heap_size += n_new_cells;
2059
2060 #endif /* GUILE_NEW_GC_SCHEME */
2061
2062 return size;
2063 #ifdef scmptr
2064 #undef scmptr
2065 #endif
2066 }
2067
2068
2069 static void
2070 alloc_some_heap (scm_freelist_t *freelistp)
2071 {
2072 struct scm_heap_seg_data * tmptable;
2073 SCM_CELLPTR ptr;
2074 scm_sizet len;
2075
2076 /* Critical code sections (such as the garbage collector)
2077 * aren't supposed to add heap segments.
2078 */
2079 if (scm_gc_heap_lock)
2080 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
2081
2082 /* Expand the heap tables to have room for the new segment.
2083 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
2084 * only if the allocation of the segment itself succeeds.
2085 */
2086 len = (1 + scm_n_heap_segs) * sizeof (struct scm_heap_seg_data);
2087
2088 SCM_SYSCALL (tmptable = ((struct scm_heap_seg_data *)
2089 realloc ((char *)scm_heap_table, len)));
2090 if (!tmptable)
2091 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
2092 else
2093 scm_heap_table = tmptable;
2094
2095
2096 /* Pick a size for the new heap segment.
2097 * The rule for picking the size of a segment is explained in
2098 * gc.h
2099 */
2100 if (scm_expmem)
2101 {
2102 len = (scm_sizet) SCM_EXPHEAP (freelistp->heap_size * sizeof (scm_cell));
2103 if ((scm_sizet) SCM_EXPHEAP (freelistp->heap_size * sizeof (scm_cell))
2104 != len)
2105 len = 0;
2106 }
2107 else
2108 len = SCM_HEAP_SEG_SIZE;
2109
2110 {
2111 scm_sizet smallest;
2112
2113 smallest = (freelistp->span * sizeof (scm_cell));
2114 if (len < smallest)
2115 len = (freelistp->span * sizeof (scm_cell));
2116
2117 /* Allocate with decaying ambition. */
2118 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2119 && (len >= smallest))
2120 {
2121 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (len));
2122 if (ptr)
2123 {
2124 init_heap_seg (ptr, len, freelistp);
2125 return;
2126 }
2127 len /= 2;
2128 }
2129 }
2130
2131 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
2132 }
2133
2134
2135
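/* No docstring was provided for unhash-name; judging from the loop
   below, it walks every heap segment looking for cells whose CAR
   carries tag 1 in its low three bits (memoized global variable
   references) and overwrites that CAR with NAME so the reference is
   resolved afresh on its next use.  NAME may be #t to rewrite every
   such reference. */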
2136 SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
2137 (SCM name),
2138 "")
2139 #define FUNC_NAME s_scm_unhash_name
2140 {
2141 int x;
2142 int bound;
2143 SCM_VALIDATE_SYMBOL (1,name);
2144 SCM_DEFER_INTS;
2145 bound = scm_n_heap_segs;
2146 for (x = 0; x < bound; ++x)
2147 {
2148 SCM_CELLPTR p;
2149 SCM_CELLPTR pbound;
2150 p = (SCM_CELLPTR)scm_heap_table[x].bounds[0];
2151 pbound = (SCM_CELLPTR)scm_heap_table[x].bounds[1];
2152 while (p < pbound)
2153 {
2154 SCM incar;
2155 incar = p->car;
2156 if (1 == (7 & (int)incar))
2157 {
2158 --incar;
2159 if ( ((name == SCM_BOOL_T) || (SCM_CAR (incar) == name))
2160 && (SCM_CDR (incar) != 0)
2161 && (SCM_UNPACK (SCM_CDR (incar)) != 1))
2162 {
2163 p->car = name;
2164 }
2165 }
2166 ++p;
2167 }
2168 }
2169 SCM_ALLOW_INTS;
2170 return name;
2171 }
2172 #undef FUNC_NAME
2173
2174
2175 \f
2176 /* {GC Protection Helper Functions}
2177 */
2178
2179
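/* Presumably the point of this no-op is that taking a local SCM
   variable's address forces it into scanned stack memory, so the
   conservative GC keeps seeing the object it holds. */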
2180 void
2181 scm_remember (SCM *ptr)
2182 { /* empty */ }
2183
2184
2185 /*
2186   These functions prevent garbage collection of the arguments
2187   that follow the first one: the caller passes them to a call
2188   that forms the last expression of its block, so they remain
2189   referenced (and hence visible to the conservative stack-scanning
2190   GC) throughout the function.
2191   It'd be better to have a nice compiler hint to
2192   aid the GC instead. --03/09/00 gjb */
2193 SCM
2194 scm_return_first (SCM elt, ...)
2195 {
2196 return elt;
2197 }
2198
2199 int
2200 scm_return_first_int (int i, ...)
2201 {
2202 return i;
2203 }
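/* A sketch of how callers are meant to use scm_return_first: the
 * helper below is hypothetical (not part of Guile) and only
 * illustrates keeping STR alive while C code still works on a raw
 * pointer into its storage.
 */
#if 0
static void munge_bytes (char *bytes);     /* hypothetical worker */

static SCM
munge_string_contents (SCM str)
{
  char *bytes = SCM_CHARS (str);  /* raw pointer into STR's heap storage */
  munge_bytes (bytes);            /* may allocate, and hence run the GC */
  /* Passing STR here keeps it referenced until the very end of the
     function, so the conservative stack scan cannot lose it while
     BYTES is still in use.  */
  return scm_return_first (SCM_UNSPECIFIED, str);
}
#endif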
2204
2205
2206 SCM
2207 scm_permanent_object (SCM obj)
2208 {
2209 SCM_REDEFER_INTS;
2210 scm_permobjs = scm_cons (obj, scm_permobjs);
2211 SCM_REALLOW_INTS;
2212 return obj;
2213 }
2214
2215
2216 /* Protect OBJ from the garbage collector. OBJ will not be freed,
2217 even if all other references are dropped, until someone applies
2218 scm_unprotect_object to it. This function returns OBJ.
2219
2220 Calls to scm_protect_object nest. For every object OBJ, there is a
2221 counter which scm_protect_object(OBJ) increments and
2222 scm_unprotect_object(OBJ) decrements (never below zero). If
2223 an object's counter is greater than zero, the garbage collector
2224 will not free it.
2225
2226 Of course, that's not how it's implemented. scm_protect_object and
2227 scm_unprotect_object just maintain a list of references to things.
2228 Since the GC knows about this list, all objects it mentions stay
2229 alive. scm_protect_object adds its argument to the list;
2230 scm_unprotect_object removes the first occurrence of its argument
2231 from the list. */
2232 SCM
2233 scm_protect_object (SCM obj)
2234 {
2235 scm_protects = scm_cons (obj, scm_protects);
2236
2237 return obj;
2238 }
2239
2240
2241 /* Remove any protection for OBJ established by a prior call to
2242 scm_protect_object. This function returns OBJ.
2243
2244 See scm_protect_object for more information. */
2245 SCM
2246 scm_unprotect_object (SCM obj)
2247 {
2248 SCM *tail_ptr = &scm_protects;
2249
2250 while (SCM_CONSP (*tail_ptr))
2251 if (SCM_CAR (*tail_ptr) == obj)
2252 {
2253 *tail_ptr = SCM_CDR (*tail_ptr);
2254 break;
2255 }
2256 else
2257 tail_ptr = SCM_CDRLOC (*tail_ptr);
2258
2259 return obj;
2260 }
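/* A sketch of the intended protect/unprotect pairing.  The names
 * below are hypothetical; the point is that an SCM stored somewhere
 * the collector never scans (a malloc'd structure, for instance)
 * must stay protected for as long as it is kept there.
 */
#if 0
static SCM config_hook;            /* hypothetical C-side reference */
static int config_hook_set = 0;

void
set_config_hook (SCM proc)
{
  if (config_hook_set)
    scm_unprotect_object (config_hook);  /* drop the old protection */
  config_hook = proc;
  config_hook_set = 1;
  scm_protect_object (config_hook);      /* the GC now keeps PROC alive */
}
#endif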
2261
2262 int terminating;
2263
2264 /* called on process termination. */
2265 #ifdef HAVE_ATEXIT
2266 static void
2267 cleanup (void)
2268 #else
2269 #ifdef HAVE_ON_EXIT
2270 extern int on_exit (void (*procp) (), int arg);
2271
2272 static void
2273 cleanup (int status, void *arg)
2274 #else
2275 #error "Don't know how to set up a cleanup handler on your system."
2276 #endif
2277 #endif
2278 {
2279 terminating = 1;
2280 scm_flush_all_ports ();
2281 }
2282
2283 \f
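/* Allocate the first heap segment for FREELISTP: try INIT_HEAP_SIZE
   bytes (or SCM_INIT_HEAP_SIZE if zero was passed), falling back to a
   single SCM_HEAP_SEG_SIZE segment if that fails.  Returns 0 on
   success, 1 if no segment could be allocated at all.  scm_expmem is
   set only when the full-sized initial segment was obtained, which
   enables the expanding-heap sizing rule in alloc_some_heap. */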
2284 static int
2285 make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelistp)
2286 {
2287 if (0L == init_heap_size)
2288 init_heap_size = SCM_INIT_HEAP_SIZE;
2289 if (!init_heap_seg ((SCM_CELLPTR) malloc (init_heap_size),
2290 init_heap_size,
2291 freelistp))
2292 {
2293 init_heap_size = SCM_HEAP_SEG_SIZE;
2294 if (!init_heap_seg ((SCM_CELLPTR) malloc (init_heap_size),
2295 init_heap_size,
2296 freelistp))
2297 return 1;
2298 }
2299 else
2300 scm_expmem = 1;
2301
2302 return 0;
2303 }
2304
2305 \f
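/* One-time initialisation of the storage subsystem: clear the
   scm_sys_protects roots, set up the (master) freelists, allocate the
   heap table, the initial heap segment(s) and the port table, install
   the exit-time cleanup handler, and create a handful of permanent
   objects and bindings.  Returns 0 on success, 1 if an initial
   allocation failed. */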
2306 #ifdef GUILE_NEW_GC_SCHEME
2307 int
2308 scm_init_storage (scm_sizet init_heap_size, int gc_trigger,
2309 scm_sizet init_heap2_size, int gc_trigger2)
2310 #else
2311 int
2312 scm_init_storage (scm_sizet init_heap_size, scm_sizet init_heap2_size)
2313 #endif
2314 {
2315 scm_sizet j;
2316
2317 j = SCM_NUM_PROTECTS;
2318 while (j)
2319 scm_sys_protects[--j] = SCM_BOOL_F;
2320 scm_block_gc = 1;
2321
2322 #ifdef GUILE_NEW_GC_SCHEME
2323 scm_freelist = SCM_EOL;
2324 scm_master_freelist.clusters = SCM_EOL;
2325 scm_master_freelist.triggeredp = 0;
2326 scm_master_freelist.gc_trigger
2327 = gc_trigger ? gc_trigger : SCM_GC_TRIGGER;
2328 scm_master_freelist.span = 1;
2329 scm_master_freelist.collected = 0;
2330 scm_master_freelist.heap_size = 0;
2331 #else
2332 scm_freelist.cells = SCM_EOL;
2333 scm_freelist.span = 1;
2334 scm_freelist.collected = 0;
2335 scm_freelist.heap_size = 0;
2336 #endif
2337
2338 #ifdef GUILE_NEW_GC_SCHEME
2339 scm_freelist2 = SCM_EOL;
2340 scm_master_freelist2.clusters = SCM_EOL;
2341 scm_master_freelist2.triggeredp = 0;
2342 scm_master_freelist2.gc_trigger
2343 = gc_trigger2 ? gc_trigger2 : SCM_GC_TRIGGER2;
2344 scm_master_freelist2.span = 2;
2345 scm_master_freelist2.collected = 0;
2346 scm_master_freelist2.heap_size = 0;
2347 #else
2348 scm_freelist2.cells = SCM_EOL;
2349 scm_freelist2.span = 2;
2350 scm_freelist2.collected = 0;
2351 scm_freelist2.heap_size = 0;
2352 #endif
2353
2354 scm_expmem = 0;
2355
2356 j = SCM_HEAP_SEG_SIZE;
2357 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2358 scm_heap_table = ((struct scm_heap_seg_data *)
2359 scm_must_malloc (sizeof (struct scm_heap_seg_data) * 2, "hplims"));
2360
2361 #ifdef GUILE_NEW_GC_SCHEME
2362 if (make_initial_segment (init_heap_size, &scm_master_freelist) ||
2363 make_initial_segment (init_heap2_size, &scm_master_freelist2))
2364 return 1;
2365 #else
2366 if (make_initial_segment (init_heap_size, &scm_freelist) ||
2367 make_initial_segment (init_heap2_size, &scm_freelist2))
2368 return 1;
2369 #endif
2370
2371 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0]);
2372
2373 /* scm_heap_table[0].bounds[0] can change as further segments are added, so keep the cached scm_heap_org rather than recomputing it. */
2374 scm_weak_vectors = SCM_EOL;
2375
2376 /* Initialise the port table. */
2377 scm_port_table = (scm_port **)
2378 malloc (sizeof (scm_port *) * scm_port_table_room);
2379 if (!scm_port_table)
2380 return 1;
2381
2382 #ifdef HAVE_ATEXIT
2383 atexit (cleanup);
2384 #else
2385 #ifdef HAVE_ON_EXIT
2386 on_exit (cleanup, 0);
2387 #endif
2388 #endif
2389
2390 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
2391 SCM_SETCDR (scm_undefineds, scm_undefineds);
2392
2393 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2394 scm_nullstr = scm_makstr (0L, 0);
2395 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
2396 scm_symhash = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2397 scm_weak_symhash = scm_make_weak_key_hash_table ((SCM) SCM_MAKINUM (scm_symhash_dim));
2398 scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2399 scm_stand_in_procs = SCM_EOL;
2400 scm_permobjs = SCM_EOL;
2401 scm_protects = SCM_EOL;
2402 scm_asyncs = SCM_EOL;
2403 scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
2404 scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
2405 #ifdef SCM_BIGDIG
2406 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
2407 #endif
2408 return 0;
2409 }
2410 \f
2411
2412 void
2413 scm_init_gc ()
2414 {
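  /* "gc.x" is generated from this file by the snarfing mechanism and
     holds the registration code for the primitives defined above with
     SCM_DEFINE, such as unhash-name. */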
2415 #include "gc.x"
2416 }