Fix my last commit -- DEBUG_FREELIST renamed to GUILE_DEBUG_FREELIST
[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995, 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41 \f
42 #include <stdio.h>
43 #include "_scm.h"
44 #include "stime.h"
45 #include "stackchk.h"
46 #include "struct.h"
47 #include "genio.h"
48 #include "weaks.h"
49 #include "guardians.h"
50 #include "smob.h"
51 #include "unif.h"
52 #include "async.h"
53
54 #include "gc.h"
55
56 #ifdef HAVE_MALLOC_H
57 #include <malloc.h>
58 #endif
59
60 #ifdef HAVE_UNISTD_H
61 #include <unistd.h>
62 #endif
63
64 #ifdef __STDC__
65 #include <stdarg.h>
66 #define var_start(x, y) va_start(x, y)
67 #else
68 #include <varargs.h>
69 #define var_start(x, y) va_start(x)
70 #endif
71
72 \f
73 /* {heap tuning parameters}
74 *
75 * These are parameters for controlling memory allocation. The heap
76 * is the area out of which scm_cons, and object headers are allocated.
77 *
78 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
79 * 64 bit machine. The units of the _SIZE parameters are bytes.
80 * Cons pairs and object headers occupy one heap cell.
81 *
82 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
83 * allocated initially the heap will grow by half its current size
84 * each subsequent time more heap is needed.
85 *
86 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
87 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
88 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
89 * is in scm_init_storage() and alloc_some_heap() in sys.c
90 *
91 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
92 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
93 *
94 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
95 * is needed.
96 *
97 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
98 * trigger a GC.
99 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */
106
/* Initial heap size; if this allocation succeeds the heap later grows
   by SCM_EXPHEAP of its current size (see the commentary above).  */
#define SCM_INIT_HEAP_SIZE (32768L*sizeof(scm_cell))
/* Smallest acceptable segment when growing the heap.  */
#define SCM_MIN_HEAP_SEG_SIZE (2048L*sizeof(scm_cell))
/* Fallback segment size, with platform-specific overrides.  */
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
# define SCM_HEAP_SEG_SIZE (7000L*sizeof(scm_cell))
# else
# define SCM_HEAP_SEG_SIZE (16384L*sizeof(scm_cell))
# endif
#endif
/* Growth rule applied when SCM_INIT_HEAP_SIZE was obtained initially.  */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size*2)
/* Initial must_malloc byte count that triggers a GC.  */
#define SCM_INIT_MALLOC_LIMIT 100000
/* See the block comment above for the rationale of this value.  */
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)

/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
   bounds for allocated storage */

#ifdef PROT386
/*in 386 protected mode we must only adjust the offset */
# define CELL_UP(p) MK_FP(FP_SEG(p), ~7&(FP_OFF(p)+7))
# define CELL_DN(p) MK_FP(FP_SEG(p), ~7&FP_OFF(p))
#else
# ifdef _UNICOS
# define CELL_UP(p) (SCM_CELLPTR)(~1L & ((long)(p)+1L))
# define CELL_DN(p) (SCM_CELLPTR)(~1L & (long)(p))
# else
# define CELL_UP(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & ((long)(p)+sizeof(scm_cell)-1L))
# define CELL_DN(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */
138
139
140 \f
/* scm_freelist
 * is the head of the freelist of cons pairs.  Free cells are chained
 * through their CDRs; SCM_EOL marks an exhausted freelist.
 */
SCM scm_freelist = SCM_EOL;

/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;


/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */

int scm_block_gc = 1;

/* If fewer than MIN_GC_YIELD cells are recovered during a garbage
 * collection (GC) more space is allocated for the heap.
 */
#define MIN_GC_YIELD (scm_heap_size/4)

/* During collection, this accumulates objects holding
 * weak references (chained via SCM_WVECT_GC_CHAIN in scm_gc_mark).
 */
SCM scm_weak_vectors;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
/* Signed on purpose: scm_igc aborts if this underflows below zero,
   which indicates unreported allocations (see scm_igc).  */
long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_rt;
unsigned long scm_gc_time_taken = 0;

/* Symbols naming the entries of the alist returned by gc-stats.  */
SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
192
/* Descriptor for one contiguous segment of the cell heap.  */
struct scm_heap_seg_data
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist. */
  SCM *freelistp;

  /* number of SCM words per object in this segment */
  int ncells;

  /* If SEG_DATA->valid is non-zero, the conservative marking
     functions will apply SEG_DATA->valid to the purported pointer and
     SEG_DATA, and mark the object iff the function returns non-zero.
     At the moment, I don't think anyone uses this. */
  int (*valid) ();
};
211
212
213
214
/* Forward declarations for file-local helpers defined further down.  */
static void scm_mark_weak_vector_spines SCM_P ((void));
static scm_sizet init_heap_seg SCM_P ((SCM_CELLPTR, scm_sizet, int, SCM *));
static void alloc_some_heap SCM_P ((int, SCM *));
218
219
220 \f
221 /* Debugging functions. */
222
223 #ifdef GUILE_DEBUG_FREELIST
224
225 /* Return the number of the heap segment containing CELL. */
226 static int
227 which_seg (SCM cell)
228 {
229 int i;
230
231 for (i = 0; i < scm_n_heap_segs; i++)
232 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
233 && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
234 return i;
235 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
236 cell);
237 abort ();
238 }
239
240
241 SCM_PROC (s_map_free_list, "map-free-list", 0, 0, 0, scm_map_free_list);
242 SCM
243 scm_map_free_list ()
244 {
245 int last_seg = -1, count = 0;
246 SCM f;
247
248 fprintf (stderr, "%d segments total\n", scm_n_heap_segs);
249 for (f = scm_freelist; SCM_NIMP (f); f = SCM_CDR (f))
250 {
251 int this_seg = which_seg (f);
252
253 if (this_seg != last_seg)
254 {
255 if (last_seg != -1)
256 fprintf (stderr, " %5d cells in segment %d\n", count, last_seg);
257 last_seg = this_seg;
258 count = 0;
259 }
260 count++;
261 }
262 if (last_seg != -1)
263 fprintf (stderr, " %5d cells in segment %d\n", count, last_seg);
264
265 fflush (stderr);
266
267 return SCM_UNSPECIFIED;
268 }
269
270
271 /* Number of calls to SCM_NEWCELL since startup. */
272 static unsigned long scm_newcell_count;
273
274 /* Search freelist for anything that isn't marked as a free cell.
275 Abort if we find something. */
276 static void
277 scm_check_freelist ()
278 {
279 SCM f;
280 int i = 0;
281
282 for (f = scm_freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
283 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
284 {
285 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
286 scm_newcell_count, i);
287 fflush (stderr);
288 abort ();
289 }
290 }
291
292 static int scm_debug_check_freelist = 0;
293
294 SCM_PROC (s_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0, scm_gc_set_debug_check_freelist_x);
295 SCM
296 scm_gc_set_debug_check_freelist_x (SCM flag)
297 {
298 SCM_ASSERT(SCM_BOOL_T == flag || SCM_BOOL_F == flag,
299 flag, 1, s_gc_set_debug_check_freelist_x);
300 scm_debug_check_freelist = (SCM_BOOL_T==flag)? 1: 0;
301 return SCM_UNSPECIFIED;
302 }
303
304
/* Instrumented replacement for the SCM_NEWCELL macro, compiled in
   when GUILE_DEBUG_FREELIST is defined.  Counts every allocation and,
   when scm_debug_check_freelist is set, verifies the freelist and
   forces a full GC before allocating.  Returns the freshly popped
   cell (or one obtained via scm_gc_for_newcell when the freelist is
   empty).  */
SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist) {
    scm_check_freelist ();
    /* Stress-test: collect before every allocation.  */
    scm_gc();
  }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro. */
  if (SCM_IMP (scm_freelist))
    new = scm_gc_for_newcell ();
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_CDR (scm_freelist);
      ++scm_cells_allocated;
    }

  return new;
}
329
330 #endif /* GUILE_DEBUG_FREELIST */
331
332 \f
333
334 /* {Scheme Interface to GC}
335 */
336
SCM_PROC (s_gc_stats, "gc-stats", 0, 0, 0, scm_gc_stats);
/* Return an alist of GC statistics: gc time taken, cells allocated,
   heap size, malloc'd bytes, the malloc GC trigger, and the bounds of
   every heap segment.  GC is blocked while the segment list is built;
   if the segment count still changes mid-copy (presumably because
   scm_cons can grow the heap), the copy restarts from scratch.  */
SCM
scm_gc_stats ()
{
  int i;
  int n;
  SCM heap_segs;
  SCM local_scm_mtrigger;
  SCM local_scm_mallocated;
  SCM local_scm_heap_size;
  SCM local_scm_cells_allocated;
  SCM local_scm_gc_time_taken;
  SCM answer;

  SCM_DEFER_INTS;
  scm_block_gc = 1;
 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  /* Build (upper-bound . lower-bound) pairs, newest segment last.  */
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
				    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
			  heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;
  scm_block_gc = 0;

  /* Snapshot the counters so the returned alist is internally
     consistent.  */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = scm_heap_size;
  local_scm_cells_allocated = scm_cells_allocated;
  local_scm_gc_time_taken = scm_gc_time_taken;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
			scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
			scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
			scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
			scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
			scm_cons (sym_heap_segments, heap_segs),
			SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
380
381
382 void
383 scm_gc_start (what)
384 const char *what;
385 {
386 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
387 scm_gc_cells_collected = 0;
388 scm_gc_malloc_collected = 0;
389 scm_gc_ports_collected = 0;
390 }
391
392 void
393 scm_gc_end ()
394 {
395 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
396 scm_gc_time_taken = scm_gc_time_taken + scm_gc_rt;
397 scm_system_async_mark (scm_gc_async);
398 }
399
400
401 SCM_PROC (s_object_address, "object-address", 1, 0, 0, scm_object_address);
402 SCM
403 scm_object_address (obj)
404 SCM obj;
405 {
406 return scm_ulong2num ((unsigned long)obj);
407 }
408
409
SCM_PROC(s_gc, "gc", 0, 0, 0, scm_gc);
/* Scheme-visible entry point: run a full collection immediately,
   with interrupts deferred for the duration.  */
SCM
scm_gc ()
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
419
420
421 \f
422 /* {C Interface For When GC is Triggered}
423 */
424
425 void
426 scm_gc_for_alloc (ncells, freelistp)
427 int ncells;
428 SCM * freelistp;
429 {
430 SCM_REDEFER_INTS;
431 scm_igc ("cells");
432 if ((scm_gc_cells_collected < MIN_GC_YIELD) || SCM_IMP (*freelistp))
433 {
434 alloc_some_heap (ncells, freelistp);
435 }
436 SCM_REALLOW_INTS;
437 }
438
439
440 SCM
441 scm_gc_for_newcell ()
442 {
443 SCM fl;
444 scm_gc_for_alloc (1, &scm_freelist);
445 fl = scm_freelist;
446 scm_freelist = SCM_CDR (fl);
447 return fl;
448 }
449
/* Run one full mark/sweep collection.  WHAT is a short label naming
   the trigger ("call", "cells", ...); it is only used by the
   commented-out trace fprintf below.  Returns without collecting when
   the stack base is unknown or GC is blocked.  Phases, in order:
   sanity checks, root/stack/register marking, weak-vector spine
   marking, guardian zombification, sweep.  */
void
scm_igc (what)
     const char *what;
{
  int j;

#ifdef USE_THREADS
  /* During the critical section, only the current thread may run. */
  SCM_THREAD_CRITICAL_SECTION_START;
#endif

  /* fprintf (stderr, "gc: %s\n", what); */

  scm_gc_start (what);

  if (!scm_stack_base || scm_block_gc)
    {
      scm_gc_end ();
      return;
    }

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  /* Reset the list of weak vectors; scm_gc_mark chains every weak
     vector it encounters onto it for the spine-marking pass below.  */
  scm_weak_vectors = SCM_EOL;

  scm_guardian_gc_init ();

  /* unprotect any struct types with no instances */
#if 0
  {
    SCM type_list;
    SCM * pos;

    pos = &scm_type_obj_list;
    type_list = scm_type_obj_list;
    while (type_list != SCM_EOL)
      if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
	{
	  pos = SCM_CDRLOC (type_list);
	  type_list = SCM_CDR (type_list);
	}
      else
	{
	  *pos = SCM_CDR (type_list);
	  type_list = SCM_CDR (type_list);
	}
  }
#endif

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM * elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    while (x < bound)
      {
	elts[x] = SCM_BOOL_F;
	++x;
      }
  }

#ifndef USE_THREADS

  /* Protect from the C stack.  This must be the first marking
   * done because it provides information about what objects
   * are "in-use" by the C code.   "in-use" objects are those
   * for which the values from SCM_LENGTH and SCM_CHARS must remain
   * usable.   This requirement is stricter than a liveness
   * requirement -- in particular, it constrains the implementation
   * of scm_vector_set_length_x.
   */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
		      (   (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
				       sizeof scm_save_regs_gc_mark)
		       / sizeof (SCM_STACKITEM)));

  {
    /* stack_len is long rather than scm_sizet in order to guarantee that
       &stack_len is long aligned */
#ifdef SCM_STACK_GROWS_UP
#ifdef nosve
    long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
#else
    long stack_len = scm_stack_size (scm_stack_base);
#endif
    scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
#else
#ifdef nosve
    long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
#else
    long stack_len = scm_stack_size (scm_stack_base);
#endif
    scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks();

#endif /* USE_THREADS */

  /* FIXME: insert a phase to un-protect string-data preserved
   * in scm_vector_set_length_x.
   */

  /* Mark every registered protection root.  */
  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  /* Spines are marked last -- see scm_mark_weak_vector_spines.  */
  scm_mark_weak_vector_spines ();

  scm_guardian_zombify ();

  scm_gc_sweep ();

  --scm_gc_heap_lock;
  scm_gc_end ();

#ifdef USE_THREADS
  SCM_THREAD_CRITICAL_SECTION_END;
#endif
}
602
603 \f
604 /* {Mark/Sweep}
605 */
606
607
608
/* Mark an object precisely.
 *
 * Walks the object graph from P, setting GC mark bits per type tag.
 * Tail references (CDRs, vector element 0, smob/port mark results)
 * are followed iteratively via the gc_mark_loop / gc_mark_nimp
 * labels; all other references are marked recursively.
 */
void
scm_gc_mark (p)
     SCM p;
{
  register long i;
  register SCM ptr;

  ptr = p;

gc_mark_loop:
  /* Immediates carry no storage; nothing to mark.  */
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:
  if (SCM_NCELLP (ptr))
    scm_wta (ptr, "rogue pointer in heap", NULL);

  switch (SCM_TYP7 (ptr))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_GCMARKP (ptr))
	break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
	{
	  ptr = SCM_CAR (ptr);
	  goto gc_mark_nimp;
	}
      /* Recurse on the CAR, iterate on the CDR.  */
      scm_gc_mark (SCM_CAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
    case scm_tc7_pws:
      if (SCM_GCMARKP (ptr))
	break;
      SCM_SETGCMARK (ptr);
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      if (SCM_GCMARKP (ptr))
	break;
      SCM_SETGCMARK (ptr);
      {
	SCM vcell;
	vcell = SCM_CAR (ptr) - 1L;
	/* CDR of the vcell distinguishes a plain gloc (default case)
	   from a structure (0 or 1).  */
	switch (SCM_CDR (vcell))
	  {
	  default:
	    scm_gc_mark (vcell);
	    ptr = SCM_GCCDR (ptr);
	    goto gc_mark_loop;
	  case 1:		/* ! */
	  case 0:		/* ! */
	    {
	      SCM layout;
	      SCM * vtable_data;
	      int len;
	      char * fields_desc;
	      register SCM * mem;
	      register int x;

	      vtable_data = (SCM *)vcell;
	      layout = vtable_data[scm_vtable_index_layout];
	      len = SCM_LENGTH (layout);
	      fields_desc = SCM_CHARS (layout);
	      /* We're using SCM_GCCDR here like STRUCT_DATA, except
		 that it removes the mark */
	      mem = (SCM *)SCM_GCCDR (ptr);

	      if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
		{
		  scm_gc_mark (mem[scm_struct_i_procedure]);
		  scm_gc_mark (mem[scm_struct_i_setter]);
		}
	      if (len)
		{
		  /* Mark each 'p' (protected) slot; the final slot is
		     handled separately since it may be a tail array.  */
		  for (x = 0; x < len - 2; x += 2, ++mem)
		    if (fields_desc[x] == 'p')
		      scm_gc_mark (*mem);
		  if (fields_desc[x] == 'p')
		    {
		      if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
			for (x = *mem; x; --x)
			  scm_gc_mark (*++mem);
		      else
			scm_gc_mark (*mem);
		    }
		}
	      if (!SCM_CDR (vcell))
		{
		  SCM_SETGCMARK (vcell);
		  ptr = vtable_data[scm_vtable_index_vtable];
		  goto gc_mark_loop;
		}
	    }
	  }
      }
      break;
    case scm_tcs_closures:
      if (SCM_GCMARKP (ptr))
	break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr)))
	{
	  ptr = SCM_CLOSCAR (ptr);
	  goto gc_mark_nimp;
	}
      scm_gc_mark (SCM_CLOSCAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
    case scm_tc7_lvector:
#ifdef CCLO
    case scm_tc7_cclo:
#endif
      if (SCM_GC8MARKP (ptr))
	break;
      SCM_SETGC8MARK (ptr);
      i = SCM_LENGTH (ptr);
      if (i == 0)
	break;
      /* Recurse on elements 1..n-1, iterate on element 0.  */
      while (--i > 0)
	if (SCM_NIMP (SCM_VELTS (ptr)[i]))
	  scm_gc_mark (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
    case scm_tc7_contin:
      if SCM_GC8MARKP
	(ptr) break;
      SCM_SETGC8MARK (ptr);
      /* Conservatively scan the saved stack copy plus the register
	 save area appended to it.  */
      if (SCM_VELTS (ptr))
	scm_mark_locations (SCM_VELTS (ptr),
			    (scm_sizet)
			    (SCM_LENGTH (ptr) +
			     (sizeof (SCM_STACKITEM) + -1 +
			      sizeof (scm_contregs)) /
			     sizeof (SCM_STACKITEM)));
      break;
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef LONGLONGS
    case scm_tc7_llvect:
#endif

    case scm_tc7_string:
      /* Leaf objects: no embedded SCM references to trace.  */
      SCM_SETGC8MARK (ptr);
      break;

    case scm_tc7_substring:
      if (SCM_GC8MARKP(ptr))
	break;
      SCM_SETGC8MARK (ptr);
      /* Keep the underlying string alive.  */
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;

    case scm_tc7_wvect:
      if (SCM_GC8MARKP(ptr))
	break;
      /* Chain this weak vector onto scm_weak_vectors so its alist
	 spines can be marked after everything else (see
	 scm_mark_weak_vector_spines).  */
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      SCM_SETGC8MARK (ptr);
      if (SCM_IS_WHVEC_ANY (ptr))
	{
	  int x;
	  int len;
	  int weak_keys;
	  int weak_values;

	  len = SCM_LENGTH (ptr);
	  weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
	  weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

	  for (x = 0; x < len; ++x)
	    {
	      SCM alist;
	      alist = SCM_VELTS (ptr)[x];

	      /* mark everything on the alist except the keys or
	       * values, according to weak_values and weak_keys.  */
	      while (   SCM_NIMP (alist)
		     && SCM_CONSP (alist)
		     && !SCM_GCMARKP (alist)
		     && SCM_NIMP (SCM_CAR (alist))
		     && SCM_CONSP (SCM_CAR (alist)))
		{
		  SCM kvpair;
		  SCM next_alist;

		  kvpair = SCM_CAR (alist);
		  next_alist = SCM_CDR (alist);
		  /*
		   * Do not do this:
		   * 	SCM_SETGCMARK (alist);
		   *	SCM_SETGCMARK (kvpair);
		   *
		   * It may be that either the key or value is protected by
		   * an escaped reference to part of the spine of this alist.
		   * If we mark the spine here, and only mark one or neither of the
		   * key and value, they may never be properly marked.
		   * This leads to a horrible situation in which an alist containing
		   * freelist cells is exported.
		   *
		   * So only mark the spines of these arrays last of all marking.
		   * If somebody confuses us by constructing a weak vector
		   * with a circular alist then we are hosed, but at least we
		   * won't prematurely drop table entries.
		   */
		  if (!weak_keys)
		    scm_gc_mark (SCM_CAR (kvpair));
		  if (!weak_values)
		    scm_gc_mark (SCM_GCCDR (kvpair));
		  alist = next_alist;
		}
	      if (SCM_NIMP (alist))
		scm_gc_mark (alist);
	    }
	}
      break;

    case scm_tc7_msymbol:
      if (SCM_GC8MARKP(ptr))
	break;
      SCM_SETGC8MARK (ptr);
      scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
      ptr = SCM_SYMBOL_PROPS (ptr);
      goto gc_mark_loop;
    case scm_tc7_ssymbol:
      if (SCM_GC8MARKP(ptr))
	break;
      SCM_SETGC8MARK (ptr);
      break;
    case scm_tcs_subrs:
      /* Subrs live in a static table; nothing to mark.  */
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
      if (!(i < scm_numptob))
	goto def;
      if (SCM_GC8MARKP (ptr))
	break;
      SCM_SETGC8MARK (ptr);
      if (SCM_PTAB_ENTRY(ptr))
	scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
      /* Delegate to the port type's own mark function, if any.  */
      if (scm_ptobs[i].mark)
	{
	  ptr = (scm_ptobs[i].mark) (ptr);
	  goto gc_mark_loop;
	}
      else
	return;
      break;
    case scm_tc7_smob:
      if (SCM_GC8MARKP (ptr))
	break;
      SCM_SETGC8MARK (ptr);
      switch SCM_GCTYP16 (ptr)
	{ /* should be faster than going through scm_smobs */
	case scm_tc_free_cell:
	  /* printf("found free_cell %X ", ptr); fflush(stdout); */
	  SCM_SETCDR (ptr, SCM_EOL);
	  break;
	case scm_tcs_bignums:
	case scm_tc16_flo:
	  /* Numeric leaves: no embedded SCM references.  */
	  break;
	default:
	  i = SCM_SMOBNUM (ptr);
	  if (!(i < scm_numsmob))
	    goto def;
	  /* Delegate to the smob type's own mark function, if any.  */
	  if (scm_smobs[i].mark)
	    {
	      ptr = (scm_smobs[i].mark) (ptr);
	      goto gc_mark_loop;
	    }
	  else
	    return;
	}
      break;
    default:
    def:scm_wta (ptr, "unknown type in ", "gc_mark");
    }
}
897
898
/* Mark a Region Conservatively
 *
 * Scan the N stack items at X; any word that looks like a pointer
 * into a heap segment (located by binary search over the sorted
 * scm_heap_table bounds) is marked precisely via scm_gc_mark, subject
 * to the segment's optional `valid' predicate.
 */

void
scm_mark_locations (x, n)
     SCM_STACKITEM x[];
     scm_sizet n;
{
  register long m = n;
  register int i, j;
  register SCM_CELLPTR ptr;

  while (0 <= --m)
    if SCM_CELLP (*(SCM **) & x[m])
      {
	ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & x[m]));
	i = 0;
	j = scm_n_heap_segs - 1;
	/* Quick reject: the word must at least fall inside the span
	   of all segments combined.  */
	if (   SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
	    && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
	  {
	    /* Binary search for the segment containing ptr; every
	       path either narrows [i, j], continues, or breaks out
	       (breaking without setting seg_id means the word lies in
	       a gap between segments and is not a heap pointer).  */
	    while (i <= j)
	      {
		int seg_id;
		seg_id = -1;
		if (   (i == j)
		    || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
		  seg_id = i;
		else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
		  seg_id = j;
		else
		  {
		    int k;
		    k = (i + j) / 2;
		    if (k == i)
		      break;
		    if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
		      {
			j = k;
			++i;
			if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
			  continue;
			else
			  break;
		      }
		    else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
		      {
			i = k;
			--j;
			if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
			  continue;
			else
			  break;
		      }
		  }
		/* Found the segment: honor its optional validity
		   predicate before marking.  */
		if (   !scm_heap_table[seg_id].valid
		    || scm_heap_table[seg_id].valid (ptr,
						     &scm_heap_table[seg_id]))
		  scm_gc_mark (*(SCM *) & x[m]);
		break;
	      }

	  }
      }
}
964
965
/* The following is a C predicate which determines if an SCM value can be
   regarded as a pointer to a cell on the heap.  The code is duplicated
   from scm_mark_locations (same binary search over scm_heap_table),
   but returns 1 instead of marking.  */


int
scm_cellp (value)
     SCM value;
{
  register int i, j;
  register SCM_CELLPTR ptr;

  if SCM_CELLP (*(SCM **) & value)
    {
      ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & value));
      i = 0;
      j = scm_n_heap_segs - 1;
      /* Quick reject against the combined span of all segments.  */
      if (   SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
	  && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
	{
	  /* Binary search for the containing segment; breaking out
	     without reaching the `valid' check means the value points
	     into a gap between segments.  */
	  while (i <= j)
	    {
	      int seg_id;
	      seg_id = -1;
	      if (   (i == j)
		  || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
		seg_id = i;
	      else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
		seg_id = j;
	      else
		{
		  int k;
		  k = (i + j) / 2;
		  if (k == i)
		    break;
		  if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
		    {
		      j = k;
		      ++i;
		      if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
			continue;
		      else
			break;
		    }
		  else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
		    {
		      i = k;
		      --j;
		      if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
			continue;
		      else
			break;
		    }
		}
	      /* Found the segment: consult its optional validity
		 predicate.  */
	      if (   !scm_heap_table[seg_id].valid
		  || scm_heap_table[seg_id].valid (ptr,
						   &scm_heap_table[seg_id]))
		return 1;
	      break;
	    }

	}
    }
  return 0;
}
1031
1032
/* Mark the spines of weak hash vectors accumulated in
   scm_weak_vectors during the main mark phase.  This runs after all
   other marking (see the long comment in scm_gc_mark's scm_tc7_wvect
   case): it marks each spine pair and each key/value pair cell, but
   not the keys or values those cells point to, so entries whose weak
   components were never marked elsewhere can still be dropped by the
   sweep.  */
static void
scm_mark_weak_vector_spines ()
{
  SCM w;

  for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
    {
      if (SCM_IS_WHVEC_ANY (w))
	{
	  SCM *ptr;
	  SCM obj;
	  int j;
	  int n;

	  obj = w;
	  ptr = SCM_VELTS (w);
	  n = SCM_LENGTH (w);
	  for (j = 0; j < n; ++j)
	    {
	      SCM alist;

	      alist = ptr[j];
	      /* Walk until the spine ends, is already marked, or
		 stops looking like an alist.  */
	      while (   SCM_NIMP (alist)
		     && SCM_CONSP (alist)
		     && !SCM_GCMARKP (alist)
		     && SCM_NIMP (SCM_CAR (alist))
		     && SCM_CONSP (SCM_CAR (alist)))
		{
		  SCM_SETGCMARK (alist);
		  SCM_SETGCMARK (SCM_CAR (alist));
		  alist = SCM_GCCDR (alist);
		}
	    }
	}
    }
}
1069
1070
1071
1072 void
1073 scm_gc_sweep ()
1074 {
1075 register SCM_CELLPTR ptr;
1076 #ifdef SCM_POINTERS_MUNGED
1077 register SCM scmptr;
1078 #else
1079 #undef scmptr
1080 #define scmptr (SCM)ptr
1081 #endif
1082 register SCM nfreelist;
1083 register SCM *hp_freelist;
1084 register long m;
1085 register int span;
1086 long i;
1087 scm_sizet seg_size;
1088
1089 m = 0;
1090
1091 /* Reset all free list pointers. We'll reconstruct them completely
1092 while scanning. */
1093 for (i = 0; i < scm_n_heap_segs; i++)
1094 *scm_heap_table[i].freelistp = SCM_EOL;
1095
1096 for (i = 0; i < scm_n_heap_segs; i++)
1097 {
1098 register scm_sizet n = 0;
1099 register scm_sizet j;
1100
1101 /* Unmarked cells go onto the front of the freelist this heap
1102 segment points to. Rather than updating the real freelist
1103 pointer as we go along, we accumulate the new head in
1104 nfreelist. Then, if it turns out that the entire segment is
1105 free, we free (i.e., malloc's free) the whole segment, and
1106 simply don't assign nfreelist back into the real freelist. */
1107 hp_freelist = scm_heap_table[i].freelistp;
1108 nfreelist = *hp_freelist;
1109
1110 span = scm_heap_table[i].ncells;
1111 ptr = CELL_UP (scm_heap_table[i].bounds[0]);
1112 seg_size = CELL_DN (scm_heap_table[i].bounds[1]) - ptr;
1113 for (j = seg_size + span; j -= span; ptr += span)
1114 {
1115 #ifdef SCM_POINTERS_MUNGED
1116 scmptr = PTR2SCM (ptr);
1117 #endif
1118 switch SCM_TYP7 (scmptr)
1119 {
1120 case scm_tcs_cons_gloc:
1121 if (SCM_GCMARKP (scmptr))
1122 {
1123 if (SCM_CDR (SCM_CAR (scmptr) - 1) == (SCM)1)
1124 SCM_SETCDR (SCM_CAR (scmptr) - 1, (SCM) 0);
1125 goto cmrkcontinue;
1126 }
1127 {
1128 SCM vcell;
1129 vcell = SCM_CAR (scmptr) - 1L;
1130
1131 if ((SCM_CDR (vcell) == 0) || (SCM_CDR (vcell) == 1))
1132 {
1133 scm_struct_free_t free
1134 = (scm_struct_free_t) ((SCM*) vcell)[scm_struct_i_free];
1135 m += free ((SCM *) vcell, (SCM *) SCM_GCCDR (scmptr));
1136 }
1137 }
1138 break;
1139 case scm_tcs_cons_imcar:
1140 case scm_tcs_cons_nimcar:
1141 case scm_tcs_closures:
1142 case scm_tc7_pws:
1143 if (SCM_GCMARKP (scmptr))
1144 goto cmrkcontinue;
1145 break;
1146 case scm_tc7_wvect:
1147 if (SCM_GC8MARKP (scmptr))
1148 {
1149 goto c8mrkcontinue;
1150 }
1151 else
1152 {
1153 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1154 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
1155 break;
1156 }
1157
1158 case scm_tc7_vector:
1159 case scm_tc7_lvector:
1160 #ifdef CCLO
1161 case scm_tc7_cclo:
1162 #endif
1163 if (SCM_GC8MARKP (scmptr))
1164 goto c8mrkcontinue;
1165
1166 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1167 freechars:
1168 scm_must_free (SCM_CHARS (scmptr));
1169 /* SCM_SETCHARS(scmptr, 0);*/
1170 break;
1171 case scm_tc7_bvect:
1172 if SCM_GC8MARKP (scmptr)
1173 goto c8mrkcontinue;
1174 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1175 goto freechars;
1176 case scm_tc7_byvect:
1177 if SCM_GC8MARKP (scmptr)
1178 goto c8mrkcontinue;
1179 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1180 goto freechars;
1181 case scm_tc7_ivect:
1182 case scm_tc7_uvect:
1183 if SCM_GC8MARKP (scmptr)
1184 goto c8mrkcontinue;
1185 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1186 goto freechars;
1187 case scm_tc7_svect:
1188 if SCM_GC8MARKP (scmptr)
1189 goto c8mrkcontinue;
1190 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1191 goto freechars;
1192 #ifdef LONGLONGS
1193 case scm_tc7_llvect:
1194 if SCM_GC8MARKP (scmptr)
1195 goto c8mrkcontinue;
1196 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1197 goto freechars;
1198 #endif
1199 case scm_tc7_fvect:
1200 if SCM_GC8MARKP (scmptr)
1201 goto c8mrkcontinue;
1202 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1203 goto freechars;
1204 case scm_tc7_dvect:
1205 if SCM_GC8MARKP (scmptr)
1206 goto c8mrkcontinue;
1207 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1208 goto freechars;
1209 case scm_tc7_cvect:
1210 if SCM_GC8MARKP (scmptr)
1211 goto c8mrkcontinue;
1212 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1213 goto freechars;
1214 case scm_tc7_substring:
1215 if (SCM_GC8MARKP (scmptr))
1216 goto c8mrkcontinue;
1217 break;
1218 case scm_tc7_string:
1219 if (SCM_GC8MARKP (scmptr))
1220 goto c8mrkcontinue;
1221 m += SCM_HUGE_LENGTH (scmptr) + 1;
1222 goto freechars;
1223 case scm_tc7_msymbol:
1224 if (SCM_GC8MARKP (scmptr))
1225 goto c8mrkcontinue;
1226 m += ( SCM_LENGTH (scmptr)
1227 + 1
1228 + sizeof (SCM) * ((SCM *)SCM_CHARS (scmptr) - SCM_SLOTS(scmptr)));
1229 scm_must_free ((char *)SCM_SLOTS (scmptr));
1230 break;
1231 case scm_tc7_contin:
1232 if SCM_GC8MARKP (scmptr)
1233 goto c8mrkcontinue;
1234 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
1235 if (SCM_VELTS (scmptr))
1236 goto freechars;
1237 case scm_tc7_ssymbol:
1238 if SCM_GC8MARKP(scmptr)
1239 goto c8mrkcontinue;
1240 break;
1241 case scm_tcs_subrs:
1242 continue;
1243 case scm_tc7_port:
1244 if SCM_GC8MARKP (scmptr)
1245 goto c8mrkcontinue;
1246 if SCM_OPENP (scmptr)
1247 {
1248 int k = SCM_PTOBNUM (scmptr);
1249 if (!(k < scm_numptob))
1250 goto sweeperr;
1251 /* Keep "revealed" ports alive. */
1252 if (scm_revealed_count(scmptr) > 0)
1253 continue;
1254 /* Yes, I really do mean scm_ptobs[k].free */
1255 /* rather than ftobs[k].close. .close */
1256 /* is for explicit CLOSE-PORT by user */
1257 m += (scm_ptobs[k].free) (scmptr);
1258 SCM_SETSTREAM (scmptr, 0);
1259 scm_remove_from_port_table (scmptr);
1260 scm_gc_ports_collected++;
1261 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
1262 }
1263 break;
1264 case scm_tc7_smob:
1265 switch SCM_GCTYP16 (scmptr)
1266 {
1267 case scm_tc_free_cell:
1268 if SCM_GC8MARKP (scmptr)
1269 goto c8mrkcontinue;
1270 break;
1271 #ifdef SCM_BIGDIG
1272 case scm_tcs_bignums:
1273 if SCM_GC8MARKP (scmptr)
1274 goto c8mrkcontinue;
1275 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1276 goto freechars;
1277 #endif /* def SCM_BIGDIG */
1278 case scm_tc16_flo:
1279 if SCM_GC8MARKP (scmptr)
1280 goto c8mrkcontinue;
1281 switch ((int) (SCM_CAR (scmptr) >> 16))
1282 {
1283 case (SCM_IMAG_PART | SCM_REAL_PART) >> 16:
1284 m += sizeof (double);
1285 case SCM_REAL_PART >> 16:
1286 case SCM_IMAG_PART >> 16:
1287 m += sizeof (double);
1288 goto freechars;
1289 case 0:
1290 break;
1291 default:
1292 goto sweeperr;
1293 }
1294 break;
1295 default:
1296 if SCM_GC8MARKP (scmptr)
1297 goto c8mrkcontinue;
1298
1299 {
1300 int k;
1301 k = SCM_SMOBNUM (scmptr);
1302 if (!(k < scm_numsmob))
1303 goto sweeperr;
1304 m += (scm_smobs[k].free) ((SCM) scmptr);
1305 break;
1306 }
1307 }
1308 break;
1309 default:
1310 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1311 }
1312 n += span;
1313 #if 0
1314 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1315 exit (2);
1316 #endif
1317 /* Stick the new cell on the front of nfreelist. It's
1318 critical that we mark this cell as freed; otherwise, the
1319 conservative collector might trace it as some other type
1320 of object. */
1321 SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
1322 SCM_SETCDR (scmptr, nfreelist);
1323 nfreelist = scmptr;
1324
1325 continue;
1326 c8mrkcontinue:
1327 SCM_CLRGC8MARK (scmptr);
1328 continue;
1329 cmrkcontinue:
1330 SCM_CLRGCMARK (scmptr);
1331 }
1332 #ifdef GC_FREE_SEGMENTS
1333 if (n == seg_size)
1334 {
1335 register long j;
1336
1337 scm_heap_size -= seg_size;
1338 free ((char *) scm_heap_table[i].bounds[0]);
1339 scm_heap_table[i].bounds[0] = 0;
1340 for (j = i + 1; j < scm_n_heap_segs; j++)
1341 scm_heap_table[j - 1] = scm_heap_table[j];
1342 scm_n_heap_segs -= 1;
1343 i--; /* We need to scan the segment just moved. */
1344 }
1345 else
1346 #endif /* ifdef GC_FREE_SEGMENTS */
1347 /* Update the real freelist pointer to point to the head of
1348 the list of free cells we've built for this segment. */
1349 *hp_freelist = nfreelist;
1350
1351 #ifdef GUILE_DEBUG_FREELIST
1352 scm_check_freelist ();
1353 scm_map_free_list ();
1354 #endif
1355
1356 scm_gc_cells_collected += n;
1357 }
1358 /* Scan weak vectors. */
1359 {
1360 SCM *ptr, w;
1361 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
1362 {
1363 if (!SCM_IS_WHVEC_ANY (w))
1364 {
1365 register long j, n;
1366
1367 ptr = SCM_VELTS (w);
1368 n = SCM_LENGTH (w);
1369 for (j = 0; j < n; ++j)
1370 if (SCM_NIMP (ptr[j]) && SCM_FREEP (ptr[j]))
1371 ptr[j] = SCM_BOOL_F;
1372 }
1373 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1374 {
1375 SCM obj = w;
1376 register long n = SCM_LENGTH (w);
1377 register long j;
1378
1379 ptr = SCM_VELTS (w);
1380
1381 for (j = 0; j < n; ++j)
1382 {
1383 SCM * fixup;
1384 SCM alist;
1385 int weak_keys;
1386 int weak_values;
1387
1388 weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
1389 weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);
1390
1391 fixup = ptr + j;
1392 alist = *fixup;
1393
1394 while (SCM_NIMP (alist)
1395 && SCM_CONSP (alist)
1396 && SCM_NIMP (SCM_CAR (alist))
1397 && SCM_CONSP (SCM_CAR (alist)))
1398 {
1399 SCM key;
1400 SCM value;
1401
1402 key = SCM_CAAR (alist);
1403 value = SCM_CDAR (alist);
1404 if ( (weak_keys && SCM_NIMP (key) && SCM_FREEP (key))
1405 || (weak_values && SCM_NIMP (value) && SCM_FREEP (value)))
1406 {
1407 *fixup = SCM_CDR (alist);
1408 }
1409 else
1410 fixup = SCM_CDRLOC (alist);
1411 alist = SCM_CDR (alist);
1412 }
1413 }
1414 }
1415 }
1416 }
1417 scm_cells_allocated = (scm_heap_size - scm_gc_cells_collected);
1418 scm_mallocated -= m;
1419 scm_gc_malloc_collected = m;
1420 }
1421
1422
1423 \f
1424
1425 /* {Front end to malloc}
1426 *
1427 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1428 *
 * These functions provide services comparable to malloc, realloc, and
1430 * free. They are for allocating malloced parts of scheme objects.
1431 * The primary purpose of the front end is to impose calls to gc.
1432 */
1433
1434 /* scm_must_malloc
1435 * Return newly malloced storage or throw an error.
1436 *
1437 * The parameter WHAT is a string for error reporting.
1438 * If the threshold scm_mtrigger will be passed by this
1439 * allocation, or if the first call to malloc fails,
1440 * garbage collect -- on the presumption that some objects
1441 * using malloced storage may be collected.
1442 *
1443 * The limit scm_mtrigger may be raised by this allocation.
1444 */
1445 void *
1446 scm_must_malloc (scm_sizet size, const char *what)
1447 {
1448 void *ptr;
1449 unsigned long nm = scm_mallocated + size;
1450
1451 if (nm <= scm_mtrigger)
1452 {
1453 SCM_SYSCALL (ptr = malloc (size));
1454 if (NULL != ptr)
1455 {
1456 scm_mallocated = nm;
1457 return ptr;
1458 }
1459 }
1460
1461 scm_igc (what);
1462
1463 nm = scm_mallocated + size;
1464 SCM_SYSCALL (ptr = malloc (size));
1465 if (NULL != ptr)
1466 {
1467 scm_mallocated = nm;
1468 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1469 if (nm > scm_mtrigger)
1470 scm_mtrigger = nm + nm / 2;
1471 else
1472 scm_mtrigger += scm_mtrigger / 2;
1473 }
1474 return ptr;
1475 }
1476
1477 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1478 return 0; /* never reached */
1479 }
1480
1481
1482 /* scm_must_realloc
1483 * is similar to scm_must_malloc.
1484 */
1485 void *
1486 scm_must_realloc (void *where,
1487 scm_sizet old_size,
1488 scm_sizet size,
1489 const char *what)
1490 {
1491 void *ptr;
1492 scm_sizet nm = scm_mallocated + size - old_size;
1493
1494 if (nm <= scm_mtrigger)
1495 {
1496 SCM_SYSCALL (ptr = realloc (where, size));
1497 if (NULL != ptr)
1498 {
1499 scm_mallocated = nm;
1500 return ptr;
1501 }
1502 }
1503
1504 scm_igc (what);
1505
1506 nm = scm_mallocated + size - old_size;
1507 SCM_SYSCALL (ptr = realloc (where, size));
1508 if (NULL != ptr)
1509 {
1510 scm_mallocated = nm;
1511 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1512 if (nm > scm_mtrigger)
1513 scm_mtrigger = nm + nm / 2;
1514 else
1515 scm_mtrigger += scm_mtrigger / 2;
1516 }
1517 return ptr;
1518 }
1519
1520 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1521 return 0; /* never reached */
1522 }
1523
1524 void
1525 scm_must_free (void *obj)
1526 {
1527 if (obj)
1528 free (obj);
1529 else
1530 scm_wta (SCM_INUM0, "already free", "");
1531 }
1532
1533 /* Announce that there has been some malloc done that will be freed
1534 * during gc. A typical use is for a smob that uses some malloced
1535 * memory but can not get it from scm_must_malloc (for whatever
1536 * reason). When a new object of this smob is created you call
1537 * scm_done_malloc with the size of the object. When your smob free
1538 * function is called, be sure to include this size in the return
1539 * value. */
1540
1541 void
1542 scm_done_malloc (size)
1543 long size;
1544 {
1545 scm_mallocated += size;
1546
1547 if (scm_mallocated > scm_mtrigger)
1548 {
1549 scm_igc ("foreign mallocs");
1550 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1551 {
1552 if (scm_mallocated > scm_mtrigger)
1553 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1554 else
1555 scm_mtrigger += scm_mtrigger / 2;
1556 }
1557 }
1558 }
1559
1560
1561 \f
1562
1563 /* {Heap Segments}
1564 *
1565 * Each heap segment is an array of objects of a particular size.
1566 * Every segment has an associated (possibly shared) freelist.
1567 * A table of segment records is kept that records the upper and
1568 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
1570 * into valid segments at reasonable offsets). */
1571
/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * (NOTE(review): presumably SCM_INIT_HEAP_SIZE is meant here --
 * confirm against gc.h.)
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

/* Table of all heap segments and its current length.  The table is
 * grown by realloc in alloc_some_heap each time a segment is added.
 */
struct scm_heap_seg_data * scm_heap_table = 0;
int scm_n_heap_segs = 0;

/* scm_heap_size
 * is the total number of cells in heap segments.
 */
unsigned long scm_heap_size = 0;
1591
/* init_heap_seg
 * initializes a new heap segment and returns its size in bytes
 * (0 when SEG_ORG is NULL, i.e. the underlying malloc failed).
 *
 * The segment origin, segment size in bytes, and the span of objects
 * in cells are input parameters.  The freelist is both input and output.
 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record.
 */


static scm_sizet
init_heap_seg (seg_org, size, ncells, freelistp)
     SCM_CELLPTR seg_org;
     scm_sizet size;
     int ncells;
     SCM *freelistp;
{
  register SCM_CELLPTR ptr;
#ifdef SCM_POINTERS_MUNGED
  register SCM scmptr;
#else
  /* Without pointer munging, a cell pointer doubles as an SCM.  */
#undef scmptr
#define scmptr ptr
#endif
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_objects;

  if (seg_org == NULL)
    return 0;

  ptr = seg_org;

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = CELL_DN ((char *) ptr + size);

  /* Find the right place and insert the segment record.  The table is
   * kept sorted by base address (see the SCM_PTR_LE condition).
   */
  for (new_seg_index = 0;
       ( (new_seg_index < scm_n_heap_segs)
	 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  /* Shift the following records up to open a slot at new_seg_index.  */
  {
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].valid = 0;
  scm_heap_table[new_seg_index].ncells = ncells;
  scm_heap_table[new_seg_index].freelistp = freelistp;
  scm_heap_table[new_seg_index].bounds[0] = (SCM_CELLPTR)ptr;
  scm_heap_table[new_seg_index].bounds[1] = (SCM_CELLPTR)seg_end;


  /* Compute the least valid object pointer w/in this segment
   */
  ptr = CELL_UP (ptr);


  /* NOTE(review): despite the name, this pointer difference counts
     cells, not objects; for ncells > 1 the "ncells * n_new_objects"
     accounting below looks like it would overcount scm_heap_size --
     confirm the intended units.  */
  n_new_objects = seg_end - ptr;

  /* Prepend objects in this segment to the freelist: mark every cell
   * free and chain it to the next one, NCELLS apart.
   */
  while (ptr < seg_end)
    {
#ifdef SCM_POINTERS_MUNGED
      scmptr = PTR2SCM (ptr);
#endif
      SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
      SCM_SETCDR (scmptr, PTR2SCM (ptr + ncells));
      ptr += ncells;
    }

  ptr -= ncells;

  /* Patch up the last freelist pointer in the segment
   * to join it to the input freelist.
   */
  SCM_SETCDR (PTR2SCM (ptr), *freelistp);
  *freelistp = PTR2SCM (CELL_UP (seg_org));

  scm_heap_size += (ncells * n_new_objects);
  return size;
#ifdef scmptr
#undef scmptr
#endif
}
1687
1688
/* Allocate and install one more heap segment whose objects span NCELLS
 * cells each and whose free cells are threaded onto *FREELISTP.
 * Errors out through scm_wta if the heap is locked or no memory can be
 * obtained.
 */
static void
alloc_some_heap (ncells, freelistp)
     int ncells;
     SCM * freelistp;
{
  struct scm_heap_seg_data * tmptable;
  SCM_CELLPTR ptr;
  scm_sizet len;

  /* Critical code sections (such as the garbage collector)
   * aren't supposed to add heap segments.
   */
  if (scm_gc_heap_lock)
    scm_wta (SCM_UNDEFINED, "need larger initial", "heap");

  /* Expand the heap tables to have room for the new segment.
   * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
   * only if the allocation of the segment itself succeeds.
   */
  len = (1 + scm_n_heap_segs) * sizeof (struct scm_heap_seg_data);

  /* realloc through a temporary so the old table survives failure.  */
  SCM_SYSCALL (tmptable = ((struct scm_heap_seg_data *)
			   realloc ((char *)scm_heap_table, len)));
  if (!tmptable)
    scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
  else
    scm_heap_table = tmptable;


  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained in
   * gc.h
   */
  if (scm_expmem)
    {
      len = (scm_sizet) (SCM_EXPHEAP (scm_heap_size) * sizeof (scm_cell));
      /* NOTE(review): both sides of this comparison apply the same
	 cast, so it looks always-false -- presumably meant as an
	 overflow/truncation guard; confirm.  */
      if ((scm_sizet) (SCM_EXPHEAP (scm_heap_size) * sizeof (scm_cell)) != len)
	len = 0;
    }
  else
    len = SCM_HEAP_SEG_SIZE;

  {
    scm_sizet smallest;

    smallest = (ncells * sizeof (scm_cell));
    if (len < smallest)
      len = (ncells * sizeof (scm_cell));

    /* Allocate with decaying ambition: halve the request until malloc
       succeeds or the request falls below the minimum worth having.  */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
	   && (len >= smallest))
      {
	SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (len));
	if (ptr)
	  {
	    init_heap_seg (ptr, len, ncells, freelistp);
	    return;
	  }
	len /= 2;
      }
  }

  scm_wta (SCM_UNDEFINED, "could not grow", "heap");
}
1754
1755
1756
SCM_PROC (s_unhash_name, "unhash-name", 1, 0, 0, scm_unhash_name);
/* Walk every cell of every heap segment, rewriting matching "hashed"
 * cars back to the plain symbol NAME.  Returns NAME.
 */
SCM
scm_unhash_name (name)
     SCM name;
{
  int x;
  int bound;
  SCM_ASSERT (SCM_NIMP (name) && SCM_SYMBOLP (name), name, SCM_ARG1, s_unhash_name);
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = (SCM_CELLPTR)scm_heap_table[x].bounds[0];
      pbound = (SCM_CELLPTR)scm_heap_table[x].bounds[1];
      while (p < pbound)
	{
	  SCM incar;
	  incar = p->car;
	  /* Cars whose low three tag bits are 001 appear to encode an
	     off-by-one pointer to a pair -- presumably the "hashed"
	     variable representation; confirm against the evaluator.  */
	  if (1 == (7 & (int)incar))
	    {
	      --incar;
	      /* NOTE(review): the SCM_BOOL_T case below is unreachable,
		 since the SCM_ASSERT above requires NAME to be a
		 symbol -- confirm whether #t was meant to be allowed.  */
	      if ( ((name == SCM_BOOL_T) || (SCM_CAR (incar) == name))
		   && (SCM_CDR (incar) != 0)
		   && (SCM_CDR (incar) != 1))
		{
		  p->car = name;
		}
	    }
	  ++p;
	}
    }
  SCM_ALLOW_INTS;
  return name;
}
1793
1794
1795 \f
1796 /* {GC Protection Helper Functions}
1797 */
1798
1799
/* Deliberate no-op.  NOTE(review): presumably exists so callers can
   keep PTR visibly live in their frame for the conservative collector;
   confirm intended use before removing.  */
void
scm_remember (ptr)
     SCM * ptr;
{}
1804
1805
/* Return the first argument unchanged.  NOTE(review): the trailing
   varargs are accepted but never read -- presumably they serve only to
   keep those values on the caller's stack for the conservative
   collector; confirm.  */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}
1811
1812
/* Protect OBJ forever by consing it onto the global scm_permobjs
   list.  Returns OBJ.  */
SCM
scm_permanent_object (obj)
     SCM obj;
{
  /* Defer interrupts so the cons and the list update happen
     atomically with respect to async handlers.  */
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
1822
1823
/* Protect OBJ from the garbage collector.  OBJ will not be freed,
   even if all other references are dropped, until someone applies
   scm_unprotect_object to it.  This function returns OBJ.

   Calls to scm_protect_object nest.  For every object O, there is a
   counter which scm_protect_object(O) increments and
   scm_unprotect_object(O) decrements, if it is greater than zero.  If
   an object's counter is greater than zero, the garbage collector
   will not free it.

   Of course, that's not how it's implemented.  scm_protect_object and
   scm_unprotect_object just maintain a list of references to things.
   Since the GC knows about this list, all objects it mentions stay
   alive.  scm_protect_object adds its argument to the list;
   scm_unprotect_object removes the first occurrence of its argument
   from the list.  */
SCM
scm_protect_object (obj)
     SCM obj;
{
  /* One cons per call, so repeated protections stack up and are
     removed one at a time by scm_unprotect_object.  */
  scm_protects = scm_cons (obj, scm_protects);

  return obj;
}
1848
1849
1850 /* Remove any protection for OBJ established by a prior call to
1851 scm_protect_object. This function returns OBJ.
1852
1853 See scm_protect_object for more information. */
1854 SCM
1855 scm_unprotect_object (obj)
1856 SCM obj;
1857 {
1858 SCM *tail_ptr = &scm_protects;
1859
1860 while (SCM_NIMP (*tail_ptr) && SCM_CONSP (*tail_ptr))
1861 if (SCM_CAR (*tail_ptr) == obj)
1862 {
1863 *tail_ptr = SCM_CDR (*tail_ptr);
1864 break;
1865 }
1866 else
1867 tail_ptr = SCM_CDRLOC (*tail_ptr);
1868
1869 return obj;
1870 }
1871
/* Nonzero once process shutdown has begun (set in cleanup below).  */
int terminating;

/* called on process termination. */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
/* on_exit handlers receive the exit status and a user argument;
   both are unused here.  */
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  scm_flush_all_ports ();
}
1892
1893 \f
/* One-time initialization of the storage subsystem: the protects
 * vector, the first heap segment, the port table, the exit handler,
 * and assorted permanent Scheme objects.  INIT_HEAP_SIZE of 0 selects
 * the default SCM_INIT_HEAP_SIZE.  Returns 0 on success, 1 if an
 * allocation failed.
 */
int
scm_init_storage (scm_sizet init_heap_size)
{
  scm_sizet j;

  /* Clear the vector of permanently protected roots.  */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  /* GC stays blocked until the interpreter is fully booted.  */
  scm_block_gc = 1;
  scm_freelist = SCM_EOL;
  scm_expmem = 0;

  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  /* Room for exactly one segment record; alloc_some_heap grows it.  */
  scm_heap_table = ((struct scm_heap_seg_data *)
		    scm_must_malloc (sizeof (struct scm_heap_seg_data), "hplims"));
  if (0L == init_heap_size)
    init_heap_size = SCM_INIT_HEAP_SIZE;
  j = init_heap_size;
  /* NOTE(review): init_heap_size and j have the same type here, so
     "init_heap_size != j" can never be true -- presumably a leftover
     truncation check from when the parameter was wider; confirm.  */
  if ((init_heap_size != j)
      || !init_heap_seg ((SCM_CELLPTR) malloc (j), j, 1, &scm_freelist))
    {
      /* First segment failed; fall back to the default segment size
	 before giving up.  */
      j = SCM_HEAP_SEG_SIZE;
      if (!init_heap_seg ((SCM_CELLPTR) malloc (j), j, 1, &scm_freelist))
	return 1;
    }
  else
    scm_expmem = 1;
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0]);
  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_weak_vectors = SCM_EOL;

  /* Initialise the list of ports.  */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

  /* Flush all ports when the process terminates (see cleanup).  */
#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  /* A one-element circular list holding SCM_UNDEFINED.  */
  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
  scm_symhash = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_weak_symhash = scm_make_weak_key_hash_table ((SCM) SCM_MAKINUM (scm_symhash_dim));
  scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = SCM_EOL;
  scm_asyncs = SCM_EOL;
  scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
  scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
#ifdef SCM_BIGDIG
  scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
#endif
  return 0;
}
1960 \f
1961
/* Register this file's primitives (gc.x is generated by the snarfer
   from the SCM_PROC declarations above).  */
void
scm_init_gc ()
{
#include "gc.x"
}