* gc.c (scm_mallocated): Just make this signed.
1 /* Copyright (C) 1995, 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41 \f
42 #include <stdio.h>
43 #include "_scm.h"
44 #include "stime.h"
45 #include "stackchk.h"
46 #include "struct.h"
47 #include "genio.h"
48 #include "weaks.h"
49 #include "guardians.h"
50 #include "smob.h"
51 #include "unif.h"
52 #include "async.h"
53
54 #include "gc.h"
55
56 #ifdef HAVE_MALLOC_H
57 #include <malloc.h>
58 #endif
59
60 #ifdef HAVE_UNISTD_H
61 #include <unistd.h>
62 #endif
63
64 #ifdef __STDC__
65 #include <stdarg.h>
66 #define var_start(x, y) va_start(x, y)
67 #else
68 #include <varargs.h>
69 #define var_start(x, y) va_start(x)
70 #endif
71
72 \f
73 /* {heap tuning parameters}
74 *
75 * These are parameters for controlling memory allocation. The heap
 76  * is the area out of which cons pairs (allocated by scm_cons) and object headers are allocated.
77 *
78 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
79 * 64 bit machine. The units of the _SIZE parameters are bytes.
80 * Cons pairs and object headers occupy one heap cell.
81 *
 82  * SCM_INIT_HEAP_SIZE is the initial size of the heap.  If this much heap is
 83  * allocated initially, the heap will grow by SCM_EXPHEAP(scm_heap_size),
 84  * i.e. twice its current size, each subsequent time more heap is needed.
85 *
86 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
87 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
88 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
 89  * is in scm_init_storage() and alloc_some_heap(), below in this file.
90 *
91 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
92 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
93 *
94 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
95 * is needed.
96 *
 97  * SCM_INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
98 * trigger a GC.
99 *
100 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
101 * reclaimed by a GC triggered by must_malloc. If less than this is
102 * reclaimed, the trigger threshold is raised. [I don't know what a
 103 * good value is. I arbitrarily chose 1/10 of SCM_INIT_MALLOC_LIMIT to
 104 * work around an oscillation that caused almost constant GC.]
105 */
106
107 #define SCM_INIT_HEAP_SIZE (32768L*sizeof(scm_cell))
108 #define SCM_MIN_HEAP_SEG_SIZE (2048L*sizeof(scm_cell))
109 #ifdef _QC
110 # define SCM_HEAP_SEG_SIZE 32768L
111 #else
112 # ifdef sequent
113 # define SCM_HEAP_SEG_SIZE (7000L*sizeof(scm_cell))
114 # else
115 # define SCM_HEAP_SEG_SIZE (16384L*sizeof(scm_cell))
116 # endif
117 #endif
118 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size*2)
119 #define SCM_INIT_MALLOC_LIMIT 100000
120 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
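/* For concreteness (the figures follow from the definitions above): on a
 * 32 bit machine, where a cell is 8 bytes, SCM_INIT_HEAP_SIZE requests
 * 32768 * 8 = 256 kilobytes of initial heap and SCM_HEAP_SEG_SIZE falls
 * back to 16384 * 8 = 128 kilobytes per segment; the first
 * malloc-triggered GC happens after roughly 100000 bytes of must_malloc
 * allocation, and such a GC must reclaim at least 10000 of those bytes to
 * keep the trigger threshold from being raised. */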
121
 122 /* CELL_UP and CELL_DN are used by init_heap_seg to find scm_cell-aligned inner
123 bounds for allocated storage */
124
125 #ifdef PROT386
126 /*in 386 protected mode we must only adjust the offset */
127 # define CELL_UP(p) MK_FP(FP_SEG(p), ~7&(FP_OFF(p)+7))
128 # define CELL_DN(p) MK_FP(FP_SEG(p), ~7&FP_OFF(p))
129 #else
130 # ifdef _UNICOS
131 # define CELL_UP(p) (SCM_CELLPTR)(~1L & ((long)(p)+1L))
132 # define CELL_DN(p) (SCM_CELLPTR)(~1L & (long)(p))
133 # else
134 # define CELL_UP(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & ((long)(p)+sizeof(scm_cell)-1L))
135 # define CELL_DN(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & (long)(p))
136 # endif /* UNICOS */
137 #endif /* PROT386 */
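/* Illustrative values (assuming the common case above, with 8-byte cells):
 *   CELL_UP ((char *) 0x1003) == (SCM_CELLPTR) 0x1008
 *   CELL_DN ((char *) 0x1003) == (SCM_CELLPTR) 0x1000
 * i.e. CELL_UP rounds a pointer up to the next cell boundary and CELL_DN
 * rounds it down to the previous one. */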
138
139
140 \f
141 /* scm_freelist
 142  * is the head of the freelist of cons pairs.
143 */
144 SCM scm_freelist = SCM_EOL;
145
146 /* scm_mtrigger
147 * is the number of bytes of must_malloc allocation needed to trigger gc.
148 */
149 unsigned long scm_mtrigger;
150
151
152 /* scm_gc_heap_lock
153 * If set, don't expand the heap. Set only during gc, during which no allocation
154 * is supposed to take place anyway.
155 */
156 int scm_gc_heap_lock = 0;
157
158 /* GC Blocking
159 * Don't pause for collection if this is set -- just
160 * expand the heap.
161 */
162
163 int scm_block_gc = 1;
164
 165 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
 166  * collection (GC), more space is allocated for the heap.
167 */
168 #define MIN_GC_YIELD (scm_heap_size/4)
169
170 /* During collection, this accumulates objects holding
171 * weak references.
172 */
173 SCM scm_weak_vectors;
174
175 /* GC Statistics Keeping
176 */
177 unsigned long scm_cells_allocated = 0;
178 long scm_mallocated = 0;
179 unsigned long scm_gc_cells_collected;
180 unsigned long scm_gc_malloc_collected;
181 unsigned long scm_gc_ports_collected;
182 unsigned long scm_gc_rt;
183 unsigned long scm_gc_time_taken = 0;
184
185 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
186 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
187 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
188 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
189 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
190 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
191
192
193 struct scm_heap_seg_data
194 {
195 /* lower and upper bounds of the segment */
196 SCM_CELLPTR bounds[2];
197
198 /* address of the head-of-freelist pointer for this segment's cells.
199 All segments usually point to the same one, scm_freelist. */
200 SCM *freelistp;
201
 202   /* number of cells per object in this segment */
203 int ncells;
204
205 /* If SEG_DATA->valid is non-zero, the conservative marking
206 functions will apply SEG_DATA->valid to the purported pointer and
207 SEG_DATA, and mark the object iff the function returns non-zero.
208 At the moment, I don't think anyone uses this. */
209 int (*valid) ();
210 };
211
212
213
214
215 static void scm_mark_weak_vector_spines SCM_P ((void));
216 static scm_sizet init_heap_seg SCM_P ((SCM_CELLPTR, scm_sizet, int, SCM *));
217 static void alloc_some_heap SCM_P ((int, SCM *));
218
219
220 \f
221 /* Debugging functions. */
222
223 #ifdef DEBUG_FREELIST
224
225 /* Return the number of the heap segment containing CELL. */
226 static int
227 which_seg (SCM cell)
228 {
229 int i;
230
231 for (i = 0; i < scm_n_heap_segs; i++)
232 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
233 && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
234 return i;
235 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
236 cell);
237 abort ();
238 }
239
240
241 SCM_PROC (s_map_free_list, "map-free-list", 0, 0, 0, scm_map_free_list);
242 SCM
243 scm_map_free_list ()
244 {
245 int last_seg = -1, count = 0;
246 SCM f;
247
248 fprintf (stderr, "%d segments total\n", scm_n_heap_segs);
249 for (f = scm_freelist; SCM_NIMP (f); f = SCM_CDR (f))
250 {
251 int this_seg = which_seg (f);
252
253 if (this_seg != last_seg)
254 {
255 if (last_seg != -1)
256 fprintf (stderr, " %5d cells in segment %d\n", count, last_seg);
257 last_seg = this_seg;
258 count = 0;
259 }
260 count++;
261 }
262 if (last_seg != -1)
263 fprintf (stderr, " %5d cells in segment %d\n", count, last_seg);
264
265 fflush (stderr);
266
267 return SCM_UNSPECIFIED;
268 }
269
270
271 /* Number of calls to SCM_NEWCELL since startup. */
272 static unsigned long scm_newcell_count;
273
274 /* Search freelist for anything that isn't marked as a free cell.
275 Abort if we find something. */
276 static void
277 scm_check_freelist ()
278 {
279 SCM f;
280 int i = 0;
281
282 for (f = scm_freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
283 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
284 {
285 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
286 scm_newcell_count, i);
287 fflush (stderr);
288 abort ();
289 }
290 }
291
292 static int scm_debug_check_freelist = 0;
293 SCM
294 scm_debug_newcell (void)
295 {
296 SCM new;
297
298 scm_newcell_count++;
299 if (scm_debug_check_freelist)
300 scm_check_freelist ();
301
302 /* The rest of this is supposed to be identical to the SCM_NEWCELL
303 macro. */
304 if (SCM_IMP (scm_freelist))
305 new = scm_gc_for_newcell ();
306 else
307 {
308 new = scm_freelist;
309 scm_freelist = SCM_CDR (scm_freelist);
310 ++scm_cells_allocated;
311 }
312
313 return new;
314 }
315
316 #endif /* DEBUG_FREELIST */
317
318 \f
319
320 /* {Scheme Interface to GC}
321 */
322
323 SCM_PROC (s_gc_stats, "gc-stats", 0, 0, 0, scm_gc_stats);
324 SCM
325 scm_gc_stats ()
326 {
327 int i;
328 int n;
329 SCM heap_segs;
330 SCM local_scm_mtrigger;
331 SCM local_scm_mallocated;
332 SCM local_scm_heap_size;
333 SCM local_scm_cells_allocated;
334 SCM local_scm_gc_time_taken;
335 SCM answer;
336
337 SCM_DEFER_INTS;
338 scm_block_gc = 1;
339 retry:
340 heap_segs = SCM_EOL;
341 n = scm_n_heap_segs;
342 for (i = scm_n_heap_segs; i--; )
343 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
344 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
345 heap_segs);
346 if (scm_n_heap_segs != n)
347 goto retry;
348 scm_block_gc = 0;
349
350 local_scm_mtrigger = scm_mtrigger;
351 local_scm_mallocated = scm_mallocated;
352 local_scm_heap_size = scm_heap_size;
353 local_scm_cells_allocated = scm_cells_allocated;
354 local_scm_gc_time_taken = scm_gc_time_taken;
355
356 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
357 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
358 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
359 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
360 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
361 scm_cons (sym_heap_segments, heap_segs),
362 SCM_UNDEFINED);
363 SCM_ALLOW_INTS;
364 return answer;
365 }
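/* From Scheme, (gc-stats) thus returns an association list; its shape is
 * shown below with made-up numbers (illustrative only):
 *
 *   ((gc-time-taken . 34)
 *    (cells-allocated . 21504)
 *    (cell-heap-size . 32768)
 *    (bytes-malloced . 173504)
 *    (gc-malloc-threshold . 100000)
 *    (cell-heap-segments (<upper> . <lower>) ...))
 */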
366
367
368 void
369 scm_gc_start (what)
370 const char *what;
371 {
372 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
373 scm_gc_cells_collected = 0;
374 scm_gc_malloc_collected = 0;
375 scm_gc_ports_collected = 0;
376 }
377
378 void
379 scm_gc_end ()
380 {
381 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
382 scm_gc_time_taken = scm_gc_time_taken + scm_gc_rt;
383 scm_system_async_mark (scm_gc_async);
384 }
385
386
387 SCM_PROC (s_object_address, "object-address", 1, 0, 0, scm_object_address);
388 SCM
389 scm_object_address (obj)
390 SCM obj;
391 {
392 return scm_ulong2num ((unsigned long)obj);
393 }
394
395
396 SCM_PROC(s_gc, "gc", 0, 0, 0, scm_gc);
397 SCM
398 scm_gc ()
399 {
400 SCM_DEFER_INTS;
401 scm_igc ("call");
402 SCM_ALLOW_INTS;
403 return SCM_UNSPECIFIED;
404 }
405
406
407 \f
408 /* {C Interface For When GC is Triggered}
409 */
410
411 void
412 scm_gc_for_alloc (ncells, freelistp)
413 int ncells;
414 SCM * freelistp;
415 {
416 SCM_REDEFER_INTS;
417 scm_igc ("cells");
418 if ((scm_gc_cells_collected < MIN_GC_YIELD) || SCM_IMP (*freelistp))
419 {
420 alloc_some_heap (ncells, freelistp);
421 }
422 SCM_REALLOW_INTS;
423 }
424
425
426 SCM
427 scm_gc_for_newcell ()
428 {
429 SCM fl;
430 scm_gc_for_alloc (1, &scm_freelist);
431 fl = scm_freelist;
432 scm_freelist = SCM_CDR (fl);
433 return fl;
434 }
435
436 void
437 scm_igc (what)
438 const char *what;
439 {
440 int j;
441
442 #ifdef USE_THREADS
443 /* During the critical section, only the current thread may run. */
444 SCM_THREAD_CRITICAL_SECTION_START;
445 #endif
446
447 /* fprintf (stderr, "gc: %s\n", what); */
448
449 scm_gc_start (what);
450
451 if (!scm_stack_base || scm_block_gc)
452 {
453 scm_gc_end ();
454 return;
455 }
456
457 if (scm_mallocated < 0)
458 /* The byte count of allocated objects has underflowed. This is
459 probably because you forgot to report the sizes of objects you
460 have allocated, by calling scm_done_malloc or some such. When
461 the GC freed them, it subtracted their size from
462 scm_mallocated, which underflowed. */
463 abort ();
464
465 if (scm_gc_heap_lock)
466 /* We've invoked the collector while a GC is already in progress.
467 That should never happen. */
468 abort ();
469
470 ++scm_gc_heap_lock;
471
472 scm_weak_vectors = SCM_EOL;
473
474 scm_guardian_gc_init ();
475
476 /* unprotect any struct types with no instances */
477 #if 0
478 {
479 SCM type_list;
480 SCM * pos;
481
482 pos = &scm_type_obj_list;
483 type_list = scm_type_obj_list;
484 while (type_list != SCM_EOL)
485 if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
486 {
487 pos = SCM_CDRLOC (type_list);
488 type_list = SCM_CDR (type_list);
489 }
490 else
491 {
492 *pos = SCM_CDR (type_list);
493 type_list = SCM_CDR (type_list);
494 }
495 }
496 #endif
497
498 /* flush dead entries from the continuation stack */
499 {
500 int x;
501 int bound;
502 SCM * elts;
503 elts = SCM_VELTS (scm_continuation_stack);
504 bound = SCM_LENGTH (scm_continuation_stack);
505 x = SCM_INUM (scm_continuation_stack_ptr);
506 while (x < bound)
507 {
508 elts[x] = SCM_BOOL_F;
509 ++x;
510 }
511 }
512
513 #ifndef USE_THREADS
514
515 /* Protect from the C stack. This must be the first marking
516 * done because it provides information about what objects
517 * are "in-use" by the C code. "in-use" objects are those
518 * for which the values from SCM_LENGTH and SCM_CHARS must remain
519 * usable. This requirement is stricter than a liveness
520 * requirement -- in particular, it constrains the implementation
521 * of scm_vector_set_length_x.
522 */
523 SCM_FLUSH_REGISTER_WINDOWS;
524 /* This assumes that all registers are saved into the jmp_buf */
525 setjmp (scm_save_regs_gc_mark);
526 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
527 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
528 sizeof scm_save_regs_gc_mark)
529 / sizeof (SCM_STACKITEM)));
530
531 {
532 /* stack_len is long rather than scm_sizet in order to guarantee that
533 &stack_len is long aligned */
534 #ifdef SCM_STACK_GROWS_UP
535 #ifdef nosve
536 long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
537 #else
538 long stack_len = scm_stack_size (scm_stack_base);
539 #endif
540 scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
541 #else
542 #ifdef nosve
543 long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
544 #else
545 long stack_len = scm_stack_size (scm_stack_base);
546 #endif
547 scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
548 #endif
549 }
550
551 #else /* USE_THREADS */
552
553 /* Mark every thread's stack and registers */
554 scm_threads_mark_stacks();
555
556 #endif /* USE_THREADS */
557
558 /* FIXME: insert a phase to un-protect string-data preserved
559 * in scm_vector_set_length_x.
560 */
561
562 j = SCM_NUM_PROTECTS;
563 while (j--)
564 scm_gc_mark (scm_sys_protects[j]);
565
566 #ifndef USE_THREADS
567 scm_gc_mark (scm_root->handle);
568 #endif
569
570 scm_mark_weak_vector_spines ();
571
572 scm_guardian_zombify ();
573
574 scm_gc_sweep ();
575
576 --scm_gc_heap_lock;
577 scm_gc_end ();
578
579 #ifdef USE_THREADS
580 SCM_THREAD_CRITICAL_SECTION_END;
581 #endif
582 }
583
584 \f
585 /* {Mark/Sweep}
586 */
587
588
589
590 /* Mark an object precisely.
591 */
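/* The gc_mark_loop / gc_mark_nimp labels below implement tail calls by
 * hand: for an object's last (or only) reference the code overwrites PTR
 * and jumps back to the top instead of recursing, so marking a long list
 * or chain of objects uses constant C stack; only the remaining
 * references are marked by recursive calls to scm_gc_mark. */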
592 void
593 scm_gc_mark (p)
594 SCM p;
595 {
596 register long i;
597 register SCM ptr;
598
599 ptr = p;
600
601 gc_mark_loop:
602 if (SCM_IMP (ptr))
603 return;
604
605 gc_mark_nimp:
606 if (SCM_NCELLP (ptr))
607 scm_wta (ptr, "rogue pointer in heap", NULL);
608
609 switch (SCM_TYP7 (ptr))
610 {
611 case scm_tcs_cons_nimcar:
612 if (SCM_GCMARKP (ptr))
613 break;
614 SCM_SETGCMARK (ptr);
615 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
616 {
617 ptr = SCM_CAR (ptr);
618 goto gc_mark_nimp;
619 }
620 scm_gc_mark (SCM_CAR (ptr));
621 ptr = SCM_GCCDR (ptr);
622 goto gc_mark_nimp;
623 case scm_tcs_cons_imcar:
624 case scm_tc7_pws:
625 if (SCM_GCMARKP (ptr))
626 break;
627 SCM_SETGCMARK (ptr);
628 ptr = SCM_GCCDR (ptr);
629 goto gc_mark_loop;
630 case scm_tcs_cons_gloc:
631 if (SCM_GCMARKP (ptr))
632 break;
633 SCM_SETGCMARK (ptr);
634 {
635 SCM vcell;
636 vcell = SCM_CAR (ptr) - 1L;
637 switch (SCM_CDR (vcell))
638 {
639 default:
640 scm_gc_mark (vcell);
641 ptr = SCM_GCCDR (ptr);
642 goto gc_mark_loop;
643 case 1: /* ! */
644 case 0: /* ! */
645 {
646 SCM layout;
647 SCM * vtable_data;
648 int len;
649 char * fields_desc;
650 register SCM * mem;
651 register int x;
652
653 vtable_data = (SCM *)vcell;
654 layout = vtable_data[scm_vtable_index_layout];
655 len = SCM_LENGTH (layout);
656 fields_desc = SCM_CHARS (layout);
657 /* We're using SCM_GCCDR here like STRUCT_DATA, except
658 that it removes the mark */
659 mem = (SCM *)SCM_GCCDR (ptr);
660
661 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
662 {
663 scm_gc_mark (mem[scm_struct_i_proc + 0]);
664 scm_gc_mark (mem[scm_struct_i_proc + 1]);
665 scm_gc_mark (mem[scm_struct_i_proc + 2]);
666 scm_gc_mark (mem[scm_struct_i_proc + 3]);
667 scm_gc_mark (mem[scm_struct_i_setter]);
668 }
669 if (len)
670 {
671 for (x = 0; x < len - 2; x += 2, ++mem)
672 if (fields_desc[x] == 'p')
673 scm_gc_mark (*mem);
674 if (fields_desc[x] == 'p')
675 {
676 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
677 for (x = *mem; x; --x)
678 scm_gc_mark (*++mem);
679 else
680 scm_gc_mark (*mem);
681 }
682 }
683 if (!SCM_CDR (vcell))
684 {
685 SCM_SETGCMARK (vcell);
686 ptr = vtable_data[scm_vtable_index_vtable];
687 goto gc_mark_loop;
688 }
689 }
690 }
691 }
692 break;
693 case scm_tcs_closures:
694 if (SCM_GCMARKP (ptr))
695 break;
696 SCM_SETGCMARK (ptr);
697 if (SCM_IMP (SCM_CDR (ptr)))
698 {
699 ptr = SCM_CLOSCAR (ptr);
700 goto gc_mark_nimp;
701 }
702 scm_gc_mark (SCM_CLOSCAR (ptr));
703 ptr = SCM_GCCDR (ptr);
704 goto gc_mark_nimp;
705 case scm_tc7_vector:
706 case scm_tc7_lvector:
707 #ifdef CCLO
708 case scm_tc7_cclo:
709 #endif
710 if (SCM_GC8MARKP (ptr))
711 break;
712 SCM_SETGC8MARK (ptr);
713 i = SCM_LENGTH (ptr);
714 if (i == 0)
715 break;
716 while (--i > 0)
717 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
718 scm_gc_mark (SCM_VELTS (ptr)[i]);
719 ptr = SCM_VELTS (ptr)[0];
720 goto gc_mark_loop;
721 case scm_tc7_contin:
722 if SCM_GC8MARKP
723 (ptr) break;
724 SCM_SETGC8MARK (ptr);
725 if (SCM_VELTS (ptr))
726 scm_mark_locations (SCM_VELTS (ptr),
727 (scm_sizet)
728 (SCM_LENGTH (ptr) +
729 (sizeof (SCM_STACKITEM) + -1 +
730 sizeof (scm_contregs)) /
731 sizeof (SCM_STACKITEM)));
732 break;
733 case scm_tc7_bvect:
734 case scm_tc7_byvect:
735 case scm_tc7_ivect:
736 case scm_tc7_uvect:
737 case scm_tc7_fvect:
738 case scm_tc7_dvect:
739 case scm_tc7_cvect:
740 case scm_tc7_svect:
741 #ifdef LONGLONGS
742 case scm_tc7_llvect:
743 #endif
744
745 case scm_tc7_string:
746 SCM_SETGC8MARK (ptr);
747 break;
748
749 case scm_tc7_substring:
750 if (SCM_GC8MARKP(ptr))
751 break;
752 SCM_SETGC8MARK (ptr);
753 ptr = SCM_CDR (ptr);
754 goto gc_mark_loop;
755
756 case scm_tc7_wvect:
757 if (SCM_GC8MARKP(ptr))
758 break;
759 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
760 scm_weak_vectors = ptr;
761 SCM_SETGC8MARK (ptr);
762 if (SCM_IS_WHVEC_ANY (ptr))
763 {
764 int x;
765 int len;
766 int weak_keys;
767 int weak_values;
768
769 len = SCM_LENGTH (ptr);
770 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
771 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
772
773 for (x = 0; x < len; ++x)
774 {
775 SCM alist;
776 alist = SCM_VELTS (ptr)[x];
777
778 /* mark everything on the alist except the keys or
779 * values, according to weak_values and weak_keys. */
780 while ( SCM_NIMP (alist)
781 && SCM_CONSP (alist)
782 && !SCM_GCMARKP (alist)
783 && SCM_NIMP (SCM_CAR (alist))
784 && SCM_CONSP (SCM_CAR (alist)))
785 {
786 SCM kvpair;
787 SCM next_alist;
788
789 kvpair = SCM_CAR (alist);
790 next_alist = SCM_CDR (alist);
791 /*
792 * Do not do this:
793 * SCM_SETGCMARK (alist);
794 * SCM_SETGCMARK (kvpair);
795 *
796 * It may be that either the key or value is protected by
797 * an escaped reference to part of the spine of this alist.
798 * If we mark the spine here, and only mark one or neither of the
799 * key and value, they may never be properly marked.
800 * This leads to a horrible situation in which an alist containing
801 * freelist cells is exported.
802 *
803 * So only mark the spines of these arrays last of all marking.
804 * If somebody confuses us by constructing a weak vector
805 * with a circular alist then we are hosed, but at least we
806 * won't prematurely drop table entries.
807 */
808 if (!weak_keys)
809 scm_gc_mark (SCM_CAR (kvpair));
810 if (!weak_values)
811 scm_gc_mark (SCM_GCCDR (kvpair));
812 alist = next_alist;
813 }
814 if (SCM_NIMP (alist))
815 scm_gc_mark (alist);
816 }
817 }
818 break;
819
820 case scm_tc7_msymbol:
821 if (SCM_GC8MARKP(ptr))
822 break;
823 SCM_SETGC8MARK (ptr);
824 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
825 ptr = SCM_SYMBOL_PROPS (ptr);
826 goto gc_mark_loop;
827 case scm_tc7_ssymbol:
828 if (SCM_GC8MARKP(ptr))
829 break;
830 SCM_SETGC8MARK (ptr);
831 break;
832 case scm_tcs_subrs:
833 ptr = (SCM)(scm_heap_org + (((unsigned long)SCM_CAR (ptr)) >> 8));
834 goto gc_mark_loop;
835 case scm_tc7_port:
836 i = SCM_PTOBNUM (ptr);
837 if (!(i < scm_numptob))
838 goto def;
839 if (SCM_GC8MARKP (ptr))
840 break;
841 SCM_SETGC8MARK (ptr);
842 if (SCM_PTAB_ENTRY(ptr))
843 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
844 if (scm_ptobs[i].mark)
845 {
846 ptr = (scm_ptobs[i].mark) (ptr);
847 goto gc_mark_loop;
848 }
849 else
850 return;
851 break;
852 case scm_tc7_smob:
853 if (SCM_GC8MARKP (ptr))
854 break;
855 SCM_SETGC8MARK (ptr);
856 switch SCM_GCTYP16 (ptr)
857 { /* should be faster than going through scm_smobs */
858 case scm_tc_free_cell:
859 /* printf("found free_cell %X ", ptr); fflush(stdout); */
860 SCM_SETCDR (ptr, SCM_EOL);
861 break;
862 case scm_tcs_bignums:
863 case scm_tc16_flo:
864 break;
865 default:
866 i = SCM_SMOBNUM (ptr);
867 if (!(i < scm_numsmob))
868 goto def;
869 if (scm_smobs[i].mark)
870 {
871 ptr = (scm_smobs[i].mark) (ptr);
872 goto gc_mark_loop;
873 }
874 else
875 return;
876 }
877 break;
878 default:
879 def:scm_wta (ptr, "unknown type in ", "gc_mark");
880 }
881 }
882
883
884 /* Mark a Region Conservatively
885 */
886
887 void
888 scm_mark_locations (x, n)
889 SCM_STACKITEM x[];
890 scm_sizet n;
891 {
892 register long m = n;
893 register int i, j;
894 register SCM_CELLPTR ptr;
895
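  /* For every word in the region that looks like it could be a cell
     pointer, binary-search scm_heap_table (which init_heap_seg keeps
     sorted by address) for the segment containing it.  If such a segment
     exists and its `valid' hook, when present, accepts the pointer, the
     word is treated as a root and marked precisely with scm_gc_mark. */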
896 while (0 <= --m)
897 if SCM_CELLP (*(SCM **) & x[m])
898 {
899 ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & x[m]));
900 i = 0;
901 j = scm_n_heap_segs - 1;
902 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
903 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
904 {
905 while (i <= j)
906 {
907 int seg_id;
908 seg_id = -1;
909 if ( (i == j)
910 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
911 seg_id = i;
912 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
913 seg_id = j;
914 else
915 {
916 int k;
917 k = (i + j) / 2;
918 if (k == i)
919 break;
920 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
921 {
922 j = k;
923 ++i;
924 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
925 continue;
926 else
927 break;
928 }
929 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
930 {
931 i = k;
932 --j;
933 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
934 continue;
935 else
936 break;
937 }
938 }
939 if ( !scm_heap_table[seg_id].valid
940 || scm_heap_table[seg_id].valid (ptr,
941 &scm_heap_table[seg_id]))
942 scm_gc_mark (*(SCM *) & x[m]);
943 break;
944 }
945
946 }
947 }
948 }
949
950
951 /* The following is a C predicate which determines if an SCM value can be
952 regarded as a pointer to a cell on the heap. The code is duplicated
953 from scm_mark_locations. */
954
955
956 int
957 scm_cellp (value)
958 SCM value;
959 {
960 register int i, j;
961 register SCM_CELLPTR ptr;
962
963 if SCM_CELLP (*(SCM **) & value)
964 {
965 ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & value));
966 i = 0;
967 j = scm_n_heap_segs - 1;
968 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
969 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
970 {
971 while (i <= j)
972 {
973 int seg_id;
974 seg_id = -1;
975 if ( (i == j)
976 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
977 seg_id = i;
978 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
979 seg_id = j;
980 else
981 {
982 int k;
983 k = (i + j) / 2;
984 if (k == i)
985 break;
986 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
987 {
988 j = k;
989 ++i;
990 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
991 continue;
992 else
993 break;
994 }
995 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
996 {
997 i = k;
998 --j;
999 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1000 continue;
1001 else
1002 break;
1003 }
1004 }
1005 if ( !scm_heap_table[seg_id].valid
1006 || scm_heap_table[seg_id].valid (ptr,
1007 &scm_heap_table[seg_id]))
1008 return 1;
1009 break;
1010 }
1011
1012 }
1013 }
1014 return 0;
1015 }
1016
1017
1018 static void
1019 scm_mark_weak_vector_spines ()
1020 {
1021 SCM w;
1022
1023 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
1024 {
1025 if (SCM_IS_WHVEC_ANY (w))
1026 {
1027 SCM *ptr;
1028 SCM obj;
1029 int j;
1030 int n;
1031
1032 obj = w;
1033 ptr = SCM_VELTS (w);
1034 n = SCM_LENGTH (w);
1035 for (j = 0; j < n; ++j)
1036 {
1037 SCM alist;
1038
1039 alist = ptr[j];
1040 while ( SCM_NIMP (alist)
1041 && SCM_CONSP (alist)
1042 && !SCM_GCMARKP (alist)
1043 && SCM_NIMP (SCM_CAR (alist))
1044 && SCM_CONSP (SCM_CAR (alist)))
1045 {
1046 SCM_SETGCMARK (alist);
1047 SCM_SETGCMARK (SCM_CAR (alist));
1048 alist = SCM_GCCDR (alist);
1049 }
1050 }
1051 }
1052 }
1053 }
1054
1055
1056
1057 void
1058 scm_gc_sweep ()
1059 {
1060 register SCM_CELLPTR ptr;
1061 #ifdef SCM_POINTERS_MUNGED
1062 register SCM scmptr;
1063 #else
1064 #undef scmptr
1065 #define scmptr (SCM)ptr
1066 #endif
1067 register SCM nfreelist;
1068 register SCM *hp_freelist;
1069 register long m;
1070 register int span;
1071 long i;
1072 scm_sizet seg_size;
1073
1074 m = 0;
1075
1076 /* Reset all free list pointers. We'll reconstruct them completely
1077 while scanning. */
1078 for (i = 0; i < scm_n_heap_segs; i++)
1079 *scm_heap_table[i].freelistp = SCM_EOL;
1080
1081 for (i = 0; i < scm_n_heap_segs; i++)
1082 {
1083 register scm_sizet n = 0;
1084 register scm_sizet j;
1085
1086 /* Unmarked cells go onto the front of the freelist this heap
1087 segment points to. Rather than updating the real freelist
1088 pointer as we go along, we accumulate the new head in
1089 nfreelist. Then, if it turns out that the entire segment is
1090 free, we free (i.e., malloc's free) the whole segment, and
1091 simply don't assign nfreelist back into the real freelist. */
1092 hp_freelist = scm_heap_table[i].freelistp;
1093 nfreelist = *hp_freelist;
1094
1095 span = scm_heap_table[i].ncells;
1096 ptr = CELL_UP (scm_heap_table[i].bounds[0]);
1097 seg_size = CELL_DN (scm_heap_table[i].bounds[1]) - ptr;
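      /* Walk every object slot in the segment: J counts down from
         SEG_SIZE in steps of SPAN (the object size in cells) while PTR
         advances by SPAN cells, so each heap object is visited once. */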
1098 for (j = seg_size + span; j -= span; ptr += span)
1099 {
1100 #ifdef SCM_POINTERS_MUNGED
1101 scmptr = PTR2SCM (ptr);
1102 #endif
1103 switch SCM_TYP7 (scmptr)
1104 {
1105 case scm_tcs_cons_gloc:
1106 if (SCM_GCMARKP (scmptr))
1107 {
1108 if (SCM_CDR (SCM_CAR (scmptr) - 1) == (SCM)1)
1109 SCM_SETCDR (SCM_CAR (scmptr) - 1, (SCM) 0);
1110 goto cmrkcontinue;
1111 }
1112 {
1113 SCM vcell;
1114 vcell = SCM_CAR (scmptr) - 1L;
1115
1116 if ((SCM_CDR (vcell) == 0) || (SCM_CDR (vcell) == 1))
1117 {
1118 SCM *p = (SCM *) SCM_GCCDR (scmptr);
1119 if (((SCM*) vcell)[scm_struct_i_flags]
1120 & SCM_STRUCTF_LIGHT)
1121 {
1122 SCM layout = ((SCM*)vcell)[scm_vtable_index_layout];
1123 m += (SCM_LENGTH (layout) / 2) * sizeof (SCM);
1124 free ((char *) p);
1125 }
1126 else
1127 {
1128 m += p[scm_struct_i_n_words] * sizeof (SCM) + 7;
1129 /* I feel like I'm programming in BCPL here... */
1130 free ((char *) p[scm_struct_i_ptr]);
1131 }
1132 }
1133 }
1134 break;
1135 case scm_tcs_cons_imcar:
1136 case scm_tcs_cons_nimcar:
1137 case scm_tcs_closures:
1138 case scm_tc7_pws:
1139 if (SCM_GCMARKP (scmptr))
1140 goto cmrkcontinue;
1141 break;
1142 case scm_tc7_wvect:
1143 if (SCM_GC8MARKP (scmptr))
1144 {
1145 goto c8mrkcontinue;
1146 }
1147 else
1148 {
1149 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1150 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
1151 break;
1152 }
1153
1154 case scm_tc7_vector:
1155 case scm_tc7_lvector:
1156 #ifdef CCLO
1157 case scm_tc7_cclo:
1158 #endif
1159 if (SCM_GC8MARKP (scmptr))
1160 goto c8mrkcontinue;
1161
1162 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1163 freechars:
1164 scm_must_free (SCM_CHARS (scmptr));
1165 /* SCM_SETCHARS(scmptr, 0);*/
1166 break;
1167 case scm_tc7_bvect:
1168 if SCM_GC8MARKP (scmptr)
1169 goto c8mrkcontinue;
1170 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1171 goto freechars;
1172 case scm_tc7_byvect:
1173 if SCM_GC8MARKP (scmptr)
1174 goto c8mrkcontinue;
1175 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1176 goto freechars;
1177 case scm_tc7_ivect:
1178 case scm_tc7_uvect:
1179 if SCM_GC8MARKP (scmptr)
1180 goto c8mrkcontinue;
1181 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1182 goto freechars;
1183 case scm_tc7_svect:
1184 if SCM_GC8MARKP (scmptr)
1185 goto c8mrkcontinue;
1186 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1187 goto freechars;
1188 #ifdef LONGLONGS
1189 case scm_tc7_llvect:
1190 if SCM_GC8MARKP (scmptr)
1191 goto c8mrkcontinue;
1192 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1193 goto freechars;
1194 #endif
1195 case scm_tc7_fvect:
1196 if SCM_GC8MARKP (scmptr)
1197 goto c8mrkcontinue;
1198 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1199 goto freechars;
1200 case scm_tc7_dvect:
1201 if SCM_GC8MARKP (scmptr)
1202 goto c8mrkcontinue;
1203 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1204 goto freechars;
1205 case scm_tc7_cvect:
1206 if SCM_GC8MARKP (scmptr)
1207 goto c8mrkcontinue;
1208 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1209 goto freechars;
1210 case scm_tc7_substring:
1211 if (SCM_GC8MARKP (scmptr))
1212 goto c8mrkcontinue;
1213 break;
1214 case scm_tc7_string:
1215 if (SCM_GC8MARKP (scmptr))
1216 goto c8mrkcontinue;
1217 m += SCM_HUGE_LENGTH (scmptr) + 1;
1218 goto freechars;
1219 case scm_tc7_msymbol:
1220 if (SCM_GC8MARKP (scmptr))
1221 goto c8mrkcontinue;
1222 m += ( SCM_LENGTH (scmptr)
1223 + 1
1224 + sizeof (SCM) * ((SCM *)SCM_CHARS (scmptr) - SCM_SLOTS(scmptr)));
1225 scm_must_free ((char *)SCM_SLOTS (scmptr));
1226 break;
1227 case scm_tc7_contin:
1228 if SCM_GC8MARKP (scmptr)
1229 goto c8mrkcontinue;
1230 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
1231 if (SCM_VELTS (scmptr))
1232 goto freechars;
1233 case scm_tc7_ssymbol:
1234 if SCM_GC8MARKP(scmptr)
1235 goto c8mrkcontinue;
1236 break;
1237 case scm_tcs_subrs:
1238 continue;
1239 case scm_tc7_port:
1240 if SCM_GC8MARKP (scmptr)
1241 goto c8mrkcontinue;
1242 if SCM_OPENP (scmptr)
1243 {
1244 int k = SCM_PTOBNUM (scmptr);
1245 if (!(k < scm_numptob))
1246 goto sweeperr;
1247 /* Keep "revealed" ports alive. */
1248 if (scm_revealed_count(scmptr) > 0)
1249 continue;
 1250 	      /* Yes, I really do mean scm_ptobs[k].free */
 1251 	      /* rather than scm_ptobs[k].close. .close */
 1252 	      /* is for explicit CLOSE-PORT by user */
1253 (scm_ptobs[k].free) (scmptr);
1254 SCM_SETSTREAM (scmptr, 0);
1255 scm_remove_from_port_table (scmptr);
1256 scm_gc_ports_collected++;
1257 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
1258 }
1259 break;
1260 case scm_tc7_smob:
1261 switch SCM_GCTYP16 (scmptr)
1262 {
1263 case scm_tc_free_cell:
1264 if SCM_GC8MARKP (scmptr)
1265 goto c8mrkcontinue;
1266 break;
1267 #ifdef SCM_BIGDIG
1268 case scm_tcs_bignums:
1269 if SCM_GC8MARKP (scmptr)
1270 goto c8mrkcontinue;
1271 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1272 goto freechars;
1273 #endif /* def SCM_BIGDIG */
1274 case scm_tc16_flo:
1275 if SCM_GC8MARKP (scmptr)
1276 goto c8mrkcontinue;
1277 switch ((int) (SCM_CAR (scmptr) >> 16))
1278 {
1279 case (SCM_IMAG_PART | SCM_REAL_PART) >> 16:
1280 m += sizeof (double);
1281 case SCM_REAL_PART >> 16:
1282 case SCM_IMAG_PART >> 16:
1283 m += sizeof (double);
1284 goto freechars;
1285 case 0:
1286 break;
1287 default:
1288 goto sweeperr;
1289 }
1290 break;
1291 default:
1292 if SCM_GC8MARKP (scmptr)
1293 goto c8mrkcontinue;
1294
1295 {
1296 int k;
1297 k = SCM_SMOBNUM (scmptr);
1298 if (!(k < scm_numsmob))
1299 goto sweeperr;
1300 m += (scm_smobs[k].free) ((SCM) scmptr);
1301 break;
1302 }
1303 }
1304 break;
1305 default:
1306 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1307 }
1308 n += span;
1309 #if 0
1310 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1311 exit (2);
1312 #endif
1313 /* Stick the new cell on the front of nfreelist. It's
1314 critical that we mark this cell as freed; otherwise, the
1315 conservative collector might trace it as some other type
1316 of object. */
1317 SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
1318 SCM_SETCDR (scmptr, nfreelist);
1319 nfreelist = scmptr;
1320
1321 continue;
1322 c8mrkcontinue:
1323 SCM_CLRGC8MARK (scmptr);
1324 continue;
1325 cmrkcontinue:
1326 SCM_CLRGCMARK (scmptr);
1327 }
1328 #ifdef GC_FREE_SEGMENTS
1329 if (n == seg_size)
1330 {
1331 register long j;
1332
1333 scm_heap_size -= seg_size;
1334 free ((char *) scm_heap_table[i].bounds[0]);
1335 scm_heap_table[i].bounds[0] = 0;
1336 for (j = i + 1; j < scm_n_heap_segs; j++)
1337 scm_heap_table[j - 1] = scm_heap_table[j];
1338 scm_n_heap_segs -= 1;
1339 i--; /* We need to scan the segment just moved. */
1340 }
1341 else
1342 #endif /* ifdef GC_FREE_SEGMENTS */
1343 /* Update the real freelist pointer to point to the head of
1344 the list of free cells we've built for this segment. */
1345 *hp_freelist = nfreelist;
1346
1347 #ifdef DEBUG_FREELIST
1348 scm_check_freelist ();
1349 scm_map_free_list ();
1350 #endif
1351
1352 scm_gc_cells_collected += n;
1353 }
1354 /* Scan weak vectors. */
1355 {
1356 SCM *ptr, w;
1357 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
1358 {
1359 if (!SCM_IS_WHVEC_ANY (w))
1360 {
1361 register long j, n;
1362
1363 ptr = SCM_VELTS (w);
1364 n = SCM_LENGTH (w);
1365 for (j = 0; j < n; ++j)
1366 if (SCM_NIMP (ptr[j]) && SCM_FREEP (ptr[j]))
1367 ptr[j] = SCM_BOOL_F;
1368 }
1369 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1370 {
1371 SCM obj = w;
1372 register long n = SCM_LENGTH (w);
1373 register long j;
1374
1375 ptr = SCM_VELTS (w);
1376
1377 for (j = 0; j < n; ++j)
1378 {
1379 SCM * fixup;
1380 SCM alist;
1381 int weak_keys;
1382 int weak_values;
1383
1384 weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
1385 weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);
1386
1387 fixup = ptr + j;
1388 alist = *fixup;
1389
1390 while (SCM_NIMP (alist)
1391 && SCM_CONSP (alist)
1392 && SCM_NIMP (SCM_CAR (alist))
1393 && SCM_CONSP (SCM_CAR (alist)))
1394 {
1395 SCM key;
1396 SCM value;
1397
1398 key = SCM_CAAR (alist);
1399 value = SCM_CDAR (alist);
1400 if ( (weak_keys && SCM_NIMP (key) && SCM_FREEP (key))
1401 || (weak_values && SCM_NIMP (value) && SCM_FREEP (value)))
1402 {
1403 *fixup = SCM_CDR (alist);
1404 }
1405 else
1406 fixup = SCM_CDRLOC (alist);
1407 alist = SCM_CDR (alist);
1408 }
1409 }
1410 }
1411 }
1412 }
1413 scm_cells_allocated = (scm_heap_size - scm_gc_cells_collected);
1414 scm_mallocated -= m;
1415 scm_gc_malloc_collected = m;
1416 }
1417
1418
1419 \f
1420
1421 /* {Front end to malloc}
1422 *
1423 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1424 *
 1425  * These functions provide services comparable to malloc, realloc, and
 1426  * free. They are for allocating malloced parts of Scheme objects.
1427 * The primary purpose of the front end is to impose calls to gc.
1428 */
1429
1430 /* scm_must_malloc
1431 * Return newly malloced storage or throw an error.
1432 *
1433 * The parameter WHAT is a string for error reporting.
 1434  * If this allocation would push the total past the threshold
 1435  * scm_mtrigger, or if the first call to malloc fails,
1436 * garbage collect -- on the presumption that some objects
1437 * using malloced storage may be collected.
1438 *
1439 * The limit scm_mtrigger may be raised by this allocation.
1440 */
1441 char *
1442 scm_must_malloc (len, what)
1443 scm_sizet len;
1444 const char *what;
1445 {
1446 char *ptr;
1447 scm_sizet size = len;
1448 unsigned long nm = scm_mallocated + size;
1449 if (len != size)
1450 malerr:
1451 scm_wta (SCM_MAKINUM (len), (char *) SCM_NALLOC, what);
1452 if ((nm <= scm_mtrigger))
1453 {
1454 SCM_SYSCALL (ptr = (char *) malloc (size));
1455 if (NULL != ptr)
1456 {
1457 scm_mallocated = nm;
1458 return ptr;
1459 }
1460 }
1461
1462 scm_igc (what);
1463 nm = scm_mallocated + size;
1464 SCM_SYSCALL (ptr = (char *) malloc (size));
1465 if (NULL != ptr)
1466 {
1467 scm_mallocated = nm;
1468 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1469 if (nm > scm_mtrigger)
1470 scm_mtrigger = nm + nm / 2;
1471 else
1472 scm_mtrigger += scm_mtrigger / 2;
1473 }
1474 return ptr;
1475 }
1476 goto malerr;
1477 }
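/* A worked example of the trigger adjustment above (numbers illustrative):
 * with scm_mtrigger at its initial value of 100000 and
 * SCM_MTRIGGER_HYSTERESIS equal to 10000, an allocation that leaves the
 * post-GC total NM at 95000 raises the trigger to 150000
 * (scm_mtrigger += scm_mtrigger / 2), while one that leaves NM at 120000,
 * past the old trigger even after collecting, sets it to
 * 120000 + 60000 = 180000 (nm + nm / 2). */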
1478
1479
1480 /* scm_must_realloc
1481 * is similar to scm_must_malloc.
1482 */
1483 char *
1484 scm_must_realloc (char *where,
1485 scm_sizet olen,
1486 scm_sizet len,
1487 const char *what)
1488 {
1489 char *ptr;
1490 scm_sizet size = len;
1491 scm_sizet nm = scm_mallocated + size - olen;
1492 if (len != size)
1493 ralerr:
1494 scm_wta (SCM_MAKINUM (len), (char *) SCM_NALLOC, what);
1495 if ((nm <= scm_mtrigger))
1496 {
1497 SCM_SYSCALL (ptr = (char *) realloc (where, size));
1498 if (NULL != ptr)
1499 {
1500 scm_mallocated = nm;
1501 return ptr;
1502 }
1503 }
1504 scm_igc (what);
1505 nm = scm_mallocated + size - olen;
1506 SCM_SYSCALL (ptr = (char *) realloc (where, size));
1507 if (NULL != ptr)
1508 {
1509 scm_mallocated = nm;
1510 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1511 if (nm > scm_mtrigger)
1512 scm_mtrigger = nm + nm / 2;
1513 else
1514 scm_mtrigger += scm_mtrigger / 2;
1515 }
1516 return ptr;
1517 }
1518 goto ralerr;
1519 }
1520
1521 void
1522 scm_must_free (obj)
1523 char *obj;
1524 {
1525 if (obj)
1526 free (obj);
1527 else
1528 scm_wta (SCM_INUM0, "already free", "");
1529 }
1530
1531 /* Announce that there has been some malloc done that will be freed
1532 * during gc. A typical use is for a smob that uses some malloced
 1533  * memory but cannot get it from scm_must_malloc (for whatever
 1534  * reason). When a new object of this smob type is created, you call
1535 * scm_done_malloc with the size of the object. When your smob free
1536 * function is called, be sure to include this size in the return
1537 * value. */
1538
1539 void
1540 scm_done_malloc (size)
1541 long size;
1542 {
1543 scm_mallocated += size;
1544
1545 if (scm_mallocated > scm_mtrigger)
1546 {
1547 scm_igc ("foreign mallocs");
1548 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1549 {
1550 if (scm_mallocated > scm_mtrigger)
1551 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1552 else
1553 scm_mtrigger += scm_mtrigger / 2;
1554 }
1555 }
1556 }
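
/* The sketch below (disabled, not part of Guile) illustrates the usage
 * pattern described above.  The smob type `image', its layout and its
 * helper names are hypothetical; only scm_done_malloc and the free
 * function's return-the-size convention come from this file. */
#if 0
struct image
{
  int width, height;
  char *pixels;
};

/* Creation: storage obtained with plain malloc is announced with
   scm_done_malloc so the GC trigger accounting stays correct. */
static struct image *
make_image_data (int width, int height)
{
  struct image *im = (struct image *) malloc (sizeof (struct image));
  im->width = width;
  im->height = height;
  im->pixels = (char *) malloc (width * height);
  scm_done_malloc (sizeof (struct image) + width * height);
  return im;
}

/* Smob free function: release the storage and return the number of bytes
   previously announced, so scm_gc_sweep can subtract it from
   scm_mallocated. */
static scm_sizet
free_image (SCM obj)
{
  struct image *im = (struct image *) SCM_CDR (obj);
  scm_sizet size = sizeof (struct image) + im->width * im->height;
  free (im->pixels);
  free (im);
  return size;
}
#endif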
1557
1558
1559 \f
1560
1561 /* {Heap Segments}
1562 *
1563 * Each heap segment is an array of objects of a particular size.
1564 * Every segment has an associated (possibly shared) freelist.
1565 * A table of segment records is kept that records the upper and
1566 * lower extents of the segment; this is used during the conservative
 1567  * phase of gc to identify probable gc roots (because they point
1568 * into valid segments at reasonable offsets). */
1569
1570 /* scm_expmem
 1571  * is non-zero if the requested initial heap segment was allocated in full.
1572 * If scm_expmem is set to one, subsequent segment allocations will
1573 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1574 */
1575 int scm_expmem = 0;
1576
1577 /* scm_heap_org
1578 * is the lowest base address of any heap segment.
1579 */
1580 SCM_CELLPTR scm_heap_org;
1581
1582 struct scm_heap_seg_data * scm_heap_table = 0;
1583 int scm_n_heap_segs = 0;
1584
1585 /* scm_heap_size
1586 * is the total number of cells in heap segments.
1587 */
1588 unsigned long scm_heap_size = 0;
1589
1590 /* init_heap_seg
 1591  * initializes a new heap segment and returns its size in bytes (0 on failure).
1592 *
1593 * The segment origin, segment size in bytes, and the span of objects
1594 * in cells are input parameters. The freelist is both input and output.
1595 *
 1596  * This function presumes that the scm_heap_table has already been expanded
 1597  * to accommodate a new segment record.
1598 */
1599
1600
1601 static scm_sizet
1602 init_heap_seg (seg_org, size, ncells, freelistp)
1603 SCM_CELLPTR seg_org;
1604 scm_sizet size;
1605 int ncells;
1606 SCM *freelistp;
1607 {
1608 register SCM_CELLPTR ptr;
1609 #ifdef SCM_POINTERS_MUNGED
1610 register SCM scmptr;
1611 #else
1612 #undef scmptr
1613 #define scmptr ptr
1614 #endif
1615 SCM_CELLPTR seg_end;
1616 int new_seg_index;
1617 int n_new_objects;
1618
1619 if (seg_org == NULL)
1620 return 0;
1621
1622 ptr = seg_org;
1623
1624 /* Compute the ceiling on valid object pointers w/in this segment.
1625 */
1626 seg_end = CELL_DN ((char *) ptr + size);
1627
1628 /* Find the right place and insert the segment record.
1629 *
1630 */
1631 for (new_seg_index = 0;
1632 ( (new_seg_index < scm_n_heap_segs)
1633 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
1634 new_seg_index++)
1635 ;
1636
1637 {
1638 int i;
1639 for (i = scm_n_heap_segs; i > new_seg_index; --i)
1640 scm_heap_table[i] = scm_heap_table[i - 1];
1641 }
1642
1643 ++scm_n_heap_segs;
1644
1645 scm_heap_table[new_seg_index].valid = 0;
1646 scm_heap_table[new_seg_index].ncells = ncells;
1647 scm_heap_table[new_seg_index].freelistp = freelistp;
1648 scm_heap_table[new_seg_index].bounds[0] = (SCM_CELLPTR)ptr;
1649 scm_heap_table[new_seg_index].bounds[1] = (SCM_CELLPTR)seg_end;
1650
1651
1652 /* Compute the least valid object pointer w/in this segment
1653 */
1654 ptr = CELL_UP (ptr);
1655
1656
1657 n_new_objects = seg_end - ptr;
1658
1659 /* Prepend objects in this segment to the freelist.
1660 */
1661 while (ptr < seg_end)
1662 {
1663 #ifdef SCM_POINTERS_MUNGED
1664 scmptr = PTR2SCM (ptr);
1665 #endif
1666 SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
1667 SCM_SETCDR (scmptr, PTR2SCM (ptr + ncells));
1668 ptr += ncells;
1669 }
1670
1671 ptr -= ncells;
1672
1673 /* Patch up the last freelist pointer in the segment
1674 * to join it to the input freelist.
1675 */
1676 SCM_SETCDR (PTR2SCM (ptr), *freelistp);
1677 *freelistp = PTR2SCM (CELL_UP (seg_org));
1678
1679 scm_heap_size += (ncells * n_new_objects);
1680 return size;
1681 #ifdef scmptr
1682 #undef scmptr
1683 #endif
1684 }
1685
1686
1687 static void
1688 alloc_some_heap (ncells, freelistp)
1689 int ncells;
1690 SCM * freelistp;
1691 {
1692 struct scm_heap_seg_data * tmptable;
1693 SCM_CELLPTR ptr;
1694 scm_sizet len;
1695
1696 /* Critical code sections (such as the garbage collector)
1697 * aren't supposed to add heap segments.
1698 */
1699 if (scm_gc_heap_lock)
1700 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
1701
1702 /* Expand the heap tables to have room for the new segment.
1703 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
1704 * only if the allocation of the segment itself succeeds.
1705 */
1706 len = (1 + scm_n_heap_segs) * sizeof (struct scm_heap_seg_data);
1707
1708 SCM_SYSCALL (tmptable = ((struct scm_heap_seg_data *)
1709 realloc ((char *)scm_heap_table, len)));
1710 if (!tmptable)
1711 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
1712 else
1713 scm_heap_table = tmptable;
1714
1715
1716 /* Pick a size for the new heap segment.
1717 * The rule for picking the size of a segment is explained in
1718 * gc.h
1719 */
1720 if (scm_expmem)
1721 {
1722 len = (scm_sizet) (SCM_EXPHEAP (scm_heap_size) * sizeof (scm_cell));
1723 if ((scm_sizet) (SCM_EXPHEAP (scm_heap_size) * sizeof (scm_cell)) != len)
1724 len = 0;
1725 }
1726 else
1727 len = SCM_HEAP_SEG_SIZE;
1728
1729 {
1730 scm_sizet smallest;
1731
1732 smallest = (ncells * sizeof (scm_cell));
1733 if (len < smallest)
1734 len = (ncells * sizeof (scm_cell));
1735
1736 /* Allocate with decaying ambition. */
1737 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
1738 && (len >= smallest))
1739 {
1740 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (len));
1741 if (ptr)
1742 {
1743 init_heap_seg (ptr, len, ncells, freelistp);
1744 return;
1745 }
1746 len /= 2;
1747 }
1748 }
1749
1750 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
1751 }
1752
1753
1754
1755 SCM_PROC (s_unhash_name, "unhash-name", 1, 0, 0, scm_unhash_name);
1756 SCM
1757 scm_unhash_name (name)
1758 SCM name;
1759 {
1760 int x;
1761 int bound;
1762 SCM_ASSERT (SCM_NIMP (name) && SCM_SYMBOLP (name), name, SCM_ARG1, s_unhash_name);
1763 SCM_DEFER_INTS;
1764 bound = scm_n_heap_segs;
1765 for (x = 0; x < bound; ++x)
1766 {
1767 SCM_CELLPTR p;
1768 SCM_CELLPTR pbound;
1769 p = (SCM_CELLPTR)scm_heap_table[x].bounds[0];
1770 pbound = (SCM_CELLPTR)scm_heap_table[x].bounds[1];
1771 while (p < pbound)
1772 {
1773 SCM incar;
1774 incar = p->car;
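	  /* A car whose low three bits are 001 is a gloc: the car holds the
	     address of a variable's vcell plus one (cf. scm_tcs_cons_gloc in
	     scm_gc_mark above).  Strip the tag and, if the vcell belongs to
	     NAME (or NAME is #t, meaning unhash everything) and its cdr is
	     not 0 or 1 (which would mark a struct vtable), store NAME back
	     into the car so the cell again refers to the plain symbol. */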
1775 if (1 == (7 & (int)incar))
1776 {
1777 --incar;
1778 if ( ((name == SCM_BOOL_T) || (SCM_CAR (incar) == name))
1779 && (SCM_CDR (incar) != 0)
1780 && (SCM_CDR (incar) != 1))
1781 {
1782 p->car = name;
1783 }
1784 }
1785 ++p;
1786 }
1787 }
1788 SCM_ALLOW_INTS;
1789 return name;
1790 }
1791
1792
1793 \f
1794 /* {GC Protection Helper Functions}
1795 */
1796
1797
1798 void
1799 scm_remember (ptr)
1800 SCM * ptr;
1801 {}
1802
1803
1804 SCM
1805 scm_return_first (SCM elt, ...)
1806 {
1807 return elt;
1808 }
1809
1810
1811 SCM
1812 scm_permanent_object (obj)
1813 SCM obj;
1814 {
1815 SCM_REDEFER_INTS;
1816 scm_permobjs = scm_cons (obj, scm_permobjs);
1817 SCM_REALLOW_INTS;
1818 return obj;
1819 }
1820
1821
1822 /* Protect OBJ from the garbage collector. OBJ will not be freed,
1823 even if all other references are dropped, until someone applies
1824 scm_unprotect_object to it. This function returns OBJ.
1825
1826 Calls to scm_protect_object nest. For every object O, there is a
1827 counter which scm_protect_object(O) increments and
1828 scm_unprotect_object(O) decrements, if it is greater than zero. If
1829 an object's counter is greater than zero, the garbage collector
1830 will not free it.
1831
1832 Of course, that's not how it's implemented. scm_protect_object and
1833 scm_unprotect_object just maintain a list of references to things.
1834 Since the GC knows about this list, all objects it mentions stay
1835 alive. scm_protect_object adds its argument to the list;
1836 scm_unprotect_object removes the first occurrence of its argument
1837 to the list. */
1838 SCM
1839 scm_protect_object (obj)
1840 SCM obj;
1841 {
1842 scm_protects = scm_cons (obj, scm_protects);
1843
1844 return obj;
1845 }
1846
1847
1848 /* Remove any protection for OBJ established by a prior call to
1849 scm_protect_object. This function returns OBJ.
1850
1851 See scm_protect_object for more information. */
1852 SCM
1853 scm_unprotect_object (obj)
1854 SCM obj;
1855 {
1856 SCM *tail_ptr = &scm_protects;
1857
1858 while (SCM_NIMP (*tail_ptr) && SCM_CONSP (*tail_ptr))
1859 if (SCM_CAR (*tail_ptr) == obj)
1860 {
1861 *tail_ptr = SCM_CDR (*tail_ptr);
1862 break;
1863 }
1864 else
1865 tail_ptr = SCM_CDRLOC (*tail_ptr);
1866
1867 return obj;
1868 }
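
/* Example of the nesting behaviour (illustrative):
 *
 *   scm_protect_object (obj);
 *   scm_protect_object (obj);
 *   scm_unprotect_object (obj);   -- obj is still protected: one
 *                                    reference remains on scm_protects
 *   scm_unprotect_object (obj);   -- now obj may be collected again
 */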
1869
1870 int terminating;
1871
1872 /* called on process termination. */
1873 static void cleanup (void)
1874 {
1875 terminating = 1;
1876 scm_flush_all_ports ();
1877 }
1878
1879 \f
1880 int
1881 scm_init_storage (scm_sizet init_heap_size)
1882 {
1883 scm_sizet j;
1884
1885 j = SCM_NUM_PROTECTS;
1886 while (j)
1887 scm_sys_protects[--j] = SCM_BOOL_F;
1888 scm_block_gc = 1;
1889 scm_freelist = SCM_EOL;
1890 scm_expmem = 0;
1891
1892 j = SCM_HEAP_SEG_SIZE;
1893 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
1894 scm_heap_table = ((struct scm_heap_seg_data *)
1895 scm_must_malloc (sizeof (struct scm_heap_seg_data), "hplims"));
1896 if (0L == init_heap_size)
1897 init_heap_size = SCM_INIT_HEAP_SIZE;
1898 j = init_heap_size;
1899 if ((init_heap_size != j)
1900 || !init_heap_seg ((SCM_CELLPTR) malloc (j), j, 1, &scm_freelist))
1901 {
1902 j = SCM_HEAP_SEG_SIZE;
1903 if (!init_heap_seg ((SCM_CELLPTR) malloc (j), j, 1, &scm_freelist))
1904 return 1;
1905 }
1906 else
1907 scm_expmem = 1;
1908 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0]);
1909 /* scm_hplims[0] can change. do not remove scm_heap_org */
1910 scm_weak_vectors = SCM_EOL;
1911
1912 /* Initialise the list of ports. */
1913 scm_port_table = (scm_port **)
1914 malloc (sizeof (scm_port *) * scm_port_table_room);
1915 if (!scm_port_table)
1916 return 1;
1917
1918 atexit (cleanup);
1919
1920 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
1921 SCM_SETCDR (scm_undefineds, scm_undefineds);
1922
1923 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
1924 scm_nullstr = scm_makstr (0L, 0);
1925 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
1926 scm_symhash = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
1927 scm_weak_symhash = scm_make_weak_key_hash_table ((SCM) SCM_MAKINUM (scm_symhash_dim));
1928 scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
1929 scm_stand_in_procs = SCM_EOL;
1930 scm_permobjs = SCM_EOL;
1931 scm_protects = SCM_EOL;
1932 scm_asyncs = SCM_EOL;
1933 scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
1934 scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
1935 #ifdef SCM_BIGDIG
1936 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
1937 #endif
1938 return 0;
1939 }
1940 \f
1941
1942 void
1943 scm_init_gc ()
1944 {
1945 #include "gc.x"
1946 }