1 /* Copyright (C) 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41 \f
42 #include <stdio.h>
43 #include "_scm.h"
44 #include "stime.h"
45 #include "stackchk.h"
46 #include "struct.h"
47 #include "genio.h"
48 #include "weaks.h"
49 #include "guardians.h"
50 #include "smob.h"
51 #include "unif.h"
52 #include "async.h"
53
54 #include "gc.h"
55
56 #ifdef HAVE_MALLOC_H
57 #include <malloc.h>
58 #endif
59
60 #ifdef HAVE_UNISTD_H
61 #include <unistd.h>
62 #endif
63
64 #ifdef __STDC__
65 #include <stdarg.h>
66 #define var_start(x, y) va_start(x, y)
67 #else
68 #include <varargs.h>
69 #define var_start(x, y) va_start(x)
70 #endif
71
72 \f
73 /* {heap tuning parameters}
74 *
75 * These are parameters for controlling memory allocation. The heap
76 * is the area out of which cons pairs (scm_cons) and object headers are allocated.
77 *
78 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
79 * 64 bit machine. The units of the _SIZE parameters are bytes.
80 * Cons pairs and object headers occupy one heap cell.
81 *
82 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
83 * allocated initially the heap will grow by SCM_EXPHEAP (scm_heap_size) cells
84 * each subsequent time more heap is needed.
85 *
86 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
87 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
88 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
89 * is in scm_init_storage() and alloc_some_heap() in this file.
90 *
91 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
92 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
93 *
94 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
95 * is needed.
96 *
97 * SCM_INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
98 * trigger a GC.
99 *
100 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
101 * reclaimed by a GC triggered by must_malloc. If less than this is
102 * reclaimed, the trigger threshold is raised. [I don't know what a
103 * good value is. I arbitrarily chose 1/10 of SCM_INIT_MALLOC_LIMIT to
104 * work around an oscillation that caused almost constant GC.]
105 */
106
107 #define SCM_INIT_HEAP_SIZE (32768L*sizeof(scm_cell))
108 #define SCM_MIN_HEAP_SEG_SIZE (2048L*sizeof(scm_cell))
109 #ifdef _QC
110 # define SCM_HEAP_SEG_SIZE 32768L
111 #else
112 # ifdef sequent
113 # define SCM_HEAP_SEG_SIZE (7000L*sizeof(scm_cell))
114 # else
115 # define SCM_HEAP_SEG_SIZE (16384L*sizeof(scm_cell))
116 # endif
117 #endif
118 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size*2)
119 #define SCM_INIT_MALLOC_LIMIT 100000
120 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
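/* Illustrative sketch (a condensed paraphrase of alloc_some_heap further
 * down in this file, not additional code): once the initial
 * SCM_INIT_HEAP_SIZE allocation has succeeded, the next segment is sized
 * by SCM_EXPHEAP and the request shrinks with "decaying ambition" until
 * malloc cooperates or the minimum is reached (smallest is the caller's
 * minimum request, ncells * sizeof (scm_cell)):
 *
 *   scm_sizet len = SCM_EXPHEAP (scm_heap_size) * sizeof (scm_cell);
 *   while (len >= SCM_MIN_HEAP_SEG_SIZE && len >= smallest)
 *     {
 *       if ((ptr = (SCM_CELLPTR) malloc (len)) != NULL)
 *         break;                                  -- accept this size
 *       len /= 2;                                 -- ask for less
 *     }
 *
 * With 8 byte cells the initial heap is 32768 cells (256 kB), so the
 * first expansion requests room for roughly another 65536 cells (512 kB). */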
121
122 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
123 bounds for allocated storage */
124
125 #ifdef PROT386
126 /* in 386 protected mode we must only adjust the offset */
127 # define CELL_UP(p) MK_FP(FP_SEG(p), ~7&(FP_OFF(p)+7))
128 # define CELL_DN(p) MK_FP(FP_SEG(p), ~7&FP_OFF(p))
129 #else
130 # ifdef _UNICOS
131 # define CELL_UP(p) (SCM_CELLPTR)(~1L & ((long)(p)+1L))
132 # define CELL_DN(p) (SCM_CELLPTR)(~1L & (long)(p))
133 # else
134 # define CELL_UP(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & ((long)(p)+sizeof(scm_cell)-1L))
135 # define CELL_DN(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & (long)(p))
136 # endif /* UNICOS */
137 #endif /* PROT386 */
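/* Worked example (illustrative): with 8 byte cells the mask is ~7L, so
 * for a raw malloc result p == 0x1003
 *
 *   CELL_UP (p) == (SCM_CELLPTR) 0x1008   -- first cell boundary at or above p
 *   CELL_DN (p) == (SCM_CELLPTR) 0x1000   -- last cell boundary at or below p
 *
 * init_heap_seg below applies CELL_UP to the segment origin and CELL_DN
 * to the segment end, so only whole, aligned cells are threaded onto the
 * freelist. */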
138
139
140 \f
141 /* scm_freelist
142 * is the head of the freelist of cons pairs.
143 */
144 SCM scm_freelist = SCM_EOL;
145
146 /* scm_mtrigger
147 * is the number of bytes of must_malloc allocation needed to trigger gc.
148 */
149 unsigned long scm_mtrigger;
150
151
152 /* scm_gc_heap_lock
153 * If set, don't expand the heap. Set only during gc, during which no allocation
154 * is supposed to take place anyway.
155 */
156 int scm_gc_heap_lock = 0;
157
158 /* GC Blocking
159 * Don't pause for collection if this is set -- just
160 * expand the heap.
161 */
162
163 int scm_block_gc = 1;
164
165 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
166 * collection (GC) more space is allocated for the heap.
167 */
168 #define MIN_GC_YIELD (scm_heap_size/4)
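/* In other words (see scm_gc_for_alloc below), after a collection the
 * heap is grown whenever the sweep recovered fewer than a quarter of the
 * existing cells, or the relevant freelist is still empty:
 *
 *   if ((scm_gc_cells_collected < MIN_GC_YIELD) || SCM_IMP (*freelistp))
 *     alloc_some_heap (ncells, freelistp);
 */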
169
170 /* During collection, this accumulates objects holding
171 * weak references.
172 */
173 SCM *scm_weak_vectors;
174 int scm_weak_size;
175 int scm_n_weak;
176
177 /* GC Statistics Keeping
178 */
179 unsigned long scm_cells_allocated = 0;
180 unsigned long scm_mallocated = 0;
181 unsigned long scm_gc_cells_collected;
182 unsigned long scm_gc_malloc_collected;
183 unsigned long scm_gc_ports_collected;
184 unsigned long scm_gc_rt;
185 unsigned long scm_gc_time_taken = 0;
186
187 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
188 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
189 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
190 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
191 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
192 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
193
194
195 struct scm_heap_seg_data
196 {
197 /* lower and upper bounds of the segment */
198 SCM_CELLPTR bounds[2];
199
200 /* address of the head-of-freelist pointer for this segment's cells.
201 All segments usually point to the same one, scm_freelist. */
202 SCM *freelistp;
203
204 /* number of SCM words per object in this segment */
205 int ncells;
206
207 /* If SEG_DATA->valid is non-zero, the conservative marking
208 functions will apply SEG_DATA->valid to the purported pointer and
209 SEG_DATA, and mark the object iff the function returns non-zero.
210 At the moment, I don't think anyone uses this. */
211 int (*valid) ();
212 };
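/* Sketch (not in the original source) of how a segment record is
 * consulted.  which_seg, scm_mark_locations and scm_cellp below all
 * decide whether a candidate pointer PTR falls inside segment I with the
 * same bounds test,
 *
 *   if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
 *       && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
 *     ...  ptr lies in segment i  ...
 *
 * i.e. bounds[0] is an inclusive lower bound and bounds[1] an exclusive
 * upper bound. */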
213
214
215
216
217 static void scm_mark_weak_vector_spines SCM_P ((void));
218 static scm_sizet init_heap_seg SCM_P ((SCM_CELLPTR, scm_sizet, int, SCM *));
219 static void alloc_some_heap SCM_P ((int, SCM *));
220
221
222 \f
223 /* Debugging functions. */
224
225 #ifdef DEBUG_FREELIST
226
227 /* Return the number of the heap segment containing CELL. */
228 static int
229 which_seg (SCM cell)
230 {
231 int i;
232
233 for (i = 0; i < scm_n_heap_segs; i++)
234 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
235 && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
236 return i;
237 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
238 cell);
239 abort ();
240 }
241
242
243 SCM_PROC (s_map_free_list, "map-free-list", 0, 0, 0, scm_map_free_list);
244 SCM
245 scm_map_free_list ()
246 {
247 int last_seg = -1, count = 0;
248 SCM f;
249
250 fprintf (stderr, "%d segments total\n", scm_n_heap_segs);
251 for (f = scm_freelist; SCM_NIMP (f); f = SCM_CDR (f))
252 {
253 int this_seg = which_seg (f);
254
255 if (this_seg != last_seg)
256 {
257 if (last_seg != -1)
258 fprintf (stderr, " %5d cells in segment %d\n", count, last_seg);
259 last_seg = this_seg;
260 count = 0;
261 }
262 count++;
263 }
264 if (last_seg != -1)
265 fprintf (stderr, " %5d cells in segment %d\n", count, last_seg);
266
267 fflush (stderr);
268
269 return SCM_UNSPECIFIED;
270 }
271
272
273 /* Number of calls to SCM_NEWCELL since startup. */
274 static unsigned long scm_newcell_count;
275
276 /* Search freelist for anything that isn't marked as a free cell.
277 Abort if we find something. */
278 static void
279 scm_check_freelist ()
280 {
281 SCM f;
282 int i = 0;
283
284 for (f = scm_freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
285 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
286 {
287 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
288 scm_newcell_count, i);
289 fflush (stderr);
290 abort ();
291 }
292 }
293
294 static int scm_debug_check_freelist = 0;
295 SCM
296 scm_debug_newcell (void)
297 {
298 SCM new;
299
300 scm_newcell_count++;
301 if (scm_debug_check_freelist)
302 scm_check_freelist ();
303
304 /* The rest of this is supposed to be identical to the SCM_NEWCELL
305 macro. */
306 if (SCM_IMP (scm_freelist))
307 new = scm_gc_for_newcell ();
308 else
309 {
310 new = scm_freelist;
311 scm_freelist = SCM_CDR (scm_freelist);
312 ++scm_cells_allocated;
313 }
314
315 return new;
316 }
317
318 #endif /* DEBUG_FREELIST */
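/* For orientation, a minimal sketch (assumed from the macros used in this
 * file, not copied from the headers) of how client code obtains a fresh
 * pair: SCM_NEWCELL pops the freelist, falling back on scm_gc_for_newcell
 * when the freelist is empty -- the same logic scm_debug_newcell spells
 * out above -- and the caller then fills in both words (car_value and
 * cdr_value stand for whatever the caller stores):
 *
 *   SCM z;
 *   SCM_NEWCELL (z);
 *   SCM_SETCAR (z, car_value);
 *   SCM_SETCDR (z, cdr_value);
 *
 * scm_debug_newcell mirrors the SCM_NEWCELL macro so that, with
 * DEBUG_FREELIST defined, allocations can be routed through it and the
 * whole freelist verified whenever scm_debug_check_freelist is set
 * (e.g. from a debugger). */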
319
320 \f
321
322 /* {Scheme Interface to GC}
323 */
324
325 SCM_PROC (s_gc_stats, "gc-stats", 0, 0, 0, scm_gc_stats);
326 SCM
327 scm_gc_stats ()
328 {
329 int i;
330 int n;
331 SCM heap_segs;
332 SCM local_scm_mtrigger;
333 SCM local_scm_mallocated;
334 SCM local_scm_heap_size;
335 SCM local_scm_cells_allocated;
336 SCM local_scm_gc_time_taken;
337 SCM answer;
338
339 SCM_DEFER_INTS;
340 scm_block_gc = 1;
341 retry:
342 heap_segs = SCM_EOL;
343 n = scm_n_heap_segs;
344 for (i = scm_n_heap_segs; i--; )
345 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
346 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
347 heap_segs);
348 if (scm_n_heap_segs != n)
349 goto retry;
350 scm_block_gc = 0;
351
352 local_scm_mtrigger = scm_mtrigger;
353 local_scm_mallocated = scm_mallocated;
354 local_scm_heap_size = scm_heap_size;
355 local_scm_cells_allocated = scm_cells_allocated;
356 local_scm_gc_time_taken = scm_gc_time_taken;
357
358 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
359 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
360 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
361 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
362 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
363 scm_cons (sym_heap_segments, heap_segs),
364 SCM_UNDEFINED);
365 SCM_ALLOW_INTS;
366 return answer;
367 }
368
369
370 void
371 scm_gc_start (what)
372 char *what;
373 {
374 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
375 scm_gc_cells_collected = 0;
376 scm_gc_malloc_collected = 0;
377 scm_gc_ports_collected = 0;
378 }
379
380 void
381 scm_gc_end ()
382 {
383 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
384 scm_gc_time_taken = scm_gc_time_taken + scm_gc_rt;
385 scm_system_async_mark (scm_gc_async);
386 }
387
388
389 SCM_PROC(s_object_address, "object-address", 1, 0, 0, scm_object_addr);
390 SCM
391 scm_object_addr (obj)
392 SCM obj;
393 {
394 return scm_ulong2num ((unsigned long)obj);
395 }
396
397
398 SCM_PROC(s_gc, "gc", 0, 0, 0, scm_gc);
399 SCM
400 scm_gc ()
401 {
402 SCM_DEFER_INTS;
403 scm_igc ("call");
404 SCM_ALLOW_INTS;
405 return SCM_UNSPECIFIED;
406 }
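/* Usage sketch (illustrative) of the Scheme procedures defined in this
 * section:
 *
 *   (gc)                                   ; force a collection
 *   (assq 'cells-allocated (gc-stats))     ; => (cells-allocated . N)
 *   (object-address '(1 2 3))              ; machine address of the pair
 *
 * gc-stats returns an association list keyed by the symbols interned
 * above: cells-allocated, cell-heap-size, bytes-malloced,
 * gc-malloc-threshold, cell-heap-segments and gc-time-taken. */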
407
408
409 \f
410 /* {C Interface For When GC is Triggered}
411 */
412
413 void
414 scm_gc_for_alloc (ncells, freelistp)
415 int ncells;
416 SCM * freelistp;
417 {
418 SCM_REDEFER_INTS;
419 scm_igc ("cells");
420 if ((scm_gc_cells_collected < MIN_GC_YIELD) || SCM_IMP (*freelistp))
421 {
422 alloc_some_heap (ncells, freelistp);
423 }
424 SCM_REALLOW_INTS;
425 }
426
427
428 SCM
429 scm_gc_for_newcell ()
430 {
431 SCM fl;
432 scm_gc_for_alloc (1, &scm_freelist);
433 fl = scm_freelist;
434 scm_freelist = SCM_CDR (fl);
435 return fl;
436 }
437
438 void
439 scm_igc (what)
440 char *what;
441 {
442 int j;
443
444 #ifdef USE_THREADS
445 /* During the critical section, only the current thread may run. */
446 SCM_THREAD_CRITICAL_SECTION_START;
447 #endif
448
449 /* fprintf (stderr, "gc: %s\n", what); */
450
451 scm_gc_start (what);
452 if (!scm_stack_base || scm_block_gc)
453 {
454 scm_gc_end ();
455 return;
456 }
457
458 ++scm_gc_heap_lock;
459 scm_n_weak = 0;
460
461 scm_guardian_gc_init ();
462
463 /* unprotect any struct types with no instances */
464 #if 0
465 {
466 SCM type_list;
467 SCM * pos;
468
469 pos = &scm_type_obj_list;
470 type_list = scm_type_obj_list;
471 while (type_list != SCM_EOL)
472 if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
473 {
474 pos = SCM_CDRLOC (type_list);
475 type_list = SCM_CDR (type_list);
476 }
477 else
478 {
479 *pos = SCM_CDR (type_list);
480 type_list = SCM_CDR (type_list);
481 }
482 }
483 #endif
484
485 /* flush dead entries from the continuation stack */
486 {
487 int x;
488 int bound;
489 SCM * elts;
490 elts = SCM_VELTS (scm_continuation_stack);
491 bound = SCM_LENGTH (scm_continuation_stack);
492 x = SCM_INUM (scm_continuation_stack_ptr);
493 while (x < bound)
494 {
495 elts[x] = SCM_BOOL_F;
496 ++x;
497 }
498 }
499
500 #ifndef USE_THREADS
501
502 /* Protect from the C stack. This must be the first marking
503 * done because it provides information about what objects
504 * are "in-use" by the C code. "in-use" objects are those
505 * for which the values from SCM_LENGTH and SCM_CHARS must remain
506 * usable. This requirement is stricter than a liveness
507 * requirement -- in particular, it constrains the implementation
508 * of scm_vector_set_length_x.
509 */
510 SCM_FLUSH_REGISTER_WINDOWS;
511 /* This assumes that all registers are saved into the jmp_buf */
512 setjmp (scm_save_regs_gc_mark);
513 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
514 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
515 sizeof scm_save_regs_gc_mark)
516 / sizeof (SCM_STACKITEM)));
517
518 {
519 /* stack_len is long rather than scm_sizet in order to guarantee that
520 &stack_len is long aligned */
521 #ifdef SCM_STACK_GROWS_UP
522 #ifdef nosve
523 long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
524 #else
525 long stack_len = scm_stack_size (scm_stack_base);
526 #endif
527 scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
528 #else
529 #ifdef nosve
530 long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
531 #else
532 long stack_len = scm_stack_size (scm_stack_base);
533 #endif
534 scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
535 #endif
536 }
537
538 #else /* USE_THREADS */
539
540 /* Mark every thread's stack and registers */
541 scm_threads_mark_stacks();
542
543 #endif /* USE_THREADS */
544
545 /* FIXME: insert a phase to un-protect string-data preserved
546 * in scm_vector_set_length_x.
547 */
548
549 j = SCM_NUM_PROTECTS;
550 while (j--)
551 scm_gc_mark (scm_sys_protects[j]);
552
553 #ifndef USE_THREADS
554 scm_gc_mark (scm_root->handle);
555 #endif
556
557 scm_mark_weak_vector_spines ();
558
559 scm_guardian_zombify ();
560
561 scm_gc_sweep ();
562
563 --scm_gc_heap_lock;
564 scm_gc_end ();
565
566 #ifdef USE_THREADS
567 SCM_THREAD_CRITICAL_SECTION_END;
568 #endif
569 }
570
571 \f
572 /* {Mark/Sweep}
573 */
574
575
576
577 /* Mark an object precisely.
578 */
579 void
580 scm_gc_mark (p)
581 SCM p;
582 {
583 register long i;
584 register SCM ptr;
585
586 ptr = p;
587
588 gc_mark_loop:
589 if (SCM_IMP (ptr))
590 return;
591
592 gc_mark_nimp:
593 if (SCM_NCELLP (ptr))
594 scm_wta (ptr, "rogue pointer in heap", NULL);
595
596 switch (SCM_TYP7 (ptr))
597 {
598 case scm_tcs_cons_nimcar:
599 if (SCM_GCMARKP (ptr))
600 break;
601 SCM_SETGCMARK (ptr);
602 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
603 {
604 ptr = SCM_CAR (ptr);
605 goto gc_mark_nimp;
606 }
607 scm_gc_mark (SCM_CAR (ptr));
608 ptr = SCM_GCCDR (ptr);
609 goto gc_mark_nimp;
610 case scm_tcs_cons_imcar:
611 if (SCM_GCMARKP (ptr))
612 break;
613 SCM_SETGCMARK (ptr);
614 ptr = SCM_GCCDR (ptr);
615 goto gc_mark_loop;
616 case scm_tcs_cons_gloc:
617 if (SCM_GCMARKP (ptr))
618 break;
619 SCM_SETGCMARK (ptr);
620 {
621 SCM vcell;
622 vcell = SCM_CAR (ptr) - 1L;
623 switch (SCM_CDR (vcell))
624 {
625 default:
626 scm_gc_mark (vcell);
627 ptr = SCM_GCCDR (ptr);
628 goto gc_mark_loop;
629 case 1: /* ! */
630 case 0: /* ! */
631 {
632 SCM layout;
633 SCM * vtable_data;
634 int len;
635 char * fields_desc;
636 register SCM * mem;
637 register int x;
638
639 vtable_data = (SCM *)vcell;
640 layout = vtable_data[scm_vtable_index_layout];
641 len = SCM_LENGTH (layout);
642 fields_desc = SCM_CHARS (layout);
643 /* We're using SCM_GCCDR here like STRUCT_DATA, except
644 that it removes the mark */
645 mem = (SCM *)SCM_GCCDR (ptr);
646
647 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
648 {
649 scm_gc_mark (mem[scm_struct_i_proc + 0]);
650 scm_gc_mark (mem[scm_struct_i_proc + 1]);
651 scm_gc_mark (mem[scm_struct_i_proc + 2]);
652 scm_gc_mark (mem[scm_struct_i_proc + 3]);
653 }
654 if (len)
655 {
656 for (x = 0; x < len - 2; x += 2, ++mem)
657 if (fields_desc[x] == 'p')
658 scm_gc_mark (*mem);
659 if (fields_desc[x] == 'p')
660 {
661 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
662 for (x = *mem; x; --x)
663 scm_gc_mark (*++mem);
664 else
665 scm_gc_mark (*mem);
666 }
667 }
668 if (!SCM_CDR (vcell))
669 {
670 SCM_SETGCMARK (vcell);
671 ptr = vtable_data[scm_vtable_index_vtable];
672 goto gc_mark_loop;
673 }
674 }
675 }
676 }
677 break;
678 case scm_tcs_closures:
679 if (SCM_GCMARKP (ptr))
680 break;
681 SCM_SETGCMARK (ptr);
682 if (SCM_IMP (SCM_CDR (ptr)))
683 {
684 ptr = SCM_CLOSCAR (ptr);
685 goto gc_mark_nimp;
686 }
687 scm_gc_mark (SCM_CLOSCAR (ptr));
688 ptr = SCM_GCCDR (ptr);
689 goto gc_mark_nimp;
690 case scm_tc7_vector:
691 case scm_tc7_lvector:
692 #ifdef CCLO
693 case scm_tc7_cclo:
694 #endif
695 if (SCM_GC8MARKP (ptr))
696 break;
697 SCM_SETGC8MARK (ptr);
698 i = SCM_LENGTH (ptr);
699 if (i == 0)
700 break;
701 while (--i > 0)
702 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
703 scm_gc_mark (SCM_VELTS (ptr)[i]);
704 ptr = SCM_VELTS (ptr)[0];
705 goto gc_mark_loop;
706 case scm_tc7_contin:
707 if SCM_GC8MARKP
708 (ptr) break;
709 SCM_SETGC8MARK (ptr);
710 if (SCM_VELTS (ptr))
711 scm_mark_locations (SCM_VELTS (ptr),
712 (scm_sizet)
713 (SCM_LENGTH (ptr) +
714 (sizeof (SCM_STACKITEM) + -1 +
715 sizeof (scm_contregs)) /
716 sizeof (SCM_STACKITEM)));
717 break;
718 case scm_tc7_bvect:
719 case scm_tc7_byvect:
720 case scm_tc7_ivect:
721 case scm_tc7_uvect:
722 case scm_tc7_fvect:
723 case scm_tc7_dvect:
724 case scm_tc7_cvect:
725 case scm_tc7_svect:
726 #ifdef LONGLONGS
727 case scm_tc7_llvect:
728 #endif
729
730 case scm_tc7_string:
731 SCM_SETGC8MARK (ptr);
732 break;
733
734 case scm_tc7_substring:
735 if (SCM_GC8MARKP(ptr))
736 break;
737 SCM_SETGC8MARK (ptr);
738 ptr = SCM_CDR (ptr);
739 goto gc_mark_loop;
740
741 case scm_tc7_wvect:
742 if (SCM_GC8MARKP(ptr))
743 break;
744 scm_weak_vectors[scm_n_weak++] = ptr;
745 if (scm_n_weak >= scm_weak_size)
746 {
747 SCM_SYSCALL (scm_weak_vectors =
748 (SCM *) realloc ((char *) scm_weak_vectors,
749 sizeof (SCM) * (scm_weak_size *= 2)));
750 if (scm_weak_vectors == NULL)
751 {
752 scm_puts ("weak vector table", scm_cur_errp);
753 scm_puts ("\nFATAL ERROR DURING CRITICAL SCM_CODE SECTION\n",
754 scm_cur_errp);
755 exit(SCM_EXIT_FAILURE);
756 }
757 }
758 SCM_SETGC8MARK (ptr);
759 if (SCM_IS_WHVEC_ANY (ptr))
760 {
761 int x;
762 int len;
763 int weak_keys;
764 int weak_values;
765
766 len = SCM_LENGTH (ptr);
767 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
768 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
769
770 for (x = 0; x < len; ++x)
771 {
772 SCM alist;
773 alist = SCM_VELTS (ptr)[x];
774 /* mark everything on the alist
775 * except the keys or values, according to weak_values and weak_keys.
776 */
777 while ( SCM_NIMP (alist)
778 && SCM_CONSP (alist)
779 && !SCM_GCMARKP (alist)
780 && SCM_NIMP (SCM_CAR (alist))
781 && SCM_CONSP (SCM_CAR (alist)))
782 {
783 SCM kvpair;
784 SCM next_alist;
785
786 kvpair = SCM_CAR (alist);
787 next_alist = SCM_CDR (alist);
788 /*
789 * Do not do this:
790 * SCM_SETGCMARK (alist);
791 * SCM_SETGCMARK (kvpair);
792 *
793 * It may be that either the key or value is protected by
794 * an escaped reference to part of the spine of this alist.
795 * If we mark the spine here, and only mark one or neither of the
796 * key and value, they may never be properly marked.
797 * This leads to a horrible situation in which an alist containing
798 * freelist cells is exported.
799 *
800 * So only mark the spines of these arrays last of all marking.
801 * If somebody confuses us by constructing a weak vector
802 * with a circular alist then we are hosed, but at least we
803 * won't prematurely drop table entries.
804 */
805 if (!weak_keys)
806 scm_gc_mark (SCM_CAR (kvpair));
807 if (!weak_values)
808 scm_gc_mark (SCM_GCCDR (kvpair));
809 alist = next_alist;
810 }
811 if (SCM_NIMP (alist))
812 scm_gc_mark (alist);
813 }
814 }
815 break;
816
817 case scm_tc7_msymbol:
818 if (SCM_GC8MARKP(ptr))
819 break;
820 SCM_SETGC8MARK (ptr);
821 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
822 ptr = SCM_SYMBOL_PROPS (ptr);
823 goto gc_mark_loop;
824 case scm_tc7_ssymbol:
825 if (SCM_GC8MARKP(ptr))
826 break;
827 SCM_SETGC8MARK (ptr);
828 break;
829 case scm_tcs_subrs:
830 ptr = (SCM)(scm_heap_org + (((unsigned long)SCM_CAR (ptr)) >> 8));
831 goto gc_mark_loop;
832 case scm_tc7_port:
833 i = SCM_PTOBNUM (ptr);
834 if (!(i < scm_numptob))
835 goto def;
836 if (SCM_GC8MARKP (ptr))
837 break;
838 SCM_SETGC8MARK (ptr);
839 if (SCM_PTAB_ENTRY(ptr))
840 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
841 if (scm_ptobs[i].mark)
842 {
843 ptr = (scm_ptobs[i].mark) (ptr);
844 goto gc_mark_loop;
845 }
846 else
847 return;
848 break;
849 case scm_tc7_smob:
850 if (SCM_GC8MARKP (ptr))
851 break;
852 SCM_SETGC8MARK (ptr);
853 switch SCM_GCTYP16 (ptr)
854 { /* should be faster than going through scm_smobs */
855 case scm_tc_free_cell:
856 /* printf("found free_cell %X ", ptr); fflush(stdout); */
857 SCM_SETCDR (ptr, SCM_EOL);
858 break;
859 case scm_tcs_bignums:
860 case scm_tc16_flo:
861 break;
862 default:
863 i = SCM_SMOBNUM (ptr);
864 if (!(i < scm_numsmob))
865 goto def;
866 if (scm_smobs[i].mark)
867 {
868 ptr = (scm_smobs[i].mark) (ptr);
869 goto gc_mark_loop;
870 }
871 else
872 return;
873 }
874 break;
875 default:
876 def:scm_wta (ptr, "unknown type in ", "gc_mark");
877 }
878 }
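/* Sketch of the mark protocol that the scm_tc7_smob and scm_tc7_port
 * cases above rely on (my_widget and its fields are hypothetical, not
 * part of this file): a type's mark routine marks every SCM it holds
 * except one, and returns that one so gc_mark_loop above continues with
 * it instead of recursing:
 *
 *   static SCM
 *   mark_my_widget (SCM obj)
 *   {
 *     struct my_widget *w = (struct my_widget *) SCM_CDR (obj);
 *     scm_gc_mark (w->name);        -- auxiliary references
 *     return w->contents;           -- marked by gc_mark_loop
 *   }
 */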
879
880
881 /* Mark a Region Conservatively
882 */
883
884 void
885 scm_mark_locations (x, n)
886 SCM_STACKITEM x[];
887 scm_sizet n;
888 {
889 register long m = n;
890 register int i, j;
891 register SCM_CELLPTR ptr;
892
893 while (0 <= --m)
894 if SCM_CELLP (*(SCM **) & x[m])
895 {
896 ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & x[m]));
897 i = 0;
898 j = scm_n_heap_segs - 1;
899 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
900 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
901 {
902 while (i <= j)
903 {
904 int seg_id;
905 seg_id = -1;
906 if ( (i == j)
907 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
908 seg_id = i;
909 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
910 seg_id = j;
911 else
912 {
913 int k;
914 k = (i + j) / 2;
915 if (k == i)
916 break;
917 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
918 {
919 j = k;
920 ++i;
921 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
922 continue;
923 else
924 break;
925 }
926 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
927 {
928 i = k;
929 --j;
930 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
931 continue;
932 else
933 break;
934 }
935 }
936 if ( !scm_heap_table[seg_id].valid
937 || scm_heap_table[seg_id].valid (ptr,
938 &scm_heap_table[seg_id]))
939 scm_gc_mark (*(SCM *) & x[m]);
940 break;
941 }
942
943 }
944 }
945 }
946
947
948 /* The following is a C predicate which determines if an SCM value can be
949 regarded as a pointer to a cell on the heap. The code is duplicated
950 from scm_mark_locations. */
951
952
953 int
954 scm_cellp (value)
955 SCM value;
956 {
957 register int i, j;
958 register SCM_CELLPTR ptr;
959
960 if SCM_CELLP (*(SCM **) & value)
961 {
962 ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & value));
963 i = 0;
964 j = scm_n_heap_segs - 1;
965 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
966 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
967 {
968 while (i <= j)
969 {
970 int seg_id;
971 seg_id = -1;
972 if ( (i == j)
973 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
974 seg_id = i;
975 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
976 seg_id = j;
977 else
978 {
979 int k;
980 k = (i + j) / 2;
981 if (k == i)
982 break;
983 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
984 {
985 j = k;
986 ++i;
987 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
988 continue;
989 else
990 break;
991 }
992 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
993 {
994 i = k;
995 --j;
996 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
997 continue;
998 else
999 break;
1000 }
1001 }
1002 if ( !scm_heap_table[seg_id].valid
1003 || scm_heap_table[seg_id].valid (ptr,
1004 &scm_heap_table[seg_id]))
1005 return 1;
1006 break;
1007 }
1008
1009 }
1010 }
1011 return 0;
1012 }
1013
1014
1015 static void
1016 scm_mark_weak_vector_spines ()
1017 {
1018 int i;
1019
1020 for (i = 0; i < scm_n_weak; ++i)
1021 {
1022 if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i]))
1023 {
1024 SCM *ptr;
1025 SCM obj;
1026 int j;
1027 int n;
1028
1029 obj = scm_weak_vectors[i];
1030 ptr = SCM_VELTS (scm_weak_vectors[i]);
1031 n = SCM_LENGTH (scm_weak_vectors[i]);
1032 for (j = 0; j < n; ++j)
1033 {
1034 SCM alist;
1035
1036 alist = ptr[j];
1037 while ( SCM_NIMP (alist)
1038 && SCM_CONSP (alist)
1039 && !SCM_GCMARKP (alist)
1040 && SCM_NIMP (SCM_CAR (alist))
1041 && SCM_CONSP (SCM_CAR (alist)))
1042 {
1043 SCM_SETGCMARK (alist);
1044 SCM_SETGCMARK (SCM_CAR (alist));
1045 alist = SCM_GCCDR (alist);
1046 }
1047 }
1048 }
1049 }
1050 }
1051
1052
1053
1054 void
1055 scm_gc_sweep ()
1056 {
1057 register SCM_CELLPTR ptr;
1058 #ifdef SCM_POINTERS_MUNGED
1059 register SCM scmptr;
1060 #else
1061 #undef scmptr
1062 #define scmptr (SCM)ptr
1063 #endif
1064 register SCM nfreelist;
1065 register SCM *hp_freelist;
1066 register long m;
1067 register int span;
1068 long i;
1069 scm_sizet seg_size;
1070
1071 m = 0;
1072
1073 /* Reset all free list pointers. We'll reconstruct them completely
1074 while scanning. */
1075 for (i = 0; i < scm_n_heap_segs; i++)
1076 *scm_heap_table[i].freelistp = SCM_EOL;
1077
1078 for (i = 0; i < scm_n_heap_segs; i++)
1079 {
1080 register scm_sizet n = 0;
1081 register scm_sizet j;
1082
1083 /* Unmarked cells go onto the front of the freelist this heap
1084 segment points to. Rather than updating the real freelist
1085 pointer as we go along, we accumulate the new head in
1086 nfreelist. Then, if it turns out that the entire segment is
1087 free, we free (i.e., malloc's free) the whole segment, and
1088 simply don't assign nfreelist back into the real freelist. */
1089 hp_freelist = scm_heap_table[i].freelistp;
1090 nfreelist = *hp_freelist;
1091
1092 span = scm_heap_table[i].ncells;
1093 ptr = CELL_UP (scm_heap_table[i].bounds[0]);
1094 seg_size = CELL_DN (scm_heap_table[i].bounds[1]) - ptr;
1095 for (j = seg_size + span; j -= span; ptr += span)
1096 {
1097 #ifdef SCM_POINTERS_MUNGED
1098 scmptr = PTR2SCM (ptr);
1099 #endif
1100 switch SCM_TYP7 (scmptr)
1101 {
1102 case scm_tcs_cons_gloc:
1103 if (SCM_GCMARKP (scmptr))
1104 {
1105 if (SCM_CDR (SCM_CAR (scmptr) - 1) == (SCM)1)
1106 SCM_SETCDR (SCM_CAR (scmptr) - 1, (SCM) 0);
1107 goto cmrkcontinue;
1108 }
1109 {
1110 SCM vcell;
1111 vcell = SCM_CAR (scmptr) - 1L;
1112
1113 if ((SCM_CDR (vcell) == 0) || (SCM_CDR (vcell) == 1))
1114 {
1115 SCM *p = (SCM *) SCM_GCCDR (scmptr);
1116 if (((SCM*) vcell)[scm_vtable_index_layout]
1117 & SCM_STRUCTF_LIGHT)
1118 {
1119 SCM layout = ((SCM*)vcell)[scm_vtable_index_layout];
1120 m += SCM_LENGTH (layout) / 2;
1121 free ((char *) p);
1122 }
1123 else
1124 {
1125 m += p[scm_struct_i_n_words] * sizeof (SCM);
1126 /* I feel like I'm programming in BCPL here... */
1127 free ((char *) p[scm_struct_i_ptr]);
1128 }
1129 }
1130 }
1131 break;
1132 case scm_tcs_cons_imcar:
1133 case scm_tcs_cons_nimcar:
1134 case scm_tcs_closures:
1135 if (SCM_GCMARKP (scmptr))
1136 goto cmrkcontinue;
1137 break;
1138 case scm_tc7_wvect:
1139 if (SCM_GC8MARKP (scmptr))
1140 {
1141 goto c8mrkcontinue;
1142 }
1143 else
1144 {
1145 m += (1 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1146 scm_must_free ((char *)(SCM_VELTS (scmptr) - 1));
1147 break;
1148 }
1149
1150 case scm_tc7_vector:
1151 case scm_tc7_lvector:
1152 #ifdef CCLO
1153 case scm_tc7_cclo:
1154 #endif
1155 if (SCM_GC8MARKP (scmptr))
1156 goto c8mrkcontinue;
1157
1158 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1159 freechars:
1160 scm_must_free (SCM_CHARS (scmptr));
1161 /* SCM_SETCHARS(scmptr, 0);*/
1162 break;
1163 case scm_tc7_bvect:
1164 if SCM_GC8MARKP (scmptr)
1165 goto c8mrkcontinue;
1166 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1167 goto freechars;
1168 case scm_tc7_byvect:
1169 if SCM_GC8MARKP (scmptr)
1170 goto c8mrkcontinue;
1171 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1172 goto freechars;
1173 case scm_tc7_ivect:
1174 case scm_tc7_uvect:
1175 if SCM_GC8MARKP (scmptr)
1176 goto c8mrkcontinue;
1177 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1178 goto freechars;
1179 case scm_tc7_svect:
1180 if SCM_GC8MARKP (scmptr)
1181 goto c8mrkcontinue;
1182 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1183 goto freechars;
1184 #ifdef LONGLONGS
1185 case scm_tc7_llvect:
1186 if SCM_GC8MARKP (scmptr)
1187 goto c8mrkcontinue;
1188 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1189 goto freechars;
1190 #endif
1191 case scm_tc7_fvect:
1192 if SCM_GC8MARKP (scmptr)
1193 goto c8mrkcontinue;
1194 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1195 goto freechars;
1196 case scm_tc7_dvect:
1197 if SCM_GC8MARKP (scmptr)
1198 goto c8mrkcontinue;
1199 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1200 goto freechars;
1201 case scm_tc7_cvect:
1202 if SCM_GC8MARKP (scmptr)
1203 goto c8mrkcontinue;
1204 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1205 goto freechars;
1206 case scm_tc7_substring:
1207 if (SCM_GC8MARKP (scmptr))
1208 goto c8mrkcontinue;
1209 break;
1210 case scm_tc7_string:
1211 if (SCM_GC8MARKP (scmptr))
1212 goto c8mrkcontinue;
1213 m += SCM_HUGE_LENGTH (scmptr) + 1;
1214 goto freechars;
1215 case scm_tc7_msymbol:
1216 if (SCM_GC8MARKP (scmptr))
1217 goto c8mrkcontinue;
1218 m += ( SCM_LENGTH (scmptr)
1219 + 1
1220 + sizeof (SCM) * ((SCM *)SCM_CHARS (scmptr) - SCM_SLOTS(scmptr)));
1221 scm_must_free ((char *)SCM_SLOTS (scmptr));
1222 break;
1223 case scm_tc7_contin:
1224 if SCM_GC8MARKP (scmptr)
1225 goto c8mrkcontinue;
1226 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
1227 if (SCM_VELTS (scmptr))
1228 goto freechars;
1229 case scm_tc7_ssymbol:
1230 if SCM_GC8MARKP(scmptr)
1231 goto c8mrkcontinue;
1232 break;
1233 case scm_tcs_subrs:
1234 continue;
1235 case scm_tc7_port:
1236 if SCM_GC8MARKP (scmptr)
1237 goto c8mrkcontinue;
1238 if SCM_OPENP (scmptr)
1239 {
1240 int k = SCM_PTOBNUM (scmptr);
1241 if (!(k < scm_numptob))
1242 goto sweeperr;
1243 /* Keep "revealed" ports alive. */
1244 if (scm_revealed_count(scmptr) > 0)
1245 continue;
1246 /* Yes, I really do mean scm_ptobs[k].free */
1247 /* rather than scm_ptobs[k].close. .close */
1248 /* is for explicit CLOSE-PORT by user */
1249 (scm_ptobs[k].free) (scmptr);
1250 SCM_SETSTREAM (scmptr, 0);
1251 scm_remove_from_port_table (scmptr);
1252 scm_gc_ports_collected++;
1253 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
1254 }
1255 break;
1256 case scm_tc7_smob:
1257 switch SCM_GCTYP16 (scmptr)
1258 {
1259 case scm_tc_free_cell:
1260 if SCM_GC8MARKP (scmptr)
1261 goto c8mrkcontinue;
1262 break;
1263 #ifdef SCM_BIGDIG
1264 case scm_tcs_bignums:
1265 if SCM_GC8MARKP (scmptr)
1266 goto c8mrkcontinue;
1267 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1268 goto freechars;
1269 #endif /* def SCM_BIGDIG */
1270 case scm_tc16_flo:
1271 if SCM_GC8MARKP (scmptr)
1272 goto c8mrkcontinue;
1273 switch ((int) (SCM_CAR (scmptr) >> 16))
1274 {
1275 case (SCM_IMAG_PART | SCM_REAL_PART) >> 16:
1276 m += sizeof (double);
1277 case SCM_REAL_PART >> 16:
1278 case SCM_IMAG_PART >> 16:
1279 m += sizeof (double);
1280 goto freechars;
1281 case 0:
1282 break;
1283 default:
1284 goto sweeperr;
1285 }
1286 break;
1287 default:
1288 if SCM_GC8MARKP (scmptr)
1289 goto c8mrkcontinue;
1290
1291 {
1292 int k;
1293 k = SCM_SMOBNUM (scmptr);
1294 if (!(k < scm_numsmob))
1295 goto sweeperr;
1296 m += (scm_smobs[k].free) ((SCM) scmptr);
1297 break;
1298 }
1299 }
1300 break;
1301 default:
1302 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1303 }
1304 n += span;
1305 #if 0
1306 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1307 exit (2);
1308 #endif
1309 /* Stick the new cell on the front of nfreelist. It's
1310 critical that we mark this cell as freed; otherwise, the
1311 conservative collector might trace it as some other type
1312 of object. */
1313 SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
1314 SCM_SETCDR (scmptr, nfreelist);
1315 nfreelist = scmptr;
1316
1317 continue;
1318 c8mrkcontinue:
1319 SCM_CLRGC8MARK (scmptr);
1320 continue;
1321 cmrkcontinue:
1322 SCM_CLRGCMARK (scmptr);
1323 }
1324 #ifdef GC_FREE_SEGMENTS
1325 if (n == seg_size)
1326 {
1327 register long j;
1328
1329 scm_heap_size -= seg_size;
1330 free ((char *) scm_heap_table[i].bounds[0]);
1331 scm_heap_table[i].bounds[0] = 0;
1332 for (j = i + 1; j < scm_n_heap_segs; j++)
1333 scm_heap_table[j - 1] = scm_heap_table[j];
1334 scm_n_heap_segs -= 1;
1335 i--; /* We need to scan the segment just moved. */
1336 }
1337 else
1338 #endif /* ifdef GC_FREE_SEGMENTS */
1339 /* Update the real freelist pointer to point to the head of
1340 the list of free cells we've built for this segment. */
1341 *hp_freelist = nfreelist;
1342
1343 #ifdef DEBUG_FREELIST
1344 scm_check_freelist ();
1345 scm_map_free_list ();
1346 #endif
1347
1348 scm_gc_cells_collected += n;
1349 }
1350 /* Scan weak vectors. */
1351 {
1352 SCM *ptr;
1353 for (i = 0; i < scm_n_weak; ++i)
1354 {
1355 if (!SCM_IS_WHVEC_ANY (scm_weak_vectors[i]))
1356 {
1357 register long j, n;
1358
1359 ptr = SCM_VELTS (scm_weak_vectors[i]);
1360 n = SCM_LENGTH (scm_weak_vectors[i]);
1361 for (j = 0; j < n; ++j)
1362 if (SCM_NIMP (ptr[j]) && SCM_FREEP (ptr[j]))
1363 ptr[j] = SCM_BOOL_F;
1364 }
1365 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1366 {
1367 SCM obj = scm_weak_vectors[i];
1368 register long n = SCM_LENGTH (scm_weak_vectors[i]);
1369 register long j;
1370
1371 ptr = SCM_VELTS (scm_weak_vectors[i]);
1372
1373 for (j = 0; j < n; ++j)
1374 {
1375 SCM * fixup;
1376 SCM alist;
1377 int weak_keys;
1378 int weak_values;
1379
1380 weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
1381 weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);
1382
1383 fixup = ptr + j;
1384 alist = *fixup;
1385
1386 while (SCM_NIMP (alist)
1387 && SCM_CONSP (alist)
1388 && SCM_NIMP (SCM_CAR (alist))
1389 && SCM_CONSP (SCM_CAR (alist)))
1390 {
1391 SCM key;
1392 SCM value;
1393
1394 key = SCM_CAAR (alist);
1395 value = SCM_CDAR (alist);
1396 if ( (weak_keys && SCM_NIMP (key) && SCM_FREEP (key))
1397 || (weak_values && SCM_NIMP (value) && SCM_FREEP (value)))
1398 {
1399 *fixup = SCM_CDR (alist);
1400 }
1401 else
1402 fixup = SCM_CDRLOC (alist);
1403 alist = SCM_CDR (alist);
1404 }
1405 }
1406 }
1407 }
1408 }
1409 scm_cells_allocated = (scm_heap_size - scm_gc_cells_collected);
1410 scm_mallocated -= m;
1411 scm_gc_malloc_collected = m;
1412 }
1413
1414
1415 \f
1416
1417 /* {Front end to malloc}
1418 *
1419 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1420 *
1421 * These functions provide services comparable to malloc, realloc, and
1422 * free. They are for allocating malloced parts of Scheme objects.
1423 * The primary purpose of the front end is to impose calls to gc.
1424 */
1425
1426 /* scm_must_malloc
1427 * Return newly malloced storage or throw an error.
1428 *
1429 * The parameter WHAT is a string for error reporting.
1430 * If the threshold scm_mtrigger would be exceeded by this
1431 * allocation, or if the first call to malloc fails,
1432 * garbage collect -- on the presumption that some objects
1433 * using malloced storage may be collected.
1434 *
1435 * The limit scm_mtrigger may be raised by this allocation.
1436 */
1437 char *
1438 scm_must_malloc (len, what)
1439 scm_sizet len;
1440 char *what;
1441 {
1442 char *ptr;
1443 scm_sizet size = len;
1444 unsigned long nm = scm_mallocated + size;
1445 if (len != size)
1446 malerr:
1447 scm_wta (SCM_MAKINUM (len), (char *) SCM_NALLOC, what);
1448 if ((nm <= scm_mtrigger))
1449 {
1450 SCM_SYSCALL (ptr = (char *) malloc (size));
1451 if (NULL != ptr)
1452 {
1453 scm_mallocated = nm;
1454 return ptr;
1455 }
1456 }
1457
1458 scm_igc (what);
1459 nm = scm_mallocated + size;
1460 SCM_SYSCALL (ptr = (char *) malloc (size));
1461 if (NULL != ptr)
1462 {
1463 scm_mallocated = nm;
1464 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1465 if (nm > scm_mtrigger)
1466 scm_mtrigger = nm + nm / 2;
1467 else
1468 scm_mtrigger += scm_mtrigger / 2;
1469 }
1470 return ptr;
1471 }
1472 goto malerr;
1473 }
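/* Usage sketch (illustrative; the buffer is made up): storage attached
 * to a Scheme object is allocated through this front end so that it is
 * charged against scm_mtrigger and can provoke a collection:
 *
 *   char *buf = scm_must_malloc (256, "widget buffer");
 *   ...
 *   scm_must_free (buf);
 *
 * Note that scm_must_free itself does not adjust scm_mallocated; that
 * bookkeeping is done in scm_gc_sweep, which subtracts the byte counts
 * it accumulates while freeing dead objects. */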
1474
1475
1476 /* scm_must_realloc
1477 * is similar to scm_must_malloc.
1478 */
1479 char *
1480 scm_must_realloc (char *where,
1481 scm_sizet olen,
1482 scm_sizet len,
1483 char *what)
1484 {
1485 char *ptr;
1486 scm_sizet size = len;
1487 scm_sizet nm = scm_mallocated + size - olen;
1488 if (len != size)
1489 ralerr:
1490 scm_wta (SCM_MAKINUM (len), (char *) SCM_NALLOC, what);
1491 if ((nm <= scm_mtrigger))
1492 {
1493 SCM_SYSCALL (ptr = (char *) realloc (where, size));
1494 if (NULL != ptr)
1495 {
1496 scm_mallocated = nm;
1497 return ptr;
1498 }
1499 }
1500 scm_igc (what);
1501 nm = scm_mallocated + size - olen;
1502 SCM_SYSCALL (ptr = (char *) realloc (where, size));
1503 if (NULL != ptr)
1504 {
1505 scm_mallocated = nm;
1506 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1507 if (nm > scm_mtrigger)
1508 scm_mtrigger = nm + nm / 2;
1509 else
1510 scm_mtrigger += scm_mtrigger / 2;
1511 }
1512 return ptr;
1513 }
1514 goto ralerr;
1515 }
1516
1517 void
1518 scm_must_free (obj)
1519 char *obj;
1520 {
1521 if (obj)
1522 free (obj);
1523 else
1524 scm_wta (SCM_INUM0, "already free", "");
1525 }
1526
1527 /* Announce that there has been some malloc done that will be freed
1528 * during gc. A typical use is for a smob that uses some malloced
1529 * memory but can not get it from scm_must_malloc (for whatever
1530 * reason). When a new object of this smob is created you call
1531 * scm_done_malloc with the size of the object. When your smob free
1532 * function is called, be sure to include this size in the return
1533 * value. */
1534
1535 void
1536 scm_done_malloc (size)
1537 long size;
1538 {
1539 scm_mallocated += size;
1540
1541 if (scm_mallocated > scm_mtrigger)
1542 {
1543 scm_igc ("foreign mallocs");
1544 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1545 {
1546 if (scm_mallocated > scm_mtrigger)
1547 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1548 else
1549 scm_mtrigger += scm_mtrigger / 2;
1550 }
1551 }
1552 }
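/* Sketch of the pattern described above (the widget type and its fields
 * are hypothetical): memory obtained behind the allocator's back is
 * announced with scm_done_malloc when the object is created, and the
 * same number of bytes is returned from the smob's free function so the
 * sweep phase keeps scm_mallocated consistent:
 *
 *   w->buf = malloc (buflen);          -- not from scm_must_malloc
 *   scm_done_malloc ((long) buflen);
 *   ...
 *   static scm_sizet
 *   free_widget (SCM obj)
 *   {
 *     struct widget *w = (struct widget *) SCM_CDR (obj);
 *     scm_sizet buflen = w->buflen;
 *     free (w->buf);
 *     return buflen;                   -- counted by scm_gc_sweep
 *   }
 */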
1553
1554
1555 \f
1556
1557 /* {Heap Segments}
1558 *
1559 * Each heap segment is an array of objects of a particular size.
1560 * Every segment has an associated (possibly shared) freelist.
1561 * A table of segment records is kept that records the upper and
1562 * lower extents of the segment; this is used during the conservative
1563 * phase of gc to identify probable gc roots (because they point
1564 * into valid segments at reasonable offsets). */
1565
1566 /* scm_expmem
1567 * is nonzero if the first heap segment was allocated at the full initial size.
1568 * If scm_expmem is set to one, subsequent segment allocations will
1569 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1570 */
1571 int scm_expmem = 0;
1572
1573 /* scm_heap_org
1574 * is the lowest base address of any heap segment.
1575 */
1576 SCM_CELLPTR scm_heap_org;
1577
1578 struct scm_heap_seg_data * scm_heap_table = 0;
1579 int scm_n_heap_segs = 0;
1580
1581 /* scm_heap_size
1582 * is the total number of cells in heap segments.
1583 */
1584 unsigned long scm_heap_size = 0;
1585
1586 /* init_heap_seg
1587 * initializes a new heap segment and returns the number of objects it contains.
1588 *
1589 * The segment origin, segment size in bytes, and the span of objects
1590 * in cells are input parameters. The freelist is both input and output.
1591 *
1592 * This function presumes that the scm_heap_table has already been expanded
1593 * to accommodate a new segment record.
1594 */
1595
1596
1597 static scm_sizet
1598 init_heap_seg (seg_org, size, ncells, freelistp)
1599 SCM_CELLPTR seg_org;
1600 scm_sizet size;
1601 int ncells;
1602 SCM *freelistp;
1603 {
1604 register SCM_CELLPTR ptr;
1605 #ifdef SCM_POINTERS_MUNGED
1606 register SCM scmptr;
1607 #else
1608 #undef scmptr
1609 #define scmptr ptr
1610 #endif
1611 SCM_CELLPTR seg_end;
1612 int new_seg_index;
1613 int n_new_objects;
1614
1615 if (seg_org == NULL)
1616 return 0;
1617
1618 ptr = seg_org;
1619
1620 /* Compute the ceiling on valid object pointers w/in this segment.
1621 */
1622 seg_end = CELL_DN ((char *) ptr + size);
1623
1624 /* Find the right place and insert the segment record.
1625 *
1626 */
1627 for (new_seg_index = 0;
1628 ( (new_seg_index < scm_n_heap_segs)
1629 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
1630 new_seg_index++)
1631 ;
1632
1633 {
1634 int i;
1635 for (i = scm_n_heap_segs; i > new_seg_index; --i)
1636 scm_heap_table[i] = scm_heap_table[i - 1];
1637 }
1638
1639 ++scm_n_heap_segs;
1640
1641 scm_heap_table[new_seg_index].valid = 0;
1642 scm_heap_table[new_seg_index].ncells = ncells;
1643 scm_heap_table[new_seg_index].freelistp = freelistp;
1644 scm_heap_table[new_seg_index].bounds[0] = (SCM_CELLPTR)ptr;
1645 scm_heap_table[new_seg_index].bounds[1] = (SCM_CELLPTR)seg_end;
1646
1647
1648 /* Compute the least valid object pointer w/in this segment
1649 */
1650 ptr = CELL_UP (ptr);
1651
1652
1653 n_new_objects = seg_end - ptr;
1654
1655 /* Prepend objects in this segment to the freelist.
1656 */
1657 while (ptr < seg_end)
1658 {
1659 #ifdef SCM_POINTERS_MUNGED
1660 scmptr = PTR2SCM (ptr);
1661 #endif
1662 SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
1663 SCM_SETCDR (scmptr, PTR2SCM (ptr + ncells));
1664 ptr += ncells;
1665 }
1666
1667 ptr -= ncells;
1668
1669 /* Patch up the last freelist pointer in the segment
1670 * to join it to the input freelist.
1671 */
1672 SCM_SETCDR (PTR2SCM (ptr), *freelistp);
1673 *freelistp = PTR2SCM (CELL_UP (seg_org));
1674
1675 scm_heap_size += (ncells * n_new_objects);
1676 return size;
1677 #ifdef scmptr
1678 #undef scmptr
1679 #endif
1680 }
1681
1682
1683 static void
1684 alloc_some_heap (ncells, freelistp)
1685 int ncells;
1686 SCM * freelistp;
1687 {
1688 struct scm_heap_seg_data * tmptable;
1689 SCM_CELLPTR ptr;
1690 scm_sizet len;
1691
1692 /* Critical code sections (such as the garbage collector)
1693 * aren't supposed to add heap segments.
1694 */
1695 if (scm_gc_heap_lock)
1696 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
1697
1698 /* Expand the heap tables to have room for the new segment.
1699 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
1700 * only if the allocation of the segment itself succeeds.
1701 */
1702 len = (1 + scm_n_heap_segs) * sizeof (struct scm_heap_seg_data);
1703
1704 SCM_SYSCALL (tmptable = ((struct scm_heap_seg_data *)
1705 realloc ((char *)scm_heap_table, len)));
1706 if (!tmptable)
1707 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
1708 else
1709 scm_heap_table = tmptable;
1710
1711
1712 /* Pick a size for the new heap segment.
1713 * The rule for picking the size of a segment is explained in
1714 * gc.h
1715 */
1716 if (scm_expmem)
1717 {
1718 len = (scm_sizet) (SCM_EXPHEAP (scm_heap_size) * sizeof (scm_cell));
1719 if ((scm_sizet) (SCM_EXPHEAP (scm_heap_size) * sizeof (scm_cell)) != len)
1720 len = 0;
1721 }
1722 else
1723 len = SCM_HEAP_SEG_SIZE;
1724
1725 {
1726 scm_sizet smallest;
1727
1728 smallest = (ncells * sizeof (scm_cell));
1729 if (len < smallest)
1730 len = (ncells * sizeof (scm_cell));
1731
1732 /* Allocate with decaying ambition. */
1733 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
1734 && (len >= smallest))
1735 {
1736 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (len));
1737 if (ptr)
1738 {
1739 init_heap_seg (ptr, len, ncells, freelistp);
1740 return;
1741 }
1742 len /= 2;
1743 }
1744 }
1745
1746 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
1747 }
1748
1749
1750
1751 SCM_PROC (s_unhash_name, "unhash-name", 1, 0, 0, scm_unhash_name);
1752 SCM
1753 scm_unhash_name (name)
1754 SCM name;
1755 {
1756 int x;
1757 int bound;
1758 SCM_ASSERT (SCM_NIMP (name) && SCM_SYMBOLP (name), name, SCM_ARG1, s_unhash_name);
1759 SCM_DEFER_INTS;
1760 bound = scm_n_heap_segs;
1761 for (x = 0; x < bound; ++x)
1762 {
1763 SCM_CELLPTR p;
1764 SCM_CELLPTR pbound;
1765 p = (SCM_CELLPTR)scm_heap_table[x].bounds[0];
1766 pbound = (SCM_CELLPTR)scm_heap_table[x].bounds[1];
1767 while (p < pbound)
1768 {
1769 SCM incar;
1770 incar = p->car;
1771 if (1 == (7 & (int)incar))
1772 {
1773 --incar;
1774 if ( ((name == SCM_BOOL_T) || (SCM_CAR (incar) == name))
1775 && (SCM_CDR (incar) != 0)
1776 && (SCM_CDR (incar) != 1))
1777 {
1778 p->car = name;
1779 }
1780 }
1781 ++p;
1782 }
1783 }
1784 SCM_ALLOW_INTS;
1785 return name;
1786 }
1787
1788
1789 \f
1790 /* {GC Protection Helper Functions}
1791 */
1792
1793
1794 void
1795 scm_remember (ptr)
1796 SCM * ptr;
1797 {}
1798
1799
1800 SCM
1801 scm_return_first (SCM elt, ...)
1802 {
1803 return elt;
1804 }
1805
1806
1807 SCM
1808 scm_permanent_object (obj)
1809 SCM obj;
1810 {
1811 SCM_REDEFER_INTS;
1812 scm_permobjs = scm_cons (obj, scm_permobjs);
1813 SCM_REALLOW_INTS;
1814 return obj;
1815 }
1816
1817
1818 /* Protect OBJ from the garbage collector. OBJ will not be freed,
1819 even if all other references are dropped, until someone applies
1820 scm_unprotect_object to it. This function returns OBJ.
1821
1822 Calls to scm_protect_object nest. For every object O, there is a
1823 counter which scm_protect_object(O) increments and
1824 scm_unprotect_object(O) decrements, if it is greater than zero. If
1825 an object's counter is greater than zero, the garbage collector
1826 will not free it.
1827
1828 Of course, that's not how it's implemented. scm_protect_object and
1829 scm_unprotect_object just maintain a list of references to things.
1830 Since the GC knows about this list, all objects it mentions stay
1831 alive. scm_protect_object adds its argument to the list;
1832 scm_unprotect_object removes the first occurrence of its argument
1833 from the list. */
1834 SCM
1835 scm_protect_object (obj)
1836 SCM obj;
1837 {
1838 scm_protects = scm_cons (obj, scm_protects);
1839
1840 return obj;
1841 }
1842
1843
1844 /* Remove any protection for OBJ established by a prior call to
1845 scm_protect_object. This function returns OBJ.
1846
1847 See scm_protect_object for more information. */
1848 SCM
1849 scm_unprotect_object (obj)
1850 SCM obj;
1851 {
1852 SCM *tail_ptr = &scm_protects;
1853
1854 while (SCM_NIMP (*tail_ptr) && SCM_CONSP (*tail_ptr))
1855 if (SCM_CAR (*tail_ptr) == obj)
1856 {
1857 *tail_ptr = SCM_CDR (*tail_ptr);
1858 break;
1859 }
1860 else
1861 tail_ptr = SCM_CDRLOC (*tail_ptr);
1862
1863 return obj;
1864 }
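/* Usage sketch (illustrative): an SCM kept only in a C global is
 * invisible to the stack and register scans in scm_igc, so it must be
 * protected explicitly for as long as it is needed:
 *
 *   static SCM config_alist;
 *   ...
 *   config_alist = scm_protect_object (scm_cons (key, value));
 *   ...
 *   scm_unprotect_object (config_alist);
 *
 * As described above, protection nests: every scm_protect_object call
 * must be balanced by an scm_unprotect_object call before the object
 * becomes collectable again. */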
1865
1866
1867 \f
1868 int
1869 scm_init_storage (scm_sizet init_heap_size)
1870 {
1871 scm_sizet j;
1872
1873 j = SCM_NUM_PROTECTS;
1874 while (j)
1875 scm_sys_protects[--j] = SCM_BOOL_F;
1876 scm_block_gc = 1;
1877 scm_freelist = SCM_EOL;
1878 scm_expmem = 0;
1879
1880 j = SCM_HEAP_SEG_SIZE;
1881 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
1882 scm_heap_table = ((struct scm_heap_seg_data *)
1883 scm_must_malloc (sizeof (struct scm_heap_seg_data), "hplims"));
1884 if (0L == init_heap_size)
1885 init_heap_size = SCM_INIT_HEAP_SIZE;
1886 j = init_heap_size;
1887 if ((init_heap_size != j)
1888 || !init_heap_seg ((SCM_CELLPTR) malloc (j), j, 1, &scm_freelist))
1889 {
1890 j = SCM_HEAP_SEG_SIZE;
1891 if (!init_heap_seg ((SCM_CELLPTR) malloc (j), j, 1, &scm_freelist))
1892 return 1;
1893 }
1894 else
1895 scm_expmem = 1;
1896 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0]);
1897 /* scm_hplims[0] can change. do not remove scm_heap_org */
1898 if (!(scm_weak_vectors = (SCM *) malloc ((scm_weak_size = 32) * sizeof(SCM))))
1899 return 1;
1900
1901 /* Initialise the list of ports. */
1902 scm_port_table = (struct scm_port_table **) malloc ((long) (sizeof (struct scm_port_table)
1903 * scm_port_table_room));
1904 if (!scm_port_table)
1905 return 1;
1906
1907
1908 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
1909 SCM_SETCDR (scm_undefineds, scm_undefineds);
1910
1911 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
1912 scm_nullstr = scm_makstr (0L, 0);
1913 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
1914 scm_symhash = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
1915 scm_weak_symhash = scm_make_weak_key_hash_table ((SCM) SCM_MAKINUM (scm_symhash_dim));
1916 scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
1917 scm_stand_in_procs = SCM_EOL;
1918 scm_permobjs = SCM_EOL;
1919 scm_protects = SCM_EOL;
1920 scm_asyncs = SCM_EOL;
1921 scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
1922 scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
1923 #ifdef SCM_BIGDIG
1924 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
1925 #endif
1926 return 0;
1927 }
1928 \f
1929
1930 void
1931 scm_init_gc ()
1932 {
1933 #include "gc.x"
1934 }