* gc.c (scm_debug_newcell): Added SCM_SETCAR of the newly
[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995, 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 \f
46 #include <stdio.h>
47 #include "_scm.h"
48 #include "stime.h"
49 #include "stackchk.h"
50 #include "struct.h"
51 #include "genio.h"
52 #include "weaks.h"
53 #include "guardians.h"
54 #include "smob.h"
55 #include "unif.h"
56 #include "async.h"
57
58 #include "scm_validate.h"
59 #include "gc.h"
60
61 #ifdef HAVE_MALLOC_H
62 #include <malloc.h>
63 #endif
64
65 #ifdef HAVE_UNISTD_H
66 #include <unistd.h>
67 #endif
68
69 #ifdef __STDC__
70 #include <stdarg.h>
71 #define var_start(x, y) va_start(x, y)
72 #else
73 #include <varargs.h>
74 #define var_start(x, y) va_start(x)
75 #endif
76
77 \f
78 /* {heap tuning parameters}
79 *
80 * These are parameters for controlling memory allocation. The heap
81 * is the area out of which scm_cons, and object headers are allocated.
82 *
83 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
84 * 64 bit machine. The units of the _SIZE parameters are bytes.
85 * Cons pairs and object headers occupy one heap cell.
86 *
87 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
88 * allocated initially the heap will grow by half its current size
89 * each subsequent time more heap is needed.
90 *
91 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
92 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
93 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
94 * is in scm_init_storage() and alloc_some_heap() in sys.c
95 *
96 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
97 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
98 *
99 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
100 * is needed.
101 *
102 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
103 * trigger a GC.
104 *
105 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
106 * reclaimed by a GC triggered by must_malloc. If less than this is
107 * reclaimed, the trigger threshold is raised. [I don't know what a
108 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
109 * work around a oscillation that caused almost constant GC.]
110 */
111
112 #define SCM_INIT_HEAP_SIZE (32768L*sizeof(scm_cell))
113 #define SCM_MIN_HEAP_SEG_SIZE (2048L*sizeof(scm_cell))
114 #ifdef _QC
115 # define SCM_HEAP_SEG_SIZE 32768L
116 #else
117 # ifdef sequent
118 # define SCM_HEAP_SEG_SIZE (7000L*sizeof(scm_cell))
119 # else
120 # define SCM_HEAP_SEG_SIZE (16384L*sizeof(scm_cell))
121 # endif
122 #endif
123 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size*2)
124 #define SCM_INIT_MALLOC_LIMIT 100000
125 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
126
127 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
128 bounds for allocated storage */
129
130 #ifdef PROT386
131 /*in 386 protected mode we must only adjust the offset */
132 # define CELL_UP(p) MK_FP(FP_SEG(p), ~7&(FP_OFF(p)+7))
133 # define CELL_DN(p) MK_FP(FP_SEG(p), ~7&FP_OFF(p))
134 #else
135 # ifdef _UNICOS
136 # define CELL_UP(p) (SCM_CELLPTR)(~1L & ((long)(p)+1L))
137 # define CELL_DN(p) (SCM_CELLPTR)(~1L & (long)(p))
138 # else
139 # define CELL_UP(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & ((long)(p)+sizeof(scm_cell)-1L))
140 # define CELL_DN(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & (long)(p))
141 # endif /* UNICOS */
142 #endif /* PROT386 */
143
144
145 \f
146 /* scm_freelist
147 * is the head of freelist of cons pairs.
148 */
149 SCM scm_freelist = SCM_EOL;
150
151 /* scm_mtrigger
152 * is the number of bytes of must_malloc allocation needed to trigger gc.
153 */
154 unsigned long scm_mtrigger;
155
156
157 /* scm_gc_heap_lock
158 * If set, don't expand the heap. Set only during gc, during which no allocation
159 * is supposed to take place anyway.
160 */
161 int scm_gc_heap_lock = 0;
162
163 /* GC Blocking
164 * Don't pause for collection if this is set -- just
165 * expand the heap.
166 */
167
168 int scm_block_gc = 1;
169
170 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
171 * collection (GC) more space is allocated for the heap.
172 */
173 #define MIN_GC_YIELD (scm_heap_size/4)
174
175 /* During collection, this accumulates objects holding
176 * weak references.
177 */
178 SCM scm_weak_vectors;
179
180 /* GC Statistics Keeping
181 */
182 unsigned long scm_cells_allocated = 0;
183 long scm_mallocated = 0;
184 unsigned long scm_gc_cells_collected;
185 unsigned long scm_gc_malloc_collected;
186 unsigned long scm_gc_ports_collected;
187 unsigned long scm_gc_rt;
188 unsigned long scm_gc_time_taken = 0;
189
190 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
191 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
192 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
193 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
194 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
195 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
196
197
198 struct scm_heap_seg_data
199 {
200 /* lower and upper bounds of the segment */
201 SCM_CELLPTR bounds[2];
202
203 /* address of the head-of-freelist pointer for this segment's cells.
204 All segments usually point to the same one, scm_freelist. */
205 SCM *freelistp;
206
207 /* number of SCM words per object in this segment */
208 int ncells;
209
210 /* If SEG_DATA->valid is non-zero, the conservative marking
211 functions will apply SEG_DATA->valid to the purported pointer and
212 SEG_DATA, and mark the object iff the function returns non-zero.
213 At the moment, I don't think anyone uses this. */
214 int (*valid) ();
215 };
216
217
218
219
220 static void scm_mark_weak_vector_spines(void);
221 static scm_sizet init_heap_seg(SCM_CELLPTR, scm_sizet, int, SCM *);
222 static void alloc_some_heap(int, SCM *);
223
224
225 \f
226 /* Debugging functions. */
227
228 #ifdef GUILE_DEBUG_FREELIST
229
230 /* Return the number of the heap segment containing CELL. */
231 static int
232 which_seg (SCM cell)
233 {
234 int i;
235
236 for (i = 0; i < scm_n_heap_segs; i++)
237 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
238 && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
239 return i;
240 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
241 cell);
242 abort ();
243 }
244
245
246 GUILE_PROC (scm_map_free_list, "map-free-list", 0, 0, 0,
247 (),
248 "")
249 #define FUNC_NAME s_scm_map_free_list
250 {
251 int last_seg = -1, count = 0;
252 SCM f;
253
254 fprintf (stderr, "%d segments total\n", scm_n_heap_segs);
255 for (f = scm_freelist; SCM_NIMP (f); f = SCM_CDR (f))
256 {
257 int this_seg = which_seg (f);
258
259 if (this_seg != last_seg)
260 {
261 if (last_seg != -1)
262 fprintf (stderr, " %5d cells in segment %d\n", count, last_seg);
263 last_seg = this_seg;
264 count = 0;
265 }
266 count++;
267 }
268 if (last_seg != -1)
269 fprintf (stderr, " %5d cells in segment %d\n", count, last_seg);
270
271 fflush (stderr);
272
273 return SCM_UNSPECIFIED;
274 }
275 #undef FUNC_NAME
276
277
278 /* Number of calls to SCM_NEWCELL since startup. */
279 static unsigned long scm_newcell_count;
280
281 /* Search freelist for anything that isn't marked as a free cell.
282 Abort if we find something. */
283 static void
284 scm_check_freelist ()
285 {
286 SCM f;
287 int i = 0;
288
289 for (f = scm_freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
290 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
291 {
292 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
293 scm_newcell_count, i);
294 fflush (stderr);
295 abort ();
296 }
297 }
298
299 static int scm_debug_check_freelist = 0;
300
301 GUILE_PROC (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
302 (SCM flag),
303 "")
304 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
305 {
306 SCM_VALIDATE_BOOL_COPY(1,flag,scm_debug_check_freelist);
307 return SCM_UNSPECIFIED;
308 }
309 #undef FUNC_NAME
310
311
312 SCM
313 scm_debug_newcell (void)
314 {
315 SCM new;
316
317 scm_newcell_count++;
318 if (scm_debug_check_freelist) {
319 scm_check_freelist ();
320 scm_gc();
321 }
322
323 /* The rest of this is supposed to be identical to the SCM_NEWCELL
324 macro. */
325 if (SCM_IMP (scm_freelist))
326 new = scm_gc_for_newcell ();
327 else
328 {
329 new = scm_freelist;
330 scm_freelist = SCM_CDR (scm_freelist);
331 SCM_SETCAR (new, scm_tc16_allocated);
332 ++scm_cells_allocated;
333 }
334
335 return new;
336 }
337
338 #endif /* GUILE_DEBUG_FREELIST */
339
340 \f
341
342 /* {Scheme Interface to GC}
343 */
344
345 GUILE_PROC (scm_gc_stats, "gc-stats", 0, 0, 0,
346 (),
347 "Returns an association list of statistics about Guile's current use of storage. ")
348 #define FUNC_NAME s_scm_gc_stats
349 {
350 int i;
351 int n;
352 SCM heap_segs;
353 SCM local_scm_mtrigger;
354 SCM local_scm_mallocated;
355 SCM local_scm_heap_size;
356 SCM local_scm_cells_allocated;
357 SCM local_scm_gc_time_taken;
358 SCM answer;
359
360 SCM_DEFER_INTS;
361 scm_block_gc = 1;
362 retry:
363 heap_segs = SCM_EOL;
364 n = scm_n_heap_segs;
365 for (i = scm_n_heap_segs; i--; )
366 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
367 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
368 heap_segs);
369 if (scm_n_heap_segs != n)
370 goto retry;
371 scm_block_gc = 0;
372
373 local_scm_mtrigger = scm_mtrigger;
374 local_scm_mallocated = scm_mallocated;
375 local_scm_heap_size = scm_heap_size;
376 local_scm_cells_allocated = scm_cells_allocated;
377 local_scm_gc_time_taken = scm_gc_time_taken;
378
379 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
380 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
381 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
382 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
383 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
384 scm_cons (sym_heap_segments, heap_segs),
385 SCM_UNDEFINED);
386 SCM_ALLOW_INTS;
387 return answer;
388 }
389 #undef FUNC_NAME
390
391
392 void
393 scm_gc_start (const char *what)
394 {
395 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
396 scm_gc_cells_collected = 0;
397 scm_gc_malloc_collected = 0;
398 scm_gc_ports_collected = 0;
399 }
400
401 void
402 scm_gc_end ()
403 {
404 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
405 scm_gc_time_taken = scm_gc_time_taken + scm_gc_rt;
406 scm_system_async_mark (scm_gc_async);
407 }
408
409
410 GUILE_PROC (scm_object_address, "object-address", 1, 0, 0,
411 (SCM obj),
412 "Return an integer that for the lifetime of @var{obj} is uniquely
413 returned by this function for @var{obj}")
414 #define FUNC_NAME s_scm_object_address
415 {
416 return scm_ulong2num ((unsigned long)obj);
417 }
418 #undef FUNC_NAME
419
420
421 GUILE_PROC(scm_gc, "gc", 0, 0, 0,
422 (),
423 "Scans all of SCM objects and reclaims for further use those that are
424 no longer accessible.")
425 #define FUNC_NAME s_scm_gc
426 {
427 SCM_DEFER_INTS;
428 scm_igc ("call");
429 SCM_ALLOW_INTS;
430 return SCM_UNSPECIFIED;
431 }
432 #undef FUNC_NAME
433
434
435 \f
436 /* {C Interface For When GC is Triggered}
437 */
438
439 void
440 scm_gc_for_alloc (int ncells, SCM *freelistp)
441 {
442 SCM_REDEFER_INTS;
443 scm_igc ("cells");
444 if ((scm_gc_cells_collected < MIN_GC_YIELD) || SCM_IMP (*freelistp))
445 {
446 alloc_some_heap (ncells, freelistp);
447 }
448 SCM_REALLOW_INTS;
449 }
450
451
452 SCM
453 scm_gc_for_newcell ()
454 {
455 SCM fl;
456 scm_gc_for_alloc (1, &scm_freelist);
457 fl = scm_freelist;
458 scm_freelist = SCM_CDR (fl);
459 SCM_SETCAR(fl, scm_tc16_allocated);
460 return fl;
461 }
462
463 void
464 scm_igc (const char *what)
465 {
466 int j;
467
468 #ifdef USE_THREADS
469 /* During the critical section, only the current thread may run. */
470 SCM_THREAD_CRITICAL_SECTION_START;
471 #endif
472
473 /* fprintf (stderr, "gc: %s\n", what); */
474
475 scm_gc_start (what);
476
477 if (!scm_stack_base || scm_block_gc)
478 {
479 scm_gc_end ();
480 return;
481 }
482
483 if (scm_mallocated < 0)
484 /* The byte count of allocated objects has underflowed. This is
485 probably because you forgot to report the sizes of objects you
486 have allocated, by calling scm_done_malloc or some such. When
487 the GC freed them, it subtracted their size from
488 scm_mallocated, which underflowed. */
489 abort ();
490
491 if (scm_gc_heap_lock)
492 /* We've invoked the collector while a GC is already in progress.
493 That should never happen. */
494 abort ();
495
496 ++scm_gc_heap_lock;
497
498 scm_weak_vectors = SCM_EOL;
499
500 scm_guardian_gc_init ();
501
502 /* unprotect any struct types with no instances */
503 #if 0
504 {
505 SCM type_list;
506 SCM * pos;
507
508 pos = &scm_type_obj_list;
509 type_list = scm_type_obj_list;
510 while (type_list != SCM_EOL)
511 if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
512 {
513 pos = SCM_CDRLOC (type_list);
514 type_list = SCM_CDR (type_list);
515 }
516 else
517 {
518 *pos = SCM_CDR (type_list);
519 type_list = SCM_CDR (type_list);
520 }
521 }
522 #endif
523
524 /* flush dead entries from the continuation stack */
525 {
526 int x;
527 int bound;
528 SCM * elts;
529 elts = SCM_VELTS (scm_continuation_stack);
530 bound = SCM_LENGTH (scm_continuation_stack);
531 x = SCM_INUM (scm_continuation_stack_ptr);
532 while (x < bound)
533 {
534 elts[x] = SCM_BOOL_F;
535 ++x;
536 }
537 }
538
539 #ifndef USE_THREADS
540
541 /* Protect from the C stack. This must be the first marking
542 * done because it provides information about what objects
543 * are "in-use" by the C code. "in-use" objects are those
544 * for which the values from SCM_LENGTH and SCM_CHARS must remain
545 * usable. This requirement is stricter than a liveness
546 * requirement -- in particular, it constrains the implementation
547 * of scm_vector_set_length_x.
548 */
549 SCM_FLUSH_REGISTER_WINDOWS;
550 /* This assumes that all registers are saved into the jmp_buf */
551 setjmp (scm_save_regs_gc_mark);
552 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
553 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
554 sizeof scm_save_regs_gc_mark)
555 / sizeof (SCM_STACKITEM)));
556
557 {
558 /* stack_len is long rather than scm_sizet in order to guarantee that
559 &stack_len is long aligned */
560 #ifdef SCM_STACK_GROWS_UP
561 #ifdef nosve
562 long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
563 #else
564 long stack_len = scm_stack_size (scm_stack_base);
565 #endif
566 scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
567 #else
568 #ifdef nosve
569 long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
570 #else
571 long stack_len = scm_stack_size (scm_stack_base);
572 #endif
573 scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
574 #endif
575 }
576
577 #else /* USE_THREADS */
578
579 /* Mark every thread's stack and registers */
580 scm_threads_mark_stacks();
581
582 #endif /* USE_THREADS */
583
584 /* FIXME: insert a phase to un-protect string-data preserved
585 * in scm_vector_set_length_x.
586 */
587
588 j = SCM_NUM_PROTECTS;
589 while (j--)
590 scm_gc_mark (scm_sys_protects[j]);
591
592 /* FIXME: we should have a means to register C functions to be run
593 * in different phases of GC
594 */
595 scm_mark_subr_table ();
596
597 #ifndef USE_THREADS
598 scm_gc_mark (scm_root->handle);
599 #endif
600
601 scm_mark_weak_vector_spines ();
602
603 scm_guardian_zombify ();
604
605 scm_gc_sweep ();
606
607 --scm_gc_heap_lock;
608 scm_gc_end ();
609
610 #ifdef USE_THREADS
611 SCM_THREAD_CRITICAL_SECTION_END;
612 #endif
613 }
614
615 \f
616 /* {Mark/Sweep}
617 */
618
619
620
621 /* Mark an object precisely.
622 */
623 void
624 scm_gc_mark (SCM p)
625 {
626 register long i;
627 register SCM ptr;
628
629 ptr = p;
630
631 gc_mark_loop:
632 if (SCM_IMP (ptr))
633 return;
634
635 gc_mark_nimp:
636 if (SCM_NCELLP (ptr))
637 scm_wta (ptr, "rogue pointer in heap", NULL);
638
639 switch (SCM_TYP7 (ptr))
640 {
641 case scm_tcs_cons_nimcar:
642 if (SCM_GCMARKP (ptr))
643 break;
644 SCM_SETGCMARK (ptr);
645 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
646 {
647 ptr = SCM_CAR (ptr);
648 goto gc_mark_nimp;
649 }
650 scm_gc_mark (SCM_CAR (ptr));
651 ptr = SCM_GCCDR (ptr);
652 goto gc_mark_nimp;
653 case scm_tcs_cons_imcar:
654 case scm_tc7_pws:
655 if (SCM_GCMARKP (ptr))
656 break;
657 SCM_SETGCMARK (ptr);
658 ptr = SCM_GCCDR (ptr);
659 goto gc_mark_loop;
660 case scm_tcs_cons_gloc:
661 if (SCM_GCMARKP (ptr))
662 break;
663 SCM_SETGCMARK (ptr);
664 {
665 SCM vcell;
666 vcell = SCM_CAR (ptr) - 1L;
667 switch (SCM_CDR (vcell))
668 {
669 default:
670 scm_gc_mark (vcell);
671 ptr = SCM_GCCDR (ptr);
672 goto gc_mark_loop;
673 case 1: /* ! */
674 case 0: /* ! */
675 {
676 SCM layout;
677 SCM * vtable_data;
678 int len;
679 char * fields_desc;
680 register SCM * mem;
681 register int x;
682
683 vtable_data = (SCM *)vcell;
684 layout = vtable_data[scm_vtable_index_layout];
685 len = SCM_LENGTH (layout);
686 fields_desc = SCM_CHARS (layout);
687 /* We're using SCM_GCCDR here like STRUCT_DATA, except
688 that it removes the mark */
689 mem = (SCM *)SCM_GCCDR (ptr);
690
691 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
692 {
693 scm_gc_mark (mem[scm_struct_i_procedure]);
694 scm_gc_mark (mem[scm_struct_i_setter]);
695 }
696 if (len)
697 {
698 for (x = 0; x < len - 2; x += 2, ++mem)
699 if (fields_desc[x] == 'p')
700 scm_gc_mark (*mem);
701 if (fields_desc[x] == 'p')
702 {
703 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
704 for (x = *mem; x; --x)
705 scm_gc_mark (*++mem);
706 else
707 scm_gc_mark (*mem);
708 }
709 }
710 if (!SCM_CDR (vcell))
711 {
712 SCM_SETGCMARK (vcell);
713 ptr = vtable_data[scm_vtable_index_vtable];
714 goto gc_mark_loop;
715 }
716 }
717 }
718 }
719 break;
720 case scm_tcs_closures:
721 if (SCM_GCMARKP (ptr))
722 break;
723 SCM_SETGCMARK (ptr);
724 if (SCM_IMP (SCM_CDR (ptr)))
725 {
726 ptr = SCM_CLOSCAR (ptr);
727 goto gc_mark_nimp;
728 }
729 scm_gc_mark (SCM_CLOSCAR (ptr));
730 ptr = SCM_GCCDR (ptr);
731 goto gc_mark_nimp;
732 case scm_tc7_vector:
733 case scm_tc7_lvector:
734 #ifdef CCLO
735 case scm_tc7_cclo:
736 #endif
737 if (SCM_GC8MARKP (ptr))
738 break;
739 SCM_SETGC8MARK (ptr);
740 i = SCM_LENGTH (ptr);
741 if (i == 0)
742 break;
743 while (--i > 0)
744 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
745 scm_gc_mark (SCM_VELTS (ptr)[i]);
746 ptr = SCM_VELTS (ptr)[0];
747 goto gc_mark_loop;
748 case scm_tc7_contin:
749 if SCM_GC8MARKP
750 (ptr) break;
751 SCM_SETGC8MARK (ptr);
752 if (SCM_VELTS (ptr))
753 scm_mark_locations (SCM_VELTS (ptr),
754 (scm_sizet)
755 (SCM_LENGTH (ptr) +
756 (sizeof (SCM_STACKITEM) + -1 +
757 sizeof (scm_contregs)) /
758 sizeof (SCM_STACKITEM)));
759 break;
760 #ifdef HAVE_ARRAYS
761 case scm_tc7_bvect:
762 case scm_tc7_byvect:
763 case scm_tc7_ivect:
764 case scm_tc7_uvect:
765 case scm_tc7_fvect:
766 case scm_tc7_dvect:
767 case scm_tc7_cvect:
768 case scm_tc7_svect:
769 #ifdef HAVE_LONG_LONGS
770 case scm_tc7_llvect:
771 #endif
772 #endif
773 case scm_tc7_string:
774 SCM_SETGC8MARK (ptr);
775 break;
776
777 case scm_tc7_substring:
778 if (SCM_GC8MARKP(ptr))
779 break;
780 SCM_SETGC8MARK (ptr);
781 ptr = SCM_CDR (ptr);
782 goto gc_mark_loop;
783
784 case scm_tc7_wvect:
785 if (SCM_GC8MARKP(ptr))
786 break;
787 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
788 scm_weak_vectors = ptr;
789 SCM_SETGC8MARK (ptr);
790 if (SCM_IS_WHVEC_ANY (ptr))
791 {
792 int x;
793 int len;
794 int weak_keys;
795 int weak_values;
796
797 len = SCM_LENGTH (ptr);
798 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
799 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
800
801 for (x = 0; x < len; ++x)
802 {
803 SCM alist;
804 alist = SCM_VELTS (ptr)[x];
805
806 /* mark everything on the alist except the keys or
807 * values, according to weak_values and weak_keys. */
808 while ( SCM_NIMP (alist)
809 && SCM_CONSP (alist)
810 && !SCM_GCMARKP (alist)
811 && SCM_NIMP (SCM_CAR (alist))
812 && SCM_CONSP (SCM_CAR (alist)))
813 {
814 SCM kvpair;
815 SCM next_alist;
816
817 kvpair = SCM_CAR (alist);
818 next_alist = SCM_CDR (alist);
819 /*
820 * Do not do this:
821 * SCM_SETGCMARK (alist);
822 * SCM_SETGCMARK (kvpair);
823 *
824 * It may be that either the key or value is protected by
825 * an escaped reference to part of the spine of this alist.
826 * If we mark the spine here, and only mark one or neither of the
827 * key and value, they may never be properly marked.
828 * This leads to a horrible situation in which an alist containing
829 * freelist cells is exported.
830 *
831 * So only mark the spines of these arrays last of all marking.
832 * If somebody confuses us by constructing a weak vector
833 * with a circular alist then we are hosed, but at least we
834 * won't prematurely drop table entries.
835 */
836 if (!weak_keys)
837 scm_gc_mark (SCM_CAR (kvpair));
838 if (!weak_values)
839 scm_gc_mark (SCM_GCCDR (kvpair));
840 alist = next_alist;
841 }
842 if (SCM_NIMP (alist))
843 scm_gc_mark (alist);
844 }
845 }
846 break;
847
848 case scm_tc7_msymbol:
849 if (SCM_GC8MARKP(ptr))
850 break;
851 SCM_SETGC8MARK (ptr);
852 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
853 ptr = SCM_SYMBOL_PROPS (ptr);
854 goto gc_mark_loop;
855 case scm_tc7_ssymbol:
856 if (SCM_GC8MARKP(ptr))
857 break;
858 SCM_SETGC8MARK (ptr);
859 break;
860 case scm_tcs_subrs:
861 break;
862 case scm_tc7_port:
863 i = SCM_PTOBNUM (ptr);
864 if (!(i < scm_numptob))
865 goto def;
866 if (SCM_GC8MARKP (ptr))
867 break;
868 SCM_SETGC8MARK (ptr);
869 if (SCM_PTAB_ENTRY(ptr))
870 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
871 if (scm_ptobs[i].mark)
872 {
873 ptr = (scm_ptobs[i].mark) (ptr);
874 goto gc_mark_loop;
875 }
876 else
877 return;
878 break;
879 case scm_tc7_smob:
880 if (SCM_GC8MARKP (ptr))
881 break;
882 SCM_SETGC8MARK (ptr);
883 switch SCM_GCTYP16 (ptr)
884 { /* should be faster than going through scm_smobs */
885 case scm_tc_free_cell:
886 /* printf("found free_cell %X ", ptr); fflush(stdout); */
887 break;
888 case scm_tc16_allocated:
889 SCM_SETGC8MARK (ptr);
890 break;
891 case scm_tcs_bignums:
892 case scm_tc16_flo:
893 break;
894 default:
895 i = SCM_SMOBNUM (ptr);
896 if (!(i < scm_numsmob))
897 goto def;
898 if (scm_smobs[i].mark)
899 {
900 ptr = (scm_smobs[i].mark) (ptr);
901 goto gc_mark_loop;
902 }
903 else
904 return;
905 }
906 break;
907 default:
908 def:scm_wta (ptr, "unknown type in ", "gc_mark");
909 }
910 }
911
912
913 /* Mark a Region Conservatively
914 */
915
916 void
917 scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
918 {
919 register long m = n;
920 register int i, j;
921 register SCM_CELLPTR ptr;
922
923 while (0 <= --m)
924 if SCM_CELLP (*(SCM **) & x[m])
925 {
926 ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & x[m]));
927 i = 0;
928 j = scm_n_heap_segs - 1;
929 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
930 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
931 {
932 while (i <= j)
933 {
934 int seg_id;
935 seg_id = -1;
936 if ( (i == j)
937 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
938 seg_id = i;
939 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
940 seg_id = j;
941 else
942 {
943 int k;
944 k = (i + j) / 2;
945 if (k == i)
946 break;
947 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
948 {
949 j = k;
950 ++i;
951 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
952 continue;
953 else
954 break;
955 }
956 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
957 {
958 i = k;
959 --j;
960 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
961 continue;
962 else
963 break;
964 }
965 }
966 if ( !scm_heap_table[seg_id].valid
967 || scm_heap_table[seg_id].valid (ptr,
968 &scm_heap_table[seg_id]))
969 scm_gc_mark (*(SCM *) & x[m]);
970 break;
971 }
972
973 }
974 }
975 }
976
977
978 /* The following is a C predicate which determines if an SCM value can be
979 regarded as a pointer to a cell on the heap. The code is duplicated
980 from scm_mark_locations. */
981
982
983 int
984 scm_cellp (SCM value)
985 {
986 register int i, j;
987 register SCM_CELLPTR ptr;
988
989 if SCM_CELLP (*(SCM **) & value)
990 {
991 ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & value));
992 i = 0;
993 j = scm_n_heap_segs - 1;
994 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
995 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
996 {
997 while (i <= j)
998 {
999 int seg_id;
1000 seg_id = -1;
1001 if ( (i == j)
1002 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1003 seg_id = i;
1004 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1005 seg_id = j;
1006 else
1007 {
1008 int k;
1009 k = (i + j) / 2;
1010 if (k == i)
1011 break;
1012 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1013 {
1014 j = k;
1015 ++i;
1016 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1017 continue;
1018 else
1019 break;
1020 }
1021 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1022 {
1023 i = k;
1024 --j;
1025 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1026 continue;
1027 else
1028 break;
1029 }
1030 }
1031 if ( !scm_heap_table[seg_id].valid
1032 || scm_heap_table[seg_id].valid (ptr,
1033 &scm_heap_table[seg_id]))
1034 return 1;
1035 break;
1036 }
1037
1038 }
1039 }
1040 return 0;
1041 }
1042
1043
1044 static void
1045 scm_mark_weak_vector_spines ()
1046 {
1047 SCM w;
1048
1049 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
1050 {
1051 if (SCM_IS_WHVEC_ANY (w))
1052 {
1053 SCM *ptr;
1054 SCM obj;
1055 int j;
1056 int n;
1057
1058 obj = w;
1059 ptr = SCM_VELTS (w);
1060 n = SCM_LENGTH (w);
1061 for (j = 0; j < n; ++j)
1062 {
1063 SCM alist;
1064
1065 alist = ptr[j];
1066 while ( SCM_NIMP (alist)
1067 && SCM_CONSP (alist)
1068 && !SCM_GCMARKP (alist)
1069 && SCM_NIMP (SCM_CAR (alist))
1070 && SCM_CONSP (SCM_CAR (alist)))
1071 {
1072 SCM_SETGCMARK (alist);
1073 SCM_SETGCMARK (SCM_CAR (alist));
1074 alist = SCM_GCCDR (alist);
1075 }
1076 }
1077 }
1078 }
1079 }
1080
1081
1082
1083 void
1084 scm_gc_sweep ()
1085 {
1086 register SCM_CELLPTR ptr;
1087 #ifdef SCM_POINTERS_MUNGED
1088 register SCM scmptr;
1089 #else
1090 #undef scmptr
1091 #define scmptr (SCM)ptr
1092 #endif
1093 register SCM nfreelist;
1094 register SCM *hp_freelist;
1095 register long m;
1096 register int span;
1097 long i;
1098 scm_sizet seg_size;
1099
1100 m = 0;
1101
1102 /* Reset all free list pointers. We'll reconstruct them completely
1103 while scanning. */
1104 for (i = 0; i < scm_n_heap_segs; i++)
1105 *scm_heap_table[i].freelistp = SCM_EOL;
1106
1107 for (i = 0; i < scm_n_heap_segs; i++)
1108 {
1109 register scm_sizet n = 0;
1110 register scm_sizet j;
1111
1112 /* Unmarked cells go onto the front of the freelist this heap
1113 segment points to. Rather than updating the real freelist
1114 pointer as we go along, we accumulate the new head in
1115 nfreelist. Then, if it turns out that the entire segment is
1116 free, we free (i.e., malloc's free) the whole segment, and
1117 simply don't assign nfreelist back into the real freelist. */
1118 hp_freelist = scm_heap_table[i].freelistp;
1119 nfreelist = *hp_freelist;
1120
1121 span = scm_heap_table[i].ncells;
1122 ptr = CELL_UP (scm_heap_table[i].bounds[0]);
1123 seg_size = CELL_DN (scm_heap_table[i].bounds[1]) - ptr;
1124 for (j = seg_size + span; j -= span; ptr += span)
1125 {
1126 #ifdef SCM_POINTERS_MUNGED
1127 scmptr = PTR2SCM (ptr);
1128 #endif
1129 switch SCM_TYP7 (scmptr)
1130 {
1131 case scm_tcs_cons_gloc:
1132 if (SCM_GCMARKP (scmptr))
1133 {
1134 if (SCM_CDR (SCM_CAR (scmptr) - 1) == (SCM)1)
1135 SCM_SETCDR (SCM_CAR (scmptr) - 1, (SCM) 0);
1136 goto cmrkcontinue;
1137 }
1138 {
1139 SCM vcell;
1140 vcell = SCM_CAR (scmptr) - 1L;
1141
1142 if ((SCM_CDR (vcell) == 0) || (SCM_CDR (vcell) == 1))
1143 {
1144 scm_struct_free_t free
1145 = (scm_struct_free_t) ((SCM*) vcell)[scm_struct_i_free];
1146 m += free ((SCM *) vcell, (SCM *) SCM_GCCDR (scmptr));
1147 }
1148 }
1149 break;
1150 case scm_tcs_cons_imcar:
1151 case scm_tcs_cons_nimcar:
1152 case scm_tcs_closures:
1153 case scm_tc7_pws:
1154 if (SCM_GCMARKP (scmptr))
1155 goto cmrkcontinue;
1156 break;
1157 case scm_tc7_wvect:
1158 if (SCM_GC8MARKP (scmptr))
1159 {
1160 goto c8mrkcontinue;
1161 }
1162 else
1163 {
1164 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1165 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
1166 break;
1167 }
1168
1169 case scm_tc7_vector:
1170 case scm_tc7_lvector:
1171 #ifdef CCLO
1172 case scm_tc7_cclo:
1173 #endif
1174 if (SCM_GC8MARKP (scmptr))
1175 goto c8mrkcontinue;
1176
1177 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1178 freechars:
1179 scm_must_free (SCM_CHARS (scmptr));
1180 /* SCM_SETCHARS(scmptr, 0);*/
1181 break;
1182 #ifdef HAVE_ARRAYS
1183 case scm_tc7_bvect:
1184 if SCM_GC8MARKP (scmptr)
1185 goto c8mrkcontinue;
1186 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1187 goto freechars;
1188 case scm_tc7_byvect:
1189 if SCM_GC8MARKP (scmptr)
1190 goto c8mrkcontinue;
1191 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1192 goto freechars;
1193 case scm_tc7_ivect:
1194 case scm_tc7_uvect:
1195 if SCM_GC8MARKP (scmptr)
1196 goto c8mrkcontinue;
1197 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1198 goto freechars;
1199 case scm_tc7_svect:
1200 if SCM_GC8MARKP (scmptr)
1201 goto c8mrkcontinue;
1202 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1203 goto freechars;
1204 #ifdef HAVE_LONG_LONGS
1205 case scm_tc7_llvect:
1206 if SCM_GC8MARKP (scmptr)
1207 goto c8mrkcontinue;
1208 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1209 goto freechars;
1210 #endif
1211 case scm_tc7_fvect:
1212 if SCM_GC8MARKP (scmptr)
1213 goto c8mrkcontinue;
1214 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1215 goto freechars;
1216 case scm_tc7_dvect:
1217 if SCM_GC8MARKP (scmptr)
1218 goto c8mrkcontinue;
1219 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1220 goto freechars;
1221 case scm_tc7_cvect:
1222 if SCM_GC8MARKP (scmptr)
1223 goto c8mrkcontinue;
1224 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1225 goto freechars;
1226 #endif
1227 case scm_tc7_substring:
1228 if (SCM_GC8MARKP (scmptr))
1229 goto c8mrkcontinue;
1230 break;
1231 case scm_tc7_string:
1232 if (SCM_GC8MARKP (scmptr))
1233 goto c8mrkcontinue;
1234 m += SCM_HUGE_LENGTH (scmptr) + 1;
1235 goto freechars;
1236 case scm_tc7_msymbol:
1237 if (SCM_GC8MARKP (scmptr))
1238 goto c8mrkcontinue;
1239 m += ( SCM_LENGTH (scmptr)
1240 + 1
1241 + sizeof (SCM) * ((SCM *)SCM_CHARS (scmptr) - SCM_SLOTS(scmptr)));
1242 scm_must_free ((char *)SCM_SLOTS (scmptr));
1243 break;
1244 case scm_tc7_contin:
1245 if SCM_GC8MARKP (scmptr)
1246 goto c8mrkcontinue;
1247 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
1248 if (SCM_VELTS (scmptr))
1249 goto freechars;
1250 case scm_tc7_ssymbol:
1251 if SCM_GC8MARKP(scmptr)
1252 goto c8mrkcontinue;
1253 break;
1254 case scm_tcs_subrs:
1255 continue;
1256 case scm_tc7_port:
1257 if SCM_GC8MARKP (scmptr)
1258 goto c8mrkcontinue;
1259 if SCM_OPENP (scmptr)
1260 {
1261 int k = SCM_PTOBNUM (scmptr);
1262 if (!(k < scm_numptob))
1263 goto sweeperr;
1264 /* Keep "revealed" ports alive. */
1265 if (scm_revealed_count(scmptr) > 0)
1266 continue;
1267 /* Yes, I really do mean scm_ptobs[k].free */
1268 /* rather than ftobs[k].close. .close */
1269 /* is for explicit CLOSE-PORT by user */
1270 m += (scm_ptobs[k].free) (scmptr);
1271 SCM_SETSTREAM (scmptr, 0);
1272 scm_remove_from_port_table (scmptr);
1273 scm_gc_ports_collected++;
1274 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
1275 }
1276 break;
1277 case scm_tc7_smob:
1278 switch SCM_GCTYP16 (scmptr)
1279 {
1280 case scm_tc_free_cell:
1281 if SCM_GC8MARKP (scmptr)
1282 goto c8mrkcontinue;
1283 break;
1284 #ifdef SCM_BIGDIG
1285 case scm_tcs_bignums:
1286 if SCM_GC8MARKP (scmptr)
1287 goto c8mrkcontinue;
1288 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1289 goto freechars;
1290 #endif /* def SCM_BIGDIG */
1291 case scm_tc16_flo:
1292 if SCM_GC8MARKP (scmptr)
1293 goto c8mrkcontinue;
1294 switch ((int) (SCM_CAR (scmptr) >> 16))
1295 {
1296 case (SCM_IMAG_PART | SCM_REAL_PART) >> 16:
1297 m += sizeof (double);
1298 case SCM_REAL_PART >> 16:
1299 case SCM_IMAG_PART >> 16:
1300 m += sizeof (double);
1301 goto freechars;
1302 case 0:
1303 break;
1304 default:
1305 goto sweeperr;
1306 }
1307 break;
1308 default:
1309 if SCM_GC8MARKP (scmptr)
1310 goto c8mrkcontinue;
1311
1312 {
1313 int k;
1314 k = SCM_SMOBNUM (scmptr);
1315 if (!(k < scm_numsmob))
1316 goto sweeperr;
1317 m += (scm_smobs[k].free) ((SCM) scmptr);
1318 break;
1319 }
1320 }
1321 break;
1322 default:
1323 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1324 }
1325 n += span;
1326 #if 0
1327 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1328 exit (2);
1329 #endif
1330 /* Stick the new cell on the front of nfreelist. It's
1331 critical that we mark this cell as freed; otherwise, the
1332 conservative collector might trace it as some other type
1333 of object. */
1334 SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
1335 SCM_SETCDR (scmptr, nfreelist);
1336 nfreelist = scmptr;
1337
1338 continue;
1339 c8mrkcontinue:
1340 SCM_CLRGC8MARK (scmptr);
1341 continue;
1342 cmrkcontinue:
1343 SCM_CLRGCMARK (scmptr);
1344 }
1345 #ifdef GC_FREE_SEGMENTS
1346 if (n == seg_size)
1347 {
1348 register long j;
1349
1350 scm_heap_size -= seg_size;
1351 free ((char *) scm_heap_table[i].bounds[0]);
1352 scm_heap_table[i].bounds[0] = 0;
1353 for (j = i + 1; j < scm_n_heap_segs; j++)
1354 scm_heap_table[j - 1] = scm_heap_table[j];
1355 scm_n_heap_segs -= 1;
1356 i--; /* We need to scan the segment just moved. */
1357 }
1358 else
1359 #endif /* ifdef GC_FREE_SEGMENTS */
1360 /* Update the real freelist pointer to point to the head of
1361 the list of free cells we've built for this segment. */
1362 *hp_freelist = nfreelist;
1363
1364 #ifdef GUILE_DEBUG_FREELIST
1365 scm_check_freelist ();
1366 scm_map_free_list ();
1367 #endif
1368
1369 scm_gc_cells_collected += n;
1370 }
1371 /* Scan weak vectors. */
1372 {
1373 SCM *ptr, w;
1374 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
1375 {
1376 if (!SCM_IS_WHVEC_ANY (w))
1377 {
1378 register long j, n;
1379
1380 ptr = SCM_VELTS (w);
1381 n = SCM_LENGTH (w);
1382 for (j = 0; j < n; ++j)
1383 if (SCM_FREEP (ptr[j]))
1384 ptr[j] = SCM_BOOL_F;
1385 }
1386 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1387 {
1388 SCM obj = w;
1389 register long n = SCM_LENGTH (w);
1390 register long j;
1391
1392 ptr = SCM_VELTS (w);
1393
1394 for (j = 0; j < n; ++j)
1395 {
1396 SCM * fixup;
1397 SCM alist;
1398 int weak_keys;
1399 int weak_values;
1400
1401 weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
1402 weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);
1403
1404 fixup = ptr + j;
1405 alist = *fixup;
1406
1407 while (SCM_NIMP (alist)
1408 && SCM_CONSP (alist)
1409 && SCM_NIMP (SCM_CAR (alist))
1410 && SCM_CONSP (SCM_CAR (alist)))
1411 {
1412 SCM key;
1413 SCM value;
1414
1415 key = SCM_CAAR (alist);
1416 value = SCM_CDAR (alist);
1417 if ( (weak_keys && SCM_FREEP (key))
1418 || (weak_values && SCM_FREEP (value)))
1419 {
1420 *fixup = SCM_CDR (alist);
1421 }
1422 else
1423 fixup = SCM_CDRLOC (alist);
1424 alist = SCM_CDR (alist);
1425 }
1426 }
1427 }
1428 }
1429 }
1430 scm_cells_allocated = (scm_heap_size - scm_gc_cells_collected);
1431 scm_mallocated -= m;
1432 scm_gc_malloc_collected = m;
1433 }
1434
1435
1436 \f
1437
1438 /* {Front end to malloc}
1439 *
1440 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1441 *
1442 * These functions provide services comperable to malloc, realloc, and
1443 * free. They are for allocating malloced parts of scheme objects.
1444 * The primary purpose of the front end is to impose calls to gc.
1445 */
1446
1447 /* scm_must_malloc
1448 * Return newly malloced storage or throw an error.
1449 *
1450 * The parameter WHAT is a string for error reporting.
1451 * If the threshold scm_mtrigger will be passed by this
1452 * allocation, or if the first call to malloc fails,
1453 * garbage collect -- on the presumption that some objects
1454 * using malloced storage may be collected.
1455 *
1456 * The limit scm_mtrigger may be raised by this allocation.
1457 */
1458 void *
1459 scm_must_malloc (scm_sizet size, const char *what)
1460 {
1461 void *ptr;
1462 unsigned long nm = scm_mallocated + size;
1463
1464 if (nm <= scm_mtrigger)
1465 {
1466 SCM_SYSCALL (ptr = malloc (size));
1467 if (NULL != ptr)
1468 {
1469 scm_mallocated = nm;
1470 return ptr;
1471 }
1472 }
1473
1474 scm_igc (what);
1475
1476 nm = scm_mallocated + size;
1477 SCM_SYSCALL (ptr = malloc (size));
1478 if (NULL != ptr)
1479 {
1480 scm_mallocated = nm;
1481 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1482 if (nm > scm_mtrigger)
1483 scm_mtrigger = nm + nm / 2;
1484 else
1485 scm_mtrigger += scm_mtrigger / 2;
1486 }
1487 return ptr;
1488 }
1489
1490 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1491 return 0; /* never reached */
1492 }
1493
1494
1495 /* scm_must_realloc
1496 * is similar to scm_must_malloc.
1497 */
1498 void *
1499 scm_must_realloc (void *where,
1500 scm_sizet old_size,
1501 scm_sizet size,
1502 const char *what)
1503 {
1504 void *ptr;
1505 scm_sizet nm = scm_mallocated + size - old_size;
1506
1507 if (nm <= scm_mtrigger)
1508 {
1509 SCM_SYSCALL (ptr = realloc (where, size));
1510 if (NULL != ptr)
1511 {
1512 scm_mallocated = nm;
1513 return ptr;
1514 }
1515 }
1516
1517 scm_igc (what);
1518
1519 nm = scm_mallocated + size - old_size;
1520 SCM_SYSCALL (ptr = realloc (where, size));
1521 if (NULL != ptr)
1522 {
1523 scm_mallocated = nm;
1524 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1525 if (nm > scm_mtrigger)
1526 scm_mtrigger = nm + nm / 2;
1527 else
1528 scm_mtrigger += scm_mtrigger / 2;
1529 }
1530 return ptr;
1531 }
1532
1533 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1534 return 0; /* never reached */
1535 }
1536
1537 void
1538 scm_must_free (void *obj)
1539 {
1540 if (obj)
1541 free (obj);
1542 else
1543 scm_wta (SCM_INUM0, "already free", "");
1544 }
1545
1546 /* Announce that there has been some malloc done that will be freed
1547 * during gc. A typical use is for a smob that uses some malloced
1548 * memory but can not get it from scm_must_malloc (for whatever
1549 * reason). When a new object of this smob is created you call
1550 * scm_done_malloc with the size of the object. When your smob free
1551 * function is called, be sure to include this size in the return
1552 * value. */
1553
1554 void
1555 scm_done_malloc (long size)
1556 {
1557 scm_mallocated += size;
1558
1559 if (scm_mallocated > scm_mtrigger)
1560 {
1561 scm_igc ("foreign mallocs");
1562 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1563 {
1564 if (scm_mallocated > scm_mtrigger)
1565 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1566 else
1567 scm_mtrigger += scm_mtrigger / 2;
1568 }
1569 }
1570 }
1571
1572
1573 \f
1574
1575 /* {Heap Segments}
1576 *
1577 * Each heap segment is an array of objects of a particular size.
1578 * Every segment has an associated (possibly shared) freelist.
1579 * A table of segment records is kept that records the upper and
1580 * lower extents of the segment; this is used during the conservative
1581 * phase of gc to identify probably gc roots (because they point
1582 * into valid segments at reasonable offsets). */
1583
1584 /* scm_expmem
1585 * is true if the first segment was smaller than INIT_HEAP_SEG.
1586 * If scm_expmem is set to one, subsequent segment allocations will
1587 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1588 */
1589 int scm_expmem = 0;
1590
1591 /* scm_heap_org
1592 * is the lowest base address of any heap segment.
1593 */
1594 SCM_CELLPTR scm_heap_org;
1595
1596 struct scm_heap_seg_data * scm_heap_table = 0;
1597 int scm_n_heap_segs = 0;
1598
1599 /* scm_heap_size
1600 * is the total number of cells in heap segments.
1601 */
1602 unsigned long scm_heap_size = 0;
1603
1604 /* init_heap_seg
1605 * initializes a new heap segment and return the number of objects it contains.
1606 *
1607 * The segment origin, segment size in bytes, and the span of objects
1608 * in cells are input parameters. The freelist is both input and output.
1609 *
1610 * This function presume that the scm_heap_table has already been expanded
1611 * to accomodate a new segment record.
1612 */
1613
1614
1615 static scm_sizet
1616 init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, int ncells, SCM *freelistp)
1617 {
1618 register SCM_CELLPTR ptr;
1619 #ifdef SCM_POINTERS_MUNGED
1620 register SCM scmptr;
1621 #else
1622 #undef scmptr
1623 #define scmptr ptr
1624 #endif
1625 SCM_CELLPTR seg_end;
1626 int new_seg_index;
1627 int n_new_objects;
1628
1629 if (seg_org == NULL)
1630 return 0;
1631
1632 ptr = seg_org;
1633
1634 /* Compute the ceiling on valid object pointers w/in this segment.
1635 */
1636 seg_end = CELL_DN ((char *) ptr + size);
1637
1638 /* Find the right place and insert the segment record.
1639 *
1640 */
1641 for (new_seg_index = 0;
1642 ( (new_seg_index < scm_n_heap_segs)
1643 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
1644 new_seg_index++)
1645 ;
1646
1647 {
1648 int i;
1649 for (i = scm_n_heap_segs; i > new_seg_index; --i)
1650 scm_heap_table[i] = scm_heap_table[i - 1];
1651 }
1652
1653 ++scm_n_heap_segs;
1654
1655 scm_heap_table[new_seg_index].valid = 0;
1656 scm_heap_table[new_seg_index].ncells = ncells;
1657 scm_heap_table[new_seg_index].freelistp = freelistp;
1658 scm_heap_table[new_seg_index].bounds[0] = (SCM_CELLPTR)ptr;
1659 scm_heap_table[new_seg_index].bounds[1] = (SCM_CELLPTR)seg_end;
1660
1661
1662 /* Compute the least valid object pointer w/in this segment
1663 */
1664 ptr = CELL_UP (ptr);
1665
1666
1667 n_new_objects = seg_end - ptr;
1668
1669 /* Prepend objects in this segment to the freelist.
1670 */
1671 while (ptr < seg_end)
1672 {
1673 #ifdef SCM_POINTERS_MUNGED
1674 scmptr = PTR2SCM (ptr);
1675 #endif
1676 SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
1677 SCM_SETCDR (scmptr, PTR2SCM (ptr + ncells));
1678 ptr += ncells;
1679 }
1680
1681 ptr -= ncells;
1682
1683 /* Patch up the last freelist pointer in the segment
1684 * to join it to the input freelist.
1685 */
1686 SCM_SETCDR (PTR2SCM (ptr), *freelistp);
1687 *freelistp = PTR2SCM (CELL_UP (seg_org));
1688
1689 scm_heap_size += (ncells * n_new_objects);
1690 return size;
1691 #ifdef scmptr
1692 #undef scmptr
1693 #endif
1694 }
1695
1696
1697 static void
1698 alloc_some_heap (int ncells, SCM *freelistp)
1699 {
1700 struct scm_heap_seg_data * tmptable;
1701 SCM_CELLPTR ptr;
1702 scm_sizet len;
1703
1704 /* Critical code sections (such as the garbage collector)
1705 * aren't supposed to add heap segments.
1706 */
1707 if (scm_gc_heap_lock)
1708 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
1709
1710 /* Expand the heap tables to have room for the new segment.
1711 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
1712 * only if the allocation of the segment itself succeeds.
1713 */
1714 len = (1 + scm_n_heap_segs) * sizeof (struct scm_heap_seg_data);
1715
1716 SCM_SYSCALL (tmptable = ((struct scm_heap_seg_data *)
1717 realloc ((char *)scm_heap_table, len)));
1718 if (!tmptable)
1719 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
1720 else
1721 scm_heap_table = tmptable;
1722
1723
1724 /* Pick a size for the new heap segment.
1725 * The rule for picking the size of a segment is explained in
1726 * gc.h
1727 */
1728 if (scm_expmem)
1729 {
1730 len = (scm_sizet) (SCM_EXPHEAP (scm_heap_size) * sizeof (scm_cell));
1731 if ((scm_sizet) (SCM_EXPHEAP (scm_heap_size) * sizeof (scm_cell)) != len)
1732 len = 0;
1733 }
1734 else
1735 len = SCM_HEAP_SEG_SIZE;
1736
1737 {
1738 scm_sizet smallest;
1739
1740 smallest = (ncells * sizeof (scm_cell));
1741 if (len < smallest)
1742 len = (ncells * sizeof (scm_cell));
1743
1744 /* Allocate with decaying ambition. */
1745 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
1746 && (len >= smallest))
1747 {
1748 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (len));
1749 if (ptr)
1750 {
1751 init_heap_seg (ptr, len, ncells, freelistp);
1752 return;
1753 }
1754 len /= 2;
1755 }
1756 }
1757
1758 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
1759 }
1760
1761
1762
1763 GUILE_PROC (scm_unhash_name, "unhash-name", 1, 0, 0,
1764 (SCM name),
1765 "")
1766 #define FUNC_NAME s_scm_unhash_name
1767 {
1768 int x;
1769 int bound;
1770 SCM_VALIDATE_SYMBOL(1,name);
1771 SCM_DEFER_INTS;
1772 bound = scm_n_heap_segs;
1773 for (x = 0; x < bound; ++x)
1774 {
1775 SCM_CELLPTR p;
1776 SCM_CELLPTR pbound;
1777 p = (SCM_CELLPTR)scm_heap_table[x].bounds[0];
1778 pbound = (SCM_CELLPTR)scm_heap_table[x].bounds[1];
1779 while (p < pbound)
1780 {
1781 SCM incar;
1782 incar = p->car;
1783 if (1 == (7 & (int)incar))
1784 {
1785 --incar;
1786 if ( ((name == SCM_BOOL_T) || (SCM_CAR (incar) == name))
1787 && (SCM_CDR (incar) != 0)
1788 && (SCM_CDR (incar) != 1))
1789 {
1790 p->car = name;
1791 }
1792 }
1793 ++p;
1794 }
1795 }
1796 SCM_ALLOW_INTS;
1797 return name;
1798 }
1799 #undef FUNC_NAME
1800
1801
1802 \f
1803 /* {GC Protection Helper Functions}
1804 */
1805
1806
1807 void
1808 scm_remember (SCM *ptr)
1809 { /* empty */ }
1810
1811
1812 SCM
1813 scm_return_first (SCM elt, ...)
1814 {
1815 return elt;
1816 }
1817
1818
1819 SCM
1820 scm_permanent_object (SCM obj)
1821 {
1822 SCM_REDEFER_INTS;
1823 scm_permobjs = scm_cons (obj, scm_permobjs);
1824 SCM_REALLOW_INTS;
1825 return obj;
1826 }
1827
1828
1829 /* Protect OBJ from the garbage collector. OBJ will not be freed,
1830 even if all other references are dropped, until someone applies
1831 scm_unprotect_object to it. This function returns OBJ.
1832
1833 Calls to scm_protect_object nest. For every object O, there is a
1834 counter which scm_protect_object(O) increments and
1835 scm_unprotect_object(O) decrements, if it is greater than zero. If
1836 an object's counter is greater than zero, the garbage collector
1837 will not free it.
1838
1839 Of course, that's not how it's implemented. scm_protect_object and
1840 scm_unprotect_object just maintain a list of references to things.
1841 Since the GC knows about this list, all objects it mentions stay
1842 alive. scm_protect_object adds its argument to the list;
1843 scm_unprotect_object removes the first occurrence of its argument
1844 to the list. */
1845 SCM
1846 scm_protect_object (SCM obj)
1847 {
1848 scm_protects = scm_cons (obj, scm_protects);
1849
1850 return obj;
1851 }
1852
1853
1854 /* Remove any protection for OBJ established by a prior call to
1855 scm_protect_object. This function returns OBJ.
1856
1857 See scm_protect_object for more information. */
1858 SCM
1859 scm_unprotect_object (SCM obj)
1860 {
1861 SCM *tail_ptr = &scm_protects;
1862
1863 while (SCM_CONSP (*tail_ptr))
1864 if (SCM_CAR (*tail_ptr) == obj)
1865 {
1866 *tail_ptr = SCM_CDR (*tail_ptr);
1867 break;
1868 }
1869 else
1870 tail_ptr = SCM_CDRLOC (*tail_ptr);
1871
1872 return obj;
1873 }
1874
1875 int terminating;
1876
1877 /* called on process termination. */
1878 #ifdef HAVE_ATEXIT
1879 static void
1880 cleanup (void)
1881 #else
1882 #ifdef HAVE_ON_EXIT
1883 extern int on_exit (void (*procp) (), int arg);
1884
1885 static void
1886 cleanup (int status, void *arg)
1887 #else
1888 #error Dont know how to setup a cleanup handler on your system.
1889 #endif
1890 #endif
1891 {
1892 terminating = 1;
1893 scm_flush_all_ports ();
1894 }
1895
1896 \f
1897 int
1898 scm_init_storage (scm_sizet init_heap_size)
1899 {
1900 scm_sizet j;
1901
1902 j = SCM_NUM_PROTECTS;
1903 while (j)
1904 scm_sys_protects[--j] = SCM_BOOL_F;
1905 scm_block_gc = 1;
1906 scm_freelist = SCM_EOL;
1907 scm_expmem = 0;
1908
1909 j = SCM_HEAP_SEG_SIZE;
1910 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
1911 scm_heap_table = ((struct scm_heap_seg_data *)
1912 scm_must_malloc (sizeof (struct scm_heap_seg_data), "hplims"));
1913 if (0L == init_heap_size)
1914 init_heap_size = SCM_INIT_HEAP_SIZE;
1915 j = init_heap_size;
1916 if ((init_heap_size != j)
1917 || !init_heap_seg ((SCM_CELLPTR) malloc (j), j, 1, &scm_freelist))
1918 {
1919 j = SCM_HEAP_SEG_SIZE;
1920 if (!init_heap_seg ((SCM_CELLPTR) malloc (j), j, 1, &scm_freelist))
1921 return 1;
1922 }
1923 else
1924 scm_expmem = 1;
1925 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0]);
1926 /* scm_hplims[0] can change. do not remove scm_heap_org */
1927 scm_weak_vectors = SCM_EOL;
1928
1929 /* Initialise the list of ports. */
1930 scm_port_table = (scm_port **)
1931 malloc (sizeof (scm_port *) * scm_port_table_room);
1932 if (!scm_port_table)
1933 return 1;
1934
1935 #ifdef HAVE_ATEXIT
1936 atexit (cleanup);
1937 #else
1938 #ifdef HAVE_ON_EXIT
1939 on_exit (cleanup, 0);
1940 #endif
1941 #endif
1942
1943 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
1944 SCM_SETCDR (scm_undefineds, scm_undefineds);
1945
1946 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
1947 scm_nullstr = scm_makstr (0L, 0);
1948 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
1949 scm_symhash = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
1950 scm_weak_symhash = scm_make_weak_key_hash_table ((SCM) SCM_MAKINUM (scm_symhash_dim));
1951 scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
1952 scm_stand_in_procs = SCM_EOL;
1953 scm_permobjs = SCM_EOL;
1954 scm_protects = SCM_EOL;
1955 scm_asyncs = SCM_EOL;
1956 scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
1957 scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
1958 #ifdef SCM_BIGDIG
1959 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
1960 #endif
1961 return 0;
1962 }
1963 \f
1964
1965 void
1966 scm_init_gc ()
1967 {
1968 #include "gc.x"
1969 }