Removed some stupid code.
[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 \f
46 #include <stdio.h>
47 #include "_scm.h"
48 #include "stime.h"
49 #include "stackchk.h"
50 #include "struct.h"
51 #include "genio.h"
52 #include "weaks.h"
53 #include "guardians.h"
54 #include "smob.h"
55 #include "unif.h"
56 #include "async.h"
57
58 #include "validate.h"
59 #include "gc.h"
60
61 #ifdef HAVE_MALLOC_H
62 #include <malloc.h>
63 #endif
64
65 #ifdef HAVE_UNISTD_H
66 #include <unistd.h>
67 #endif
68
69 #ifdef __STDC__
70 #include <stdarg.h>
71 #define var_start(x, y) va_start(x, y)
72 #else
73 #include <varargs.h>
74 #define var_start(x, y) va_start(x)
75 #endif
76
77 \f
78 /* {heap tuning parameters}
79 *
80 * These are parameters for controlling memory allocation. The heap
81 * is the area out of which cons pairs (made by scm_cons) and object headers are allocated.
82 *
83 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
84 * 64 bit machine. The units of the _SIZE parameters are bytes.
85 * Cons pairs and object headers occupy one heap cell.
86 *
87 * SCM_INIT_HEAP_SIZE is the initial size of the heap. If this much heap is
88 * allocated initially, the heap will grow by half its current size
89 * each subsequent time more heap is needed.
90 *
91 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
92 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
93 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
94 * is in scm_init_storage() and alloc_some_heap() in this file.
95 *
96 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
97 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
98 *
99 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
100 * is needed.
101 *
102 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
103 * trigger a GC.
104 *
105 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
106 * reclaimed by a GC triggered by must_malloc. If less than this is
107 * reclaimed, the trigger threshold is raised. [I don't know what a
108 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
109 * work around an oscillation that caused almost constant GC.]
110 */
111
112 #define SCM_INIT_HEAP_SIZE (40000L * sizeof (scm_cell))
113 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
114 #ifdef _QC
115 # define SCM_HEAP_SEG_SIZE 32768L
116 #else
117 # ifdef sequent
118 # define SCM_HEAP_SEG_SIZE (7000L*sizeof(scm_cell))
119 # else
120 # define SCM_HEAP_SEG_SIZE (16384L*sizeof(scm_cell))
121 # endif
122 #endif
123 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
124 #define SCM_INIT_MALLOC_LIMIT 100000
125 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
126
127 #define SCM_GC_TRIGGER 10000
128 #define SCM_GC_TRIGGER2 10000
129
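/* A minimal sketch (the helper name is invented for illustration) of how
 * the parameters above combine into a segment-size decision; the real
 * logic lives in alloc_some_heap () below.
 */
#if 0
static scm_sizet
pick_segment_size (scm_freelist_t *freelistp, int expmem)
{
  if (expmem)
    /* the initial allocation succeeded: grow by half the current heap */
    return (scm_sizet) SCM_EXPHEAP (freelistp->heap_size * sizeof (scm_cell));
  else
    /* otherwise fall back to fixed-size segments */
    return SCM_HEAP_SEG_SIZE;
}
#endif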
130 /* CELL_UP and CELL_DN are used by init_heap_seg to find scm_cell-aligned inner
131 bounds for allocated storage */
132
133 #ifdef PROT386
134 /*in 386 protected mode we must only adjust the offset */
135 # define CELL_UP(p) MK_FP(FP_SEG(p), ~7&(FP_OFF(p)+7))
136 # define CELL_DN(p) MK_FP(FP_SEG(p), ~7&FP_OFF(p))
137 #else
138 # ifdef _UNICOS
139 # define CELL_UP(p) (SCM_CELLPTR)(~1L & ((long)(p)+1L))
140 # define CELL_DN(p) (SCM_CELLPTR)(~1L & (long)(p))
141 # else
142 # define CELL_UP(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & ((long)(p)+sizeof(scm_cell)-1L))
143 # define CELL_DN(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & (long)(p))
144 # endif /* UNICOS */
145 #endif /* PROT386 */
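/* A minimal sketch of what CELL_UP and CELL_DN compute (the address is
 * made up for illustration): with 8-byte cells, pointers are rounded up
 * or down to the nearest cell boundary.
 */
#if 0
{
  char *p = (char *) 0x1003;
  SCM_CELLPTR up = CELL_UP (p);   /* 0x1008, the next cell boundary */
  SCM_CELLPTR dn = CELL_DN (p);   /* 0x1000, the previous cell boundary */
}
#endif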
146
147
148 \f
149 /* scm_freelists
150 */
151
152 #ifdef GUILE_NEW_GC_SCHEME
153 SCM scm_freelist = SCM_EOL;
154 scm_freelist_t scm_master_freelist = {
155 SCM_EOL, 0, SCM_EOL, SCM_EOL, 0, 0, 1, 0, 0
156 };
157 SCM scm_freelist2 = SCM_EOL;
158 scm_freelist_t scm_master_freelist2 = {
159 SCM_EOL, 0, SCM_EOL, SCM_EOL, 0, 0, 2, 0, 0
160 };
161 #else
162 scm_freelist_t scm_freelist = { SCM_EOL, 1, 0, 0 };
163 scm_freelist_t scm_freelist2 = { SCM_EOL, 2, 0, 0 };
164 #endif
165
166 /* scm_mtrigger
167 * is the number of bytes of must_malloc allocation needed to trigger gc.
168 */
169 unsigned long scm_mtrigger;
170
171
172 /* scm_gc_heap_lock
173 * If set, don't expand the heap. Set only during gc, during which no allocation
174 * is supposed to take place anyway.
175 */
176 int scm_gc_heap_lock = 0;
177
178 /* GC Blocking
179 * Don't pause for collection if this is set -- just
180 * expand the heap.
181 */
182
183 int scm_block_gc = 1;
184
185 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
186 * collection (GC), more space is allocated for the heap.
187 */
188 #define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
189
190 /* During collection, this accumulates objects holding
191 * weak references.
192 */
193 SCM scm_weak_vectors;
194
195 /* GC Statistics Keeping
196 */
197 unsigned long scm_cells_allocated = 0;
198 long scm_mallocated = 0;
199 /* unsigned long scm_gc_cells_collected; */
200 unsigned long scm_gc_malloc_collected;
201 unsigned long scm_gc_ports_collected;
202 unsigned long scm_gc_rt;
203 unsigned long scm_gc_time_taken = 0;
204
205 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
206 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
207 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
208 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
209 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
210 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
211
212
213 struct scm_heap_seg_data
214 {
215 /* lower and upper bounds of the segment */
216 SCM_CELLPTR bounds[2];
217
218 /* address of the head-of-freelist pointer for this segment's cells.
219 All segments usually point to the same one, scm_freelist. */
220 scm_freelist_t *freelistp;
221
222 /* number of SCM words per object in this segment */
223 int span;
224
225 /* If SEG_DATA->valid is non-zero, the conservative marking
226 functions will apply SEG_DATA->valid to the purported pointer and
227 SEG_DATA, and mark the object iff the function returns non-zero.
228 At the moment, I don't think anyone uses this. */
229 int (*valid) ();
230 };
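/* A minimal sketch (the helper name is invented) of the test the
 * conservative marking code below applies to a candidate pointer and a
 * segment record before treating the pointer as a live heap cell.
 */
#if 0
static int
points_into_segment (SCM_CELLPTR ptr, struct scm_heap_seg_data *seg)
{
  return (SCM_PTR_LE (seg->bounds[0], ptr)
          && SCM_PTR_GT (seg->bounds[1], ptr)
          && (!seg->valid || seg->valid (ptr, seg)));
}
#endif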
231
232
233
234
235 static void scm_mark_weak_vector_spines (void);
236 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
237 static void alloc_some_heap (scm_freelist_t *);
238
239
240 \f
241 /* Debugging functions. */
242
243 #ifdef GUILE_DEBUG_FREELIST
244
245 /* Return the number of the heap segment containing CELL. */
246 static int
247 which_seg (SCM cell)
248 {
249 int i;
250
251 for (i = 0; i < scm_n_heap_segs; i++)
252 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
253 && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
254 return i;
255 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
256 SCM_UNPACK (cell));
257 abort ();
258 }
259
260
261 static void
262 map_free_list (scm_freelist_t *freelistp)
263 {
264 int last_seg = -1, count = 0;
265 SCM f;
266
267 for (f = freelistp->cells; SCM_NIMP (f); f = SCM_CDR (f))
268 {
269 int this_seg = which_seg (f);
270
271 if (this_seg != last_seg)
272 {
273 if (last_seg != -1)
274 fprintf (stderr, " %5d %d-cells in segment %d\n",
275 count, freelistp->span, last_seg);
276 last_seg = this_seg;
277 count = 0;
278 }
279 count++;
280 }
281 if (last_seg != -1)
282 fprintf (stderr, " %5d %d-cells in segment %d\n",
283 count, freelistp->span, last_seg);
284 }
285
286 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
287 (),
288 "Print debugging information about the free-list.\n"
289 "`map-free-list' is only included in GUILE_DEBUG_FREELIST builds of Guile.")
290 #define FUNC_NAME s_scm_map_free_list
291 {
292 fprintf (stderr, "%d segments total\n", scm_n_heap_segs);
293 #ifdef GUILE_NEW_GC_SCHEME
294 map_free_list (&scm_master_freelist);
295 map_free_list (&scm_master_freelist2);
296 #else
297 map_free_list (&scm_freelist);
298 map_free_list (&scm_freelist2);
299 #endif
300 fflush (stderr);
301
302 return SCM_UNSPECIFIED;
303 }
304 #undef FUNC_NAME
305
306
307 /* Number of calls to SCM_NEWCELL since startup. */
308 static unsigned long scm_newcell_count;
309 static unsigned long scm_newcell2_count;
310
311 /* Search freelist for anything that isn't marked as a free cell.
312 Abort if we find something. */
313 static void
314 scm_check_freelist (scm_freelist_t *freelistp)
315 {
316 SCM f;
317 int i = 0;
318
319 for (f = freelistp->cells; SCM_NIMP (f); f = SCM_CDR (f), i++)
320 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
321 {
322 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
323 scm_newcell_count, i);
324 fflush (stderr);
325 abort ();
326 }
327 }
328
329 static int scm_debug_check_freelist = 0;
330
331 SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
332 (SCM flag),
333 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
334 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
335 "compile-time flag was selected.\n")
336 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
337 {
338 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
339 return SCM_UNSPECIFIED;
340 }
341 #undef FUNC_NAME
342
343
344 #ifdef GUILE_NEW_GC_SCHEME
345
346 SCM
347 scm_debug_newcell (void)
348 {
349 SCM new;
350
351 scm_newcell_count++;
352 if (scm_debug_check_freelist)
353 {
354 scm_check_freelist (&scm_master_freelist);
355 scm_gc();
356 }
357
358 /* The rest of this is supposed to be identical to the SCM_NEWCELL
359 macro. */
360 if (SCM_IMP (scm_freelist))
361 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
362 else
363 {
364 new = scm_freelist;
365 scm_freelist = SCM_CDR (scm_freelist);
366 SCM_SETCAR (new, scm_tc16_allocated);
367 }
368
369 return new;
370 }
371
372 SCM
373 scm_debug_newcell2 (void)
374 {
375 SCM new;
376
377 scm_newcell2_count++;
378 if (scm_debug_check_freelist)
379 {
380 scm_check_freelist (&scm_master_freelist2);
381 scm_gc ();
382 }
383
384 /* The rest of this is supposed to be identical to the SCM_NEWCELL
385 macro. */
386 if (SCM_IMP (scm_freelist2))
387 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
388 else
389 {
390 new = scm_freelist2;
391 scm_freelist2 = SCM_CDR (scm_freelist2);
392 SCM_SETCAR (new, scm_tc16_allocated);
393 }
394
395 return new;
396 }
397
398 #else /* GUILE_NEW_GC_SCHEME */
399
400 SCM
401 scm_debug_newcell (void)
402 {
403 SCM new;
404
405 scm_newcell_count++;
406 if (scm_debug_check_freelist)
407 {
408 scm_check_freelist (&scm_freelist);
409 scm_gc();
410 }
411
412 /* The rest of this is supposed to be identical to the SCM_NEWCELL
413 macro. */
414 if (SCM_IMP (scm_freelist.cells))
415 new = scm_gc_for_newcell (&scm_freelist);
416 else
417 {
418 new = scm_freelist.cells;
419 scm_freelist.cells = SCM_CDR (scm_freelist.cells);
420 SCM_SETCAR (new, scm_tc16_allocated);
421 ++scm_cells_allocated;
422 }
423
424 return new;
425 }
426
427 SCM
428 scm_debug_newcell2 (void)
429 {
430 SCM new;
431
432 scm_newcell2_count++;
433 if (scm_debug_check_freelist) {
434 scm_check_freelist (&scm_freelist2);
435 scm_gc();
436 }
437
438 /* The rest of this is supposed to be identical to the SCM_NEWCELL2
439 macro. */
440 if (SCM_IMP (scm_freelist2.cells))
441 new = scm_gc_for_newcell (&scm_freelist2);
442 else
443 {
444 new = scm_freelist2.cells;
445 scm_freelist2.cells = SCM_CDR (scm_freelist2.cells);
446 SCM_SETCAR (new, scm_tc16_allocated);
447 scm_cells_allocated += 2;
448 }
449
450 return new;
451 }
452
453 #endif /* GUILE_NEW_GC_SCHEME */
454 #endif /* GUILE_DEBUG_FREELIST */
455
456 \f
457
458 /* {Scheme Interface to GC}
459 */
460
461 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
462 (),
463 "Returns an association list of statistics about Guile's current use of storage. ")
464 #define FUNC_NAME s_scm_gc_stats
465 {
466 int i;
467 int n;
468 SCM heap_segs;
469 long int local_scm_mtrigger;
470 long int local_scm_mallocated;
471 long int local_scm_heap_size;
472 long int local_scm_cells_allocated;
473 long int local_scm_gc_time_taken;
474 SCM answer;
475
476 SCM_DEFER_INTS;
477 scm_block_gc = 1;
478 retry:
479 heap_segs = SCM_EOL;
480 n = scm_n_heap_segs;
481 for (i = scm_n_heap_segs; i--; )
482 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
483 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
484 heap_segs);
485 if (scm_n_heap_segs != n)
486 goto retry;
487 scm_block_gc = 0;
488
490 local_scm_mtrigger = scm_mtrigger;
491 local_scm_mallocated = scm_mallocated;
492 #ifdef GUILE_NEW_GC_SCHEME
493 local_scm_heap_size = scm_master_freelist.heap_size; /*fixme*/
494 #else
495 local_scm_heap_size = scm_freelist.heap_size; /*fixme*/
496 #endif
497 local_scm_cells_allocated = scm_cells_allocated;
498 local_scm_gc_time_taken = scm_gc_time_taken;
499
500 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
501 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
502 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
503 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
504 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
505 scm_cons (sym_heap_segments, heap_segs),
506 SCM_UNDEFINED);
507 SCM_ALLOW_INTS;
508 return answer;
509 }
510 #undef FUNC_NAME
511
512
513 void
514 scm_gc_start (const char *what)
515 {
516 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
517 /* scm_gc_cells_collected = 0; */
518 scm_gc_malloc_collected = 0;
519 scm_gc_ports_collected = 0;
520 }
521
522 void
523 scm_gc_end ()
524 {
525 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
526 scm_gc_time_taken += scm_gc_rt;
527 scm_system_async_mark (scm_gc_async);
528 }
529
530
531 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
532 (SCM obj),
533 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
534 "returned by this function for @var{obj}")
535 #define FUNC_NAME s_scm_object_address
536 {
537 return scm_ulong2num ((unsigned long) obj);
538 }
539 #undef FUNC_NAME
540
541
542 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
543 (),
544 "Scans all of SCM objects and reclaims for further use those that are\n"
545 "no longer accessible.")
546 #define FUNC_NAME s_scm_gc
547 {
548 SCM_DEFER_INTS;
549 scm_igc ("call");
550 SCM_ALLOW_INTS;
551 return SCM_UNSPECIFIED;
552 }
553 #undef FUNC_NAME
554
555
556 \f
557 /* {C Interface For When GC is Triggered}
558 */
559
560 #ifdef GUILE_NEW_GC_SCHEME
561
562 /* When we get POSIX threads support, the master will be global and
563 common while the freelist will be individual for each thread. */
564
565 SCM
566 scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
567 {
568 SCM cell;
569 ++scm_ints_disabled;
570 if (master->triggeredp)
571 scm_igc ("cells");
572 else if (SCM_NULLP (master->clusters))
573 alloc_some_heap (master);
574 else if (SCM_NULLP (SCM_CDR (master->clusters)))
575 /* we are satisfied; GC instead of alloc next time around */
576 master->triggeredp = 1;
577 --scm_ints_disabled;
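  /* Take the first cluster off the master list: its first cell is
     returned to the caller and the remaining cells of that cluster
     become the new (per-thread) freelist. */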
578 cell = SCM_CAR (master->clusters);
579 master->clusters = SCM_CDR (master->clusters);
580 *freelist = SCM_CDR (cell);
581 SCM_SETCAR (cell, scm_tc16_allocated);
582 return cell;
583 }
584
585 #else /* GUILE_NEW_GC_SCHEME */
586
587 void
588 scm_gc_for_alloc (scm_freelist_t *freelistp)
589 {
590 SCM_REDEFER_INTS;
591 scm_igc ("cells");
592 #ifdef GUILE_DEBUG_FREELIST
593 fprintf (stderr, "Collected: %d, min_yield: %d\n",
594 freelistp->collected, MIN_GC_YIELD (freelistp));
595 #endif
596 if ((freelistp->collected < MIN_GC_YIELD (freelistp))
597 || SCM_IMP (freelistp->cells))
598 alloc_some_heap (freelistp);
599 SCM_REALLOW_INTS;
600 }
601
602
603 SCM
604 scm_gc_for_newcell (scm_freelist_t *freelistp)
605 {
606 SCM fl;
607 scm_gc_for_alloc (freelistp);
608 fl = freelistp->cells;
609 freelistp->cells = SCM_CDR (fl);
610 SCM_SETCAR (fl, scm_tc16_allocated);
611 return fl;
612 }
613
614 #endif /* GUILE_NEW_GC_SCHEME */
615
616 void
617 scm_igc (const char *what)
618 {
619 int j;
620
621 #ifdef USE_THREADS
622 /* During the critical section, only the current thread may run. */
623 SCM_THREAD_CRITICAL_SECTION_START;
624 #endif
625
626 /* fprintf (stderr, "gc: %s\n", what); */
627
628 scm_gc_start (what);
629
630 if (!scm_stack_base || scm_block_gc)
631 {
632 scm_gc_end ();
633 return;
634 }
635
636 if (scm_mallocated < 0)
637 /* The byte count of allocated objects has underflowed. This is
638 probably because you forgot to report the sizes of objects you
639 have allocated, by calling scm_done_malloc or some such. When
640 the GC freed them, it subtracted their size from
641 scm_mallocated, which underflowed. */
642 abort ();
643
644 if (scm_gc_heap_lock)
645 /* We've invoked the collector while a GC is already in progress.
646 That should never happen. */
647 abort ();
648
649 ++scm_gc_heap_lock;
650
651 scm_weak_vectors = SCM_EOL;
652
653 scm_guardian_gc_init ();
654
655 /* unprotect any struct types with no instances */
656 #if 0
657 {
658 SCM type_list;
659 SCM * pos;
660
661 pos = &scm_type_obj_list;
662 type_list = scm_type_obj_list;
663 while (type_list != SCM_EOL)
664 if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
665 {
666 pos = SCM_CDRLOC (type_list);
667 type_list = SCM_CDR (type_list);
668 }
669 else
670 {
671 *pos = SCM_CDR (type_list);
672 type_list = SCM_CDR (type_list);
673 }
674 }
675 #endif
676
677 /* flush dead entries from the continuation stack */
678 {
679 int x;
680 int bound;
681 SCM * elts;
682 elts = SCM_VELTS (scm_continuation_stack);
683 bound = SCM_LENGTH (scm_continuation_stack);
684 x = SCM_INUM (scm_continuation_stack_ptr);
685 while (x < bound)
686 {
687 elts[x] = SCM_BOOL_F;
688 ++x;
689 }
690 }
691
692 #ifndef USE_THREADS
693
694 /* Protect from the C stack. This must be the first marking
695 * done because it provides information about what objects
696 * are "in-use" by the C code. "in-use" objects are those
697 * for which the values from SCM_LENGTH and SCM_CHARS must remain
698 * usable. This requirement is stricter than a liveness
699 * requirement -- in particular, it constrains the implementation
700 * of scm_vector_set_length_x.
701 */
702 SCM_FLUSH_REGISTER_WINDOWS;
703 /* This assumes that all registers are saved into the jmp_buf */
704 setjmp (scm_save_regs_gc_mark);
705 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
706 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
707 sizeof scm_save_regs_gc_mark)
708 / sizeof (SCM_STACKITEM)));
709
710 {
711 /* stack_len is long rather than scm_sizet in order to guarantee that
712 &stack_len is long aligned */
713 #ifdef SCM_STACK_GROWS_UP
714 #ifdef nosve
715 long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
716 #else
717 long stack_len = scm_stack_size (scm_stack_base);
718 #endif
719 scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
720 #else
721 #ifdef nosve
722 long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
723 #else
724 long stack_len = scm_stack_size (scm_stack_base);
725 #endif
726 scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
727 #endif
728 }
729
730 #else /* USE_THREADS */
731
732 /* Mark every thread's stack and registers */
733 scm_threads_mark_stacks ();
734
735 #endif /* USE_THREADS */
736
737 /* FIXME: insert a phase to un-protect string-data preserved
738 * in scm_vector_set_length_x.
739 */
740
741 j = SCM_NUM_PROTECTS;
742 while (j--)
743 scm_gc_mark (scm_sys_protects[j]);
744
745 /* FIXME: we should have a means to register C functions to be run
746 * in different phases of GC
747 */
748 scm_mark_subr_table ();
749
750 #ifndef USE_THREADS
751 scm_gc_mark (scm_root->handle);
752 #endif
753
754 scm_mark_weak_vector_spines ();
755
756 scm_guardian_zombify ();
757
758 scm_gc_sweep ();
759
760 --scm_gc_heap_lock;
761 scm_gc_end ();
762
763 #ifdef USE_THREADS
764 SCM_THREAD_CRITICAL_SECTION_END;
765 #endif
766 }
767
768 \f
769 /* {Mark/Sweep}
770 */
771
772
773
774 /* Mark an object precisely.
775 */
776 void
777 scm_gc_mark (SCM p)
778 {
779 register long i;
780 register SCM ptr;
781
782 ptr = p;
783
784 gc_mark_loop:
785 if (SCM_IMP (ptr))
786 return;
787
788 gc_mark_nimp:
789 if (SCM_NCELLP (ptr))
790 scm_wta (ptr, "rogue pointer in heap", NULL);
791
792 switch (SCM_TYP7 (ptr))
793 {
794 case scm_tcs_cons_nimcar:
795 if (SCM_GCMARKP (ptr))
796 break;
797 SCM_SETGCMARK (ptr);
798 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
799 {
800 ptr = SCM_CAR (ptr);
801 goto gc_mark_nimp;
802 }
803 scm_gc_mark (SCM_CAR (ptr));
804 ptr = SCM_GCCDR (ptr);
805 goto gc_mark_nimp;
806 case scm_tcs_cons_imcar:
807 if (SCM_GCMARKP (ptr))
808 break;
809 SCM_SETGCMARK (ptr);
810 ptr = SCM_GCCDR (ptr);
811 goto gc_mark_loop;
812 case scm_tc7_pws:
813 if (SCM_GCMARKP (ptr))
814 break;
815 SCM_SETGCMARK (ptr);
816 scm_gc_mark (SCM_CELL_WORD (ptr, 2));
817 ptr = SCM_GCCDR (ptr);
818 goto gc_mark_loop;
819 case scm_tcs_cons_gloc:
820 if (SCM_GCMARKP (ptr))
821 break;
822 SCM_SETGCMARK (ptr);
823 {
824 SCM vcell;
825 vcell = SCM_CAR (ptr) - 1L;
826 switch (SCM_UNPACK (SCM_CDR (vcell)))
827 {
828 default:
829 scm_gc_mark (vcell);
830 ptr = SCM_GCCDR (ptr);
831 goto gc_mark_loop;
832 case 1: /* ! */
833 case 0: /* ! */
834 {
835 SCM layout;
836 SCM * vtable_data;
837 int len;
838 char * fields_desc;
839 register SCM * mem;
840 register int x;
841
842 vtable_data = (SCM *)vcell;
843 layout = vtable_data[scm_vtable_index_layout];
844 len = SCM_LENGTH (layout);
845 fields_desc = SCM_CHARS (layout);
846 /* We're using SCM_GCCDR here like STRUCT_DATA, except
847 that it removes the mark */
848 mem = (SCM *)SCM_GCCDR (ptr);
849
850 if (SCM_UNPACK (vtable_data[scm_struct_i_flags]) & SCM_STRUCTF_ENTITY)
851 {
852 scm_gc_mark (mem[scm_struct_i_procedure]);
853 scm_gc_mark (mem[scm_struct_i_setter]);
854 }
855 if (len)
856 {
857 for (x = 0; x < len - 2; x += 2, ++mem)
858 if (fields_desc[x] == 'p')
859 scm_gc_mark (*mem);
860 if (fields_desc[x] == 'p')
861 {
862 int j;
863 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
864 for (j = (long int) *mem; x; --x)
865 scm_gc_mark (*++mem);
866 else
867 scm_gc_mark (*mem);
868 }
869 }
870 if (!SCM_CDR (vcell))
871 {
872 SCM_SETGCMARK (vcell);
873 ptr = vtable_data[scm_vtable_index_vtable];
874 goto gc_mark_loop;
875 }
876 }
877 }
878 }
879 break;
880 case scm_tcs_closures:
881 if (SCM_GCMARKP (ptr))
882 break;
883 SCM_SETGCMARK (ptr);
884 if (SCM_IMP (SCM_CDR (ptr)))
885 {
886 ptr = SCM_CLOSCAR (ptr);
887 goto gc_mark_nimp;
888 }
889 scm_gc_mark (SCM_CLOSCAR (ptr));
890 ptr = SCM_GCCDR (ptr);
891 goto gc_mark_nimp;
892 case scm_tc7_vector:
893 case scm_tc7_lvector:
894 #ifdef CCLO
895 case scm_tc7_cclo:
896 #endif
897 if (SCM_GC8MARKP (ptr))
898 break;
899 SCM_SETGC8MARK (ptr);
900 i = SCM_LENGTH (ptr);
901 if (i == 0)
902 break;
903 while (--i > 0)
904 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
905 scm_gc_mark (SCM_VELTS (ptr)[i]);
906 ptr = SCM_VELTS (ptr)[0];
907 goto gc_mark_loop;
908 case scm_tc7_contin:
909 if SCM_GC8MARKP
910 (ptr) break;
911 SCM_SETGC8MARK (ptr);
912 if (SCM_VELTS (ptr))
913 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
914 (scm_sizet)
915 (SCM_LENGTH (ptr) +
916 (sizeof (SCM_STACKITEM) + -1 +
917 sizeof (scm_contregs)) /
918 sizeof (SCM_STACKITEM)));
919 break;
920 #ifdef HAVE_ARRAYS
921 case scm_tc7_bvect:
922 case scm_tc7_byvect:
923 case scm_tc7_ivect:
924 case scm_tc7_uvect:
925 case scm_tc7_fvect:
926 case scm_tc7_dvect:
927 case scm_tc7_cvect:
928 case scm_tc7_svect:
929 #ifdef HAVE_LONG_LONGS
930 case scm_tc7_llvect:
931 #endif
932 #endif
933 case scm_tc7_string:
934 SCM_SETGC8MARK (ptr);
935 break;
936
937 case scm_tc7_substring:
938 if (SCM_GC8MARKP(ptr))
939 break;
940 SCM_SETGC8MARK (ptr);
941 ptr = SCM_CDR (ptr);
942 goto gc_mark_loop;
943
944 case scm_tc7_wvect:
945 if (SCM_GC8MARKP(ptr))
946 break;
947 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
948 scm_weak_vectors = ptr;
949 SCM_SETGC8MARK (ptr);
950 if (SCM_IS_WHVEC_ANY (ptr))
951 {
952 int x;
953 int len;
954 int weak_keys;
955 int weak_values;
956
957 len = SCM_LENGTH (ptr);
958 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
959 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
960
961 for (x = 0; x < len; ++x)
962 {
963 SCM alist;
964 alist = SCM_VELTS (ptr)[x];
965
966 /* mark everything on the alist except the keys or
967 * values, according to weak_values and weak_keys. */
968 while ( SCM_CONSP (alist)
969 && !SCM_GCMARKP (alist)
970 && SCM_CONSP (SCM_CAR (alist)))
971 {
972 SCM kvpair;
973 SCM next_alist;
974
975 kvpair = SCM_CAR (alist);
976 next_alist = SCM_CDR (alist);
977 /*
978 * Do not do this:
979 * SCM_SETGCMARK (alist);
980 * SCM_SETGCMARK (kvpair);
981 *
982 * It may be that either the key or value is protected by
983 * an escaped reference to part of the spine of this alist.
984 * If we mark the spine here, and only mark one or neither of the
985 * key and value, they may never be properly marked.
986 * This leads to a horrible situation in which an alist containing
987 * freelist cells is exported.
988 *
989 * So the spines of these alists are only marked at the very end of marking.
990 * If somebody confuses us by constructing a weak vector
991 * with a circular alist then we are hosed, but at least we
992 * won't prematurely drop table entries.
993 */
994 if (!weak_keys)
995 scm_gc_mark (SCM_CAR (kvpair));
996 if (!weak_values)
997 scm_gc_mark (SCM_GCCDR (kvpair));
998 alist = next_alist;
999 }
1000 if (SCM_NIMP (alist))
1001 scm_gc_mark (alist);
1002 }
1003 }
1004 break;
1005
1006 case scm_tc7_msymbol:
1007 if (SCM_GC8MARKP(ptr))
1008 break;
1009 SCM_SETGC8MARK (ptr);
1010 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
1011 ptr = SCM_SYMBOL_PROPS (ptr);
1012 goto gc_mark_loop;
1013 case scm_tc7_ssymbol:
1014 if (SCM_GC8MARKP(ptr))
1015 break;
1016 SCM_SETGC8MARK (ptr);
1017 break;
1018 case scm_tcs_subrs:
1019 break;
1020 case scm_tc7_port:
1021 i = SCM_PTOBNUM (ptr);
1022 if (!(i < scm_numptob))
1023 goto def;
1024 if (SCM_GC8MARKP (ptr))
1025 break;
1026 SCM_SETGC8MARK (ptr);
1027 if (SCM_PTAB_ENTRY(ptr))
1028 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
1029 if (scm_ptobs[i].mark)
1030 {
1031 ptr = (scm_ptobs[i].mark) (ptr);
1032 goto gc_mark_loop;
1033 }
1034 else
1035 return;
1036 break;
1037 case scm_tc7_smob:
1038 if (SCM_GC8MARKP (ptr))
1039 break;
1040 SCM_SETGC8MARK (ptr);
1041 switch (SCM_GCTYP16 (ptr))
1042 { /* should be faster than going through scm_smobs */
1043 case scm_tc_free_cell:
1044 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1045 case scm_tc16_allocated:
1046 case scm_tc16_big:
1047 case scm_tc16_real:
1048 case scm_tc16_complex:
1049 break;
1050 default:
1051 i = SCM_SMOBNUM (ptr);
1052 if (!(i < scm_numsmob))
1053 goto def;
1054 if (scm_smobs[i].mark)
1055 {
1056 ptr = (scm_smobs[i].mark) (ptr);
1057 goto gc_mark_loop;
1058 }
1059 else
1060 return;
1061 }
1062 break;
1063 default:
1064 def:scm_wta (ptr, "unknown type in ", "gc_mark");
1065 }
1066 }
1067
1068
1069 /* Mark a Region Conservatively
1070 */
1071
1072 void
1073 scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
1074 {
1075 register long m = n;
1076 register int i, j;
1077 register SCM_CELLPTR ptr;
1078
1079 while (0 <= --m)
1080 if (SCM_CELLP (*(SCM **) (& x[m])))
1081 {
1082 ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & x[m]));
1083 i = 0;
1084 j = scm_n_heap_segs - 1;
1085 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1086 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1087 {
1088 while (i <= j)
1089 {
1090 int seg_id;
1091 seg_id = -1;
1092 if ( (i == j)
1093 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1094 seg_id = i;
1095 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1096 seg_id = j;
1097 else
1098 {
1099 int k;
1100 k = (i + j) / 2;
1101 if (k == i)
1102 break;
1103 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1104 {
1105 j = k;
1106 ++i;
1107 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1108 continue;
1109 else
1110 break;
1111 }
1112 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1113 {
1114 i = k;
1115 --j;
1116 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1117 continue;
1118 else
1119 break;
1120 }
1121 }
1122 if ( !scm_heap_table[seg_id].valid
1123 || scm_heap_table[seg_id].valid (ptr,
1124 &scm_heap_table[seg_id]))
1125 scm_gc_mark (*(SCM *) & x[m]);
1126 break;
1127 }
1128
1129 }
1130 }
1131 }
1132
1133
1134 /* The following is a C predicate which determines if an SCM value can be
1135 regarded as a pointer to a cell on the heap. The code is duplicated
1136 from scm_mark_locations. */
1137
1138
1139 int
1140 scm_cellp (SCM value)
1141 {
1142 register int i, j;
1143 register SCM_CELLPTR ptr;
1144
1145 if SCM_CELLP (*(SCM **) (& value))
1146 {
1147 ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & value));
1148 i = 0;
1149 j = scm_n_heap_segs - 1;
1150 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1151 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1152 {
1153 while (i <= j)
1154 {
1155 int seg_id;
1156 seg_id = -1;
1157 if ( (i == j)
1158 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1159 seg_id = i;
1160 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1161 seg_id = j;
1162 else
1163 {
1164 int k;
1165 k = (i + j) / 2;
1166 if (k == i)
1167 break;
1168 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1169 {
1170 j = k;
1171 ++i;
1172 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1173 continue;
1174 else
1175 break;
1176 }
1177 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1178 {
1179 i = k;
1180 --j;
1181 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1182 continue;
1183 else
1184 break;
1185 }
1186 }
1187 if ( !scm_heap_table[seg_id].valid
1188 || scm_heap_table[seg_id].valid (ptr,
1189 &scm_heap_table[seg_id]))
1190 return 1;
1191 break;
1192 }
1193
1194 }
1195 }
1196 return 0;
1197 }
1198
1199
1200 static void
1201 scm_mark_weak_vector_spines ()
1202 {
1203 SCM w;
1204
1205 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
1206 {
1207 if (SCM_IS_WHVEC_ANY (w))
1208 {
1209 SCM *ptr;
1210 SCM obj;
1211 int j;
1212 int n;
1213
1214 obj = w;
1215 ptr = SCM_VELTS (w);
1216 n = SCM_LENGTH (w);
1217 for (j = 0; j < n; ++j)
1218 {
1219 SCM alist;
1220
1221 alist = ptr[j];
1222 while ( SCM_CONSP (alist)
1223 && !SCM_GCMARKP (alist)
1224 && SCM_CONSP (SCM_CAR (alist)))
1225 {
1226 SCM_SETGCMARK (alist);
1227 SCM_SETGCMARK (SCM_CAR (alist));
1228 alist = SCM_GCCDR (alist);
1229 }
1230 }
1231 }
1232 }
1233 }
1234
1235
1236
1237 void
1238 scm_gc_sweep ()
1239 {
1240 register SCM_CELLPTR ptr;
1241 #ifdef SCM_POINTERS_MUNGED
1242 register SCM scmptr;
1243 #else
1244 #undef scmptr
1245 #define scmptr (SCM)ptr
1246 #endif
1247 register SCM nfreelist;
1248 register scm_freelist_t *hp_freelist;
1249 register long m;
1250 register int span;
1251 #ifdef GUILE_NEW_GC_SCHEME
1252 long n_objects;
1253 #endif
1254 long i;
1255 scm_sizet seg_size;
1256
1257 m = 0;
1258
1259 #ifdef GUILE_NEW_GC_SCHEME
1260 /* Reset all free list pointers. We'll reconstruct them completely
1261 while scanning. */
1262 for (i = 0; i < scm_n_heap_segs; i++)
1263 {
1264 scm_heap_table[i].freelistp->cells = SCM_EOL;
1265 scm_heap_table[i].freelistp->n_objects
1266 = scm_heap_table[i].freelistp->gc_trigger;
1267 scm_heap_table[i].freelistp->clusters = SCM_EOL;
1268 scm_heap_table[i].freelistp->clustertail
1269 = &scm_heap_table[i].freelistp->clusters;
1270 scm_heap_table[i].freelistp->triggeredp = 0;
1271 }
1272 #else
1273 /* Reset all free list pointers. We'll reconstruct them completely
1274 while scanning. */
1275 for (i = 0; i < scm_n_heap_segs; i++)
1276 scm_heap_table[i].freelistp->cells = SCM_EOL;
1277 #endif
1278
1279 for (i = 0; i < scm_n_heap_segs; i++)
1280 {
1281 register scm_sizet n = 0;
1282 register scm_sizet j;
1283
1284 /* Unmarked cells go onto the front of the freelist this heap
1285 segment points to. Rather than updating the real freelist
1286 pointer as we go along, we accumulate the new head in
1287 nfreelist. Then, if it turns out that the entire segment is
1288 free, we free (i.e., malloc's free) the whole segment, and
1289 simply don't assign nfreelist back into the real freelist. */
1290 hp_freelist = scm_heap_table[i].freelistp;
1291 nfreelist = hp_freelist->cells;
1292 #ifdef GUILE_NEW_GC_SCHEME
1293 n_objects = hp_freelist->n_objects;
1294 #endif
1295 span = scm_heap_table[i].span;
1296 hp_freelist->collected = 0;
1297
1298 ptr = CELL_UP (scm_heap_table[i].bounds[0]);
1299 seg_size = CELL_DN (scm_heap_table[i].bounds[1]) - ptr;
1300 for (j = seg_size + span; j -= span; ptr += span)
1301 {
1302 #ifdef SCM_POINTERS_MUNGED
1303 scmptr = PTR2SCM (ptr);
1304 #endif
1305 switch SCM_TYP7 (scmptr)
1306 {
1307 case scm_tcs_cons_gloc:
1308 if (SCM_GCMARKP (scmptr))
1309 {
1310 if (SCM_CDR (SCM_CAR (scmptr) - 1) == (SCM)1)
1311 SCM_SETCDR (SCM_CAR (scmptr) - 1, (SCM) 0);
1312 goto cmrkcontinue;
1313 }
1314 {
1315 SCM vcell;
1316 vcell = SCM_CAR (scmptr) - 1L;
1317
1318 if ((SCM_CDR (vcell) == 0) || (SCM_UNPACK (SCM_CDR (vcell)) == 1))
1319 {
1320 scm_struct_free_t free
1321 = (scm_struct_free_t) ((SCM*) vcell)[scm_struct_i_free];
1322 m += free ((SCM *) vcell, (SCM *) SCM_GCCDR (scmptr));
1323 }
1324 }
1325 break;
1326 case scm_tcs_cons_imcar:
1327 case scm_tcs_cons_nimcar:
1328 case scm_tcs_closures:
1329 case scm_tc7_pws:
1330 if (SCM_GCMARKP (scmptr))
1331 goto cmrkcontinue;
1332 break;
1333 case scm_tc7_wvect:
1334 if (SCM_GC8MARKP (scmptr))
1335 {
1336 goto c8mrkcontinue;
1337 }
1338 else
1339 {
1340 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1341 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
1342 break;
1343 }
1344
1345 case scm_tc7_vector:
1346 case scm_tc7_lvector:
1347 #ifdef CCLO
1348 case scm_tc7_cclo:
1349 #endif
1350 if (SCM_GC8MARKP (scmptr))
1351 goto c8mrkcontinue;
1352
1353 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1354 freechars:
1355 scm_must_free (SCM_CHARS (scmptr));
1356 /* SCM_SETCHARS(scmptr, 0);*/
1357 break;
1358 #ifdef HAVE_ARRAYS
1359 case scm_tc7_bvect:
1360 if SCM_GC8MARKP (scmptr)
1361 goto c8mrkcontinue;
1362 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1363 goto freechars;
1364 case scm_tc7_byvect:
1365 if SCM_GC8MARKP (scmptr)
1366 goto c8mrkcontinue;
1367 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1368 goto freechars;
1369 case scm_tc7_ivect:
1370 case scm_tc7_uvect:
1371 if SCM_GC8MARKP (scmptr)
1372 goto c8mrkcontinue;
1373 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1374 goto freechars;
1375 case scm_tc7_svect:
1376 if SCM_GC8MARKP (scmptr)
1377 goto c8mrkcontinue;
1378 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1379 goto freechars;
1380 #ifdef HAVE_LONG_LONGS
1381 case scm_tc7_llvect:
1382 if SCM_GC8MARKP (scmptr)
1383 goto c8mrkcontinue;
1384 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1385 goto freechars;
1386 #endif
1387 case scm_tc7_fvect:
1388 if SCM_GC8MARKP (scmptr)
1389 goto c8mrkcontinue;
1390 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1391 goto freechars;
1392 case scm_tc7_dvect:
1393 if SCM_GC8MARKP (scmptr)
1394 goto c8mrkcontinue;
1395 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1396 goto freechars;
1397 case scm_tc7_cvect:
1398 if SCM_GC8MARKP (scmptr)
1399 goto c8mrkcontinue;
1400 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1401 goto freechars;
1402 #endif
1403 case scm_tc7_substring:
1404 if (SCM_GC8MARKP (scmptr))
1405 goto c8mrkcontinue;
1406 break;
1407 case scm_tc7_string:
1408 if (SCM_GC8MARKP (scmptr))
1409 goto c8mrkcontinue;
1410 m += SCM_HUGE_LENGTH (scmptr) + 1;
1411 goto freechars;
1412 case scm_tc7_msymbol:
1413 if (SCM_GC8MARKP (scmptr))
1414 goto c8mrkcontinue;
1415 m += ( SCM_LENGTH (scmptr)
1416 + 1
1417 + sizeof (SCM) * ((SCM *)SCM_CHARS (scmptr) - SCM_SLOTS(scmptr)));
1418 scm_must_free ((char *)SCM_SLOTS (scmptr));
1419 break;
1420 case scm_tc7_contin:
1421 if SCM_GC8MARKP (scmptr)
1422 goto c8mrkcontinue;
1423 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
1424 if (SCM_VELTS (scmptr))
1425 goto freechars;
1426 case scm_tc7_ssymbol:
1427 if SCM_GC8MARKP(scmptr)
1428 goto c8mrkcontinue;
1429 break;
1430 case scm_tcs_subrs:
1431 continue;
1432 case scm_tc7_port:
1433 if SCM_GC8MARKP (scmptr)
1434 goto c8mrkcontinue;
1435 if SCM_OPENP (scmptr)
1436 {
1437 int k = SCM_PTOBNUM (scmptr);
1438 if (!(k < scm_numptob))
1439 goto sweeperr;
1440 /* Keep "revealed" ports alive. */
1441 if (scm_revealed_count (scmptr) > 0)
1442 continue;
1443 /* Yes, I really do mean scm_ptobs[k].free */
1444 /* rather than scm_ptobs[k].close. .close */
1445 /* is for explicit CLOSE-PORT by user */
1446 m += (scm_ptobs[k].free) (scmptr);
1447 SCM_SETSTREAM (scmptr, 0);
1448 scm_remove_from_port_table (scmptr);
1449 scm_gc_ports_collected++;
1450 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
1451 }
1452 break;
1453 case scm_tc7_smob:
1454 switch SCM_GCTYP16 (scmptr)
1455 {
1456 case scm_tc_free_cell:
1457 case scm_tc16_real:
1458 if SCM_GC8MARKP (scmptr)
1459 goto c8mrkcontinue;
1460 break;
1461 #ifdef SCM_BIGDIG
1462 case scm_tc16_big:
1463 if SCM_GC8MARKP (scmptr)
1464 goto c8mrkcontinue;
1465 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1466 goto freechars;
1467 #endif /* def SCM_BIGDIG */
1468 case scm_tc16_complex:
1469 if SCM_GC8MARKP (scmptr)
1470 goto c8mrkcontinue;
1471 m += 2 * sizeof (double);
1472 goto freechars;
1473 default:
1474 if SCM_GC8MARKP (scmptr)
1475 goto c8mrkcontinue;
1476
1477 {
1478 int k;
1479 k = SCM_SMOBNUM (scmptr);
1480 if (!(k < scm_numsmob))
1481 goto sweeperr;
1482 m += (scm_smobs[k].free) ((SCM) scmptr);
1483 break;
1484 }
1485 }
1486 break;
1487 default:
1488 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1489 }
1490 #if 0
1491 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1492 exit (2);
1493 #endif
1494 #ifndef GUILE_NEW_GC_SCHEME
1495 n += span;
1496 #else
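	    /* New GC scheme: free cells are gathered into clusters of
	       gc_trigger cells.  When the current cluster is full, this
	       cell becomes the cluster head (its CAR holds the cells
	       collected so far) and is appended to the freelist's chain
	       of clusters. */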
1497 if (--n_objects < 0)
1498 {
1499 SCM_SETCAR (scmptr, nfreelist);
1500 *hp_freelist->clustertail = scmptr;
1501 hp_freelist->clustertail = SCM_CDRLOC (scmptr);
1502
1503 nfreelist = SCM_EOL;
1504 n_objects = hp_freelist->gc_trigger;
1505 n += span * (n_objects + 1);
1506 }
1507 else
1508 #endif
1509 {
1510 /* Stick the new cell on the front of nfreelist. It's
1511 critical that we mark this cell as freed; otherwise, the
1512 conservative collector might trace it as some other type
1513 of object. */
1514 SCM_SETCAR (scmptr, scm_tc_free_cell);
1515 SCM_SETCDR (scmptr, nfreelist);
1516 nfreelist = scmptr;
1517 }
1518
1519 continue;
1520 c8mrkcontinue:
1521 SCM_CLRGC8MARK (scmptr);
1522 continue;
1523 cmrkcontinue:
1524 SCM_CLRGCMARK (scmptr);
1525 }
1526 #ifdef GC_FREE_SEGMENTS
1527 if (n == seg_size)
1528 {
1529 register long j;
1530
1531 hp_freelist->heap_size -= seg_size;
1532 free ((char *) scm_heap_table[i].bounds[0]);
1533 scm_heap_table[i].bounds[0] = 0;
1534 for (j = i + 1; j < scm_n_heap_segs; j++)
1535 scm_heap_table[j - 1] = scm_heap_table[j];
1536 scm_n_heap_segs -= 1;
1537 i--; /* We need to scan the segment just moved. */
1538 }
1539 else
1540 #endif /* ifdef GC_FREE_SEGMENTS */
1541 {
1542 /* Update the real freelist pointer to point to the head of
1543 the list of free cells we've built for this segment. */
1544 hp_freelist->cells = nfreelist;
1545 #ifdef GUILE_NEW_GC_SCHEME
1546 hp_freelist->n_objects = n_objects;
1547 #endif
1548 }
1549
1550 #ifdef GUILE_NEW_GC_SCHEME
1551 j = span * (hp_freelist->gc_trigger - n_objects);
1552 /* sum up---if this is last turn for this freelist */
1553 hp_freelist->collected += n + j;
1554 n -= j; /* compensate for the sum up */
1555 #else
1556 hp_freelist->collected += n;
1557 #endif
1558 scm_cells_allocated += hp_freelist->heap_size - hp_freelist->collected;
1559
1560 #ifdef GUILE_DEBUG_FREELIST
1561 scm_check_freelist (hp_freelist);
1562 scm_map_free_list ();
1563 #endif
1564 }
1565
1566 #ifdef GUILE_NEW_GC_SCHEME
1567 for (i = 0; i < scm_n_heap_segs; i++)
1568 {
1569 scm_freelist_t *hp_freelist = scm_heap_table[i].freelistp;
1570 if (hp_freelist->gc_trigger - hp_freelist->n_objects > 1)
1571 {
1572 SCM c = hp_freelist->cells;
1573 SCM_SETCAR (c, SCM_CDR (c));
1574 SCM_SETCDR (c, SCM_EOL);
1575 *hp_freelist->clustertail = c;
1576 hp_freelist->n_objects = hp_freelist->gc_trigger;
1577 }
1578 else
1579 *hp_freelist->clustertail = SCM_EOL;
1580 #if 0
1581 fprintf (stderr, "%d:%d: ",
1582 i, scm_ilength (hp_freelist->clusters));
1583 {
1584 SCM ls, c = hp_freelist->clusters;
1585 int n;
1586 while (SCM_NNULLP (c))
1587 {
1588 ls = SCM_CAR (c);
1589 for (n = 0; SCM_NNULLP (ls); ls = SCM_CDR (ls))
1590 ++n;
1591 fprintf (stderr, "%d ", n);
1592 c = SCM_CDR (c);
1593 }
1594 fprintf (stderr, "\n");
1595 }
1596 #endif
1597 }
1598 #endif
1599
1600 /* Scan weak vectors. */
1601 {
1602 SCM *ptr, w;
1603 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
1604 {
1605 if (!SCM_IS_WHVEC_ANY (w))
1606 {
1607 register long j, n;
1608
1609 ptr = SCM_VELTS (w);
1610 n = SCM_LENGTH (w);
1611 for (j = 0; j < n; ++j)
1612 if (SCM_FREEP (ptr[j]))
1613 ptr[j] = SCM_BOOL_F;
1614 }
1615 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1616 {
1617 SCM obj = w;
1618 register long n = SCM_LENGTH (w);
1619 register long j;
1620
1621 ptr = SCM_VELTS (w);
1622
1623 for (j = 0; j < n; ++j)
1624 {
1625 SCM * fixup;
1626 SCM alist;
1627 int weak_keys;
1628 int weak_values;
1629
1630 weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
1631 weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);
1632
1633 fixup = ptr + j;
1634 alist = *fixup;
1635
1636 while ( SCM_CONSP (alist)
1637 && SCM_CONSP (SCM_CAR (alist)))
1638 {
1639 SCM key;
1640 SCM value;
1641
1642 key = SCM_CAAR (alist);
1643 value = SCM_CDAR (alist);
1644 if ( (weak_keys && SCM_FREEP (key))
1645 || (weak_values && SCM_FREEP (value)))
1646 {
1647 *fixup = SCM_CDR (alist);
1648 }
1649 else
1650 fixup = SCM_CDRLOC (alist);
1651 alist = SCM_CDR (alist);
1652 }
1653 }
1654 }
1655 }
1656 }
1657 scm_mallocated -= m;
1658 scm_gc_malloc_collected = m;
1659 }
1660
1661
1662 \f
1663
1664 /* {Front end to malloc}
1665 *
1666 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1667 *
1668 * These functions provide services comparable to malloc, realloc, and
1669 * free. They are for allocating malloced parts of Scheme objects.
1670 * The primary purpose of the front end is to impose calls to gc.
1671 */
1672
1673 /* scm_must_malloc
1674 * Return newly malloced storage or throw an error.
1675 *
1676 * The parameter WHAT is a string for error reporting.
1677 * If the threshold scm_mtrigger will be passed by this
1678 * allocation, or if the first call to malloc fails,
1679 * garbage collect -- on the presumption that some objects
1680 * using malloced storage may be collected.
1681 *
1682 * The limit scm_mtrigger may be raised by this allocation.
1683 */
1684 void *
1685 scm_must_malloc (scm_sizet size, const char *what)
1686 {
1687 void *ptr;
1688 unsigned long nm = scm_mallocated + size;
1689
1690 if (nm <= scm_mtrigger)
1691 {
1692 SCM_SYSCALL (ptr = malloc (size));
1693 if (NULL != ptr)
1694 {
1695 scm_mallocated = nm;
1696 return ptr;
1697 }
1698 }
1699
1700 scm_igc (what);
1701
1702 nm = scm_mallocated + size;
1703 SCM_SYSCALL (ptr = malloc (size));
1704 if (NULL != ptr)
1705 {
1706 scm_mallocated = nm;
1707 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1708 if (nm > scm_mtrigger)
1709 scm_mtrigger = nm + nm / 2;
1710 else
1711 scm_mtrigger += scm_mtrigger / 2;
1712 }
1713 return ptr;
1714 }
1715
1716 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1717 return 0; /* never reached */
1718 }
1719
1720
1721 /* scm_must_realloc
1722 * is similar to scm_must_malloc.
1723 */
1724 void *
1725 scm_must_realloc (void *where,
1726 scm_sizet old_size,
1727 scm_sizet size,
1728 const char *what)
1729 {
1730 void *ptr;
1731 scm_sizet nm = scm_mallocated + size - old_size;
1732
1733 if (nm <= scm_mtrigger)
1734 {
1735 SCM_SYSCALL (ptr = realloc (where, size));
1736 if (NULL != ptr)
1737 {
1738 scm_mallocated = nm;
1739 return ptr;
1740 }
1741 }
1742
1743 scm_igc (what);
1744
1745 nm = scm_mallocated + size - old_size;
1746 SCM_SYSCALL (ptr = realloc (where, size));
1747 if (NULL != ptr)
1748 {
1749 scm_mallocated = nm;
1750 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1751 if (nm > scm_mtrigger)
1752 scm_mtrigger = nm + nm / 2;
1753 else
1754 scm_mtrigger += scm_mtrigger / 2;
1755 }
1756 return ptr;
1757 }
1758
1759 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1760 return 0; /* never reached */
1761 }
1762
1763 void
1764 scm_must_free (void *obj)
1765 {
1766 if (obj)
1767 free (obj);
1768 else
1769 scm_wta (SCM_INUM0, "already free", "");
1770 }
1771
1772 /* Announce that there has been some malloc done that will be freed
1773 * during gc. A typical use is for a smob that uses some malloced
1774 * memory but can not get it from scm_must_malloc (for whatever
1775 * reason). When a new object of this smob is created you call
1776 * scm_done_malloc with the size of the object. When your smob free
1777 * function is called, be sure to include this size in the return
1778 * value. */
1779
1780 void
1781 scm_done_malloc (long size)
1782 {
1783 scm_mallocated += size;
1784
1785 if (scm_mallocated > scm_mtrigger)
1786 {
1787 scm_igc ("foreign mallocs");
1788 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1789 {
1790 if (scm_mallocated > scm_mtrigger)
1791 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1792 else
1793 scm_mtrigger += scm_mtrigger / 2;
1794 }
1795 }
1796 }
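/* A minimal usage sketch for the comment above (the smob name, size, and
 * functions are invented for illustration; the real smob plumbing is
 * omitted).  The foreign allocation is reported with scm_done_malloc when
 * it is made, and the same size is included in the return value of the
 * smob's free function so that scm_mallocated stays balanced.
 */
#if 0
#define MY_SMOB_SIZE 1024
static char *my_smob_data;

static SCM
make_my_smob (void)
{
  my_smob_data = (char *) malloc (MY_SMOB_SIZE); /* not via scm_must_malloc */
  scm_done_malloc ((long) MY_SMOB_SIZE);         /* tell the GC about it */
  return SCM_UNSPECIFIED;                        /* smob cell creation omitted */
}

static scm_sizet
free_my_smob (SCM obj)
{
  free (my_smob_data);
  return MY_SMOB_SIZE;  /* include the reported size in the return value */
}
#endif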
1797
1798
1799 \f
1800
1801 /* {Heap Segments}
1802 *
1803 * Each heap segment is an array of objects of a particular size.
1804 * Every segment has an associated (possibly shared) freelist.
1805 * A table of segment records is kept that records the upper and
1806 * lower extents of the segment; this is used during the conservative
1807 * phase of gc to identify probable gc roots (because they point
1808 * into valid segments at reasonable offsets). */
1809
1810 /* scm_expmem
1811 * is set to one if the initial segment of SCM_INIT_HEAP_SIZE bytes was
1812 * successfully allocated. If scm_expmem is set to one, subsequent segment
1813 * allocations will allocate segments of size SCM_EXPHEAP(scm_heap_size).
1814 */
1815 int scm_expmem = 0;
1816
1817 /* scm_heap_org
1818 * is the lowest base address of any heap segment.
1819 */
1820 SCM_CELLPTR scm_heap_org;
1821
1822 struct scm_heap_seg_data * scm_heap_table = 0;
1823 int scm_n_heap_segs = 0;
1824
1825 /* init_heap_seg
1826 * initializes a new heap segment and returns its usable size in bytes
1827 * (or zero if the segment could not be initialized).
1828 * The segment origin, segment size in bytes, and the span of objects
1829 * in cells are input parameters. The freelist is both input and output.
1830 *
1831 * This function presumes that the scm_heap_table has already been expanded
1832 * to accommodate a new segment record.
1833 */
1834
1835
1836 static scm_sizet
1837 init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelistp)
1838 {
1839 register SCM_CELLPTR ptr;
1840 #ifdef SCM_POINTERS_MUNGED
1841 register SCM scmptr;
1842 #else
1843 #undef scmptr
1844 #define scmptr ptr
1845 #endif
1846 SCM_CELLPTR seg_end;
1847 int new_seg_index;
1848 int n_new_cells;
1849 int span = freelistp->span;
1850
1851 if (seg_org == NULL)
1852 return 0;
1853
1854 ptr = seg_org;
1855
1856 size = (size / sizeof (scm_cell) / span) * span * sizeof (scm_cell);
1857
1858 /* Compute the ceiling on valid object pointers w/in this segment.
1859 */
1860 seg_end = CELL_DN ((char *) ptr + size);
1861
1862 /* Find the right place and insert the segment record.
1863 *
1864 */
1865 for (new_seg_index = 0;
1866 ( (new_seg_index < scm_n_heap_segs)
1867 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
1868 new_seg_index++)
1869 ;
1870
1871 {
1872 int i;
1873 for (i = scm_n_heap_segs; i > new_seg_index; --i)
1874 scm_heap_table[i] = scm_heap_table[i - 1];
1875 }
1876
1877 ++scm_n_heap_segs;
1878
1879 scm_heap_table[new_seg_index].valid = 0;
1880 scm_heap_table[new_seg_index].span = span;
1881 scm_heap_table[new_seg_index].freelistp = freelistp;
1882 scm_heap_table[new_seg_index].bounds[0] = (SCM_CELLPTR)ptr;
1883 scm_heap_table[new_seg_index].bounds[1] = (SCM_CELLPTR)seg_end;
1884
1885
1886 /* Compute the least valid object pointer w/in this segment
1887 */
1888 ptr = CELL_UP (ptr);
1889
1890
1891 /*n_new_cells*/
1892 n_new_cells = seg_end - ptr;
1893
1894 #ifdef GUILE_NEW_GC_SCHEME
1895
1896 freelistp->heap_size += n_new_cells;
1897
1898 /* Partition objects in this segment into clusters
1899 */
1900 {
1901 SCM clusters;
1902 SCM *clusterp = &clusters;
1903 int trigger = span * freelistp->gc_trigger;
1904 int n, c = 0;
1905
1906 while (n_new_cells > span)
1907 {
1908 if (n_new_cells > trigger)
1909 n = span + trigger;
1910 else
1911 n = n_new_cells;
1912 n_new_cells -= n;
1913 n -= span;
1914 c += span;
1915
1916 *clusterp = PTR2SCM (ptr);
1917 SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
1918 clusterp = SCM_CDRLOC (*clusterp);
1919
1920 ptr += span;
1921 seg_end = ptr + n;
1922 while (ptr < seg_end)
1923 {
1924 #ifdef SCM_POINTERS_MUNGED
1925 scmptr = PTR2SCM (ptr);
1926 #endif
1927 SCM_SETCAR (scmptr, scm_tc_free_cell);
1928 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
1929 ptr += span;
1930 }
1931 SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
1932 }
1933
1934 /* Correction for cluster cells + spill */
1935 freelistp->heap_size -= c + n_new_cells;
1936
1937 /* Patch up the last cluster pointer in the segment
1938 * to join it to the input freelist.
1939 */
1940 *clusterp = freelistp->clusters;
1941 freelistp->clusters = clusters;
1942 }
1943
1944 #else /* GUILE_NEW_GC_SCHEME */
1945
1946 /* Prepend objects in this segment to the freelist.
1947 */
1948 while (ptr < seg_end)
1949 {
1950 #ifdef SCM_POINTERS_MUNGED
1951 scmptr = PTR2SCM (ptr);
1952 #endif
1953 SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
1954 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
1955 ptr += span;
1956 }
1957
1958 ptr -= span;
1959
1960 /* Patch up the last freelist pointer in the segment
1961 * to join it to the input freelist.
1962 */
1963 SCM_SETCDR (PTR2SCM (ptr), freelistp->cells);
1964 freelistp->cells = PTR2SCM (CELL_UP (seg_org));
1965
1966 freelistp->heap_size += n_new_cells;
1967
1968 #endif /* GUILE_NEW_GC_SCHEME */
1969
1970 return size;
1971 #ifdef scmptr
1972 #undef scmptr
1973 #endif
1974 }
1975
1976
1977 static void
1978 alloc_some_heap (scm_freelist_t *freelistp)
1979 {
1980 struct scm_heap_seg_data * tmptable;
1981 SCM_CELLPTR ptr;
1982 scm_sizet len;
1983
1984 /* Critical code sections (such as the garbage collector)
1985 * aren't supposed to add heap segments.
1986 */
1987 if (scm_gc_heap_lock)
1988 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
1989
1990 /* Expand the heap tables to have room for the new segment.
1991 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
1992 * only if the allocation of the segment itself succeeds.
1993 */
1994 len = (1 + scm_n_heap_segs) * sizeof (struct scm_heap_seg_data);
1995
1996 SCM_SYSCALL (tmptable = ((struct scm_heap_seg_data *)
1997 realloc ((char *)scm_heap_table, len)));
1998 if (!tmptable)
1999 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
2000 else
2001 scm_heap_table = tmptable;
2002
2003
2004 /* Pick a size for the new heap segment.
2005 * The rule for picking the size of a segment is explained in
2006 * gc.h
2007 */
2008 if (scm_expmem)
2009 {
2010 len = (scm_sizet) SCM_EXPHEAP (freelistp->heap_size * sizeof (scm_cell));
2011 if ((scm_sizet) SCM_EXPHEAP (freelistp->heap_size * sizeof (scm_cell))
2012 != len)
2013 len = 0;
2014 }
2015 else
2016 len = SCM_HEAP_SEG_SIZE;
2017
2018 {
2019 scm_sizet smallest;
2020
2021 smallest = (freelistp->span * sizeof (scm_cell));
2022 if (len < smallest)
2023 len = (freelistp->span * sizeof (scm_cell));
2024
2025 /* Allocate with decaying ambition. */
2026 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2027 && (len >= smallest))
2028 {
2029 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (len));
2030 if (ptr)
2031 {
2032 init_heap_seg (ptr, len, freelistp);
2033 return;
2034 }
2035 len /= 2;
2036 }
2037 }
2038
2039 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
2040 }
2041
2042
2043
2044 SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
2045 (SCM name),
2046 "")
2047 #define FUNC_NAME s_scm_unhash_name
2048 {
2049 int x;
2050 int bound;
2051 SCM_VALIDATE_SYMBOL (1,name);
2052 SCM_DEFER_INTS;
2053 bound = scm_n_heap_segs;
2054 for (x = 0; x < bound; ++x)
2055 {
2056 SCM_CELLPTR p;
2057 SCM_CELLPTR pbound;
2058 p = (SCM_CELLPTR)scm_heap_table[x].bounds[0];
2059 pbound = (SCM_CELLPTR)scm_heap_table[x].bounds[1];
2060 while (p < pbound)
2061 {
2062 SCM incar;
2063 incar = p->car;
2064 if (1 == (7 & (int)incar))
2065 {
2066 --incar;
2067 if ( ((name == SCM_BOOL_T) || (SCM_CAR (incar) == name))
2068 && (SCM_CDR (incar) != 0)
2069 && (SCM_UNPACK (SCM_CDR (incar)) != 1))
2070 {
2071 p->car = name;
2072 }
2073 }
2074 ++p;
2075 }
2076 }
2077 SCM_ALLOW_INTS;
2078 return name;
2079 }
2080 #undef FUNC_NAME
2081
2082
2083 \f
2084 /* {GC Protection Helper Functions}
2085 */
2086
2087
2088 void
2089 scm_remember (SCM *ptr)
2090 { /* empty */ }
2091
2092
2093 /*
2094 These crazy functions prevent garbage collection
2095 of arguments after the first argument by
2096 ensuring they remain live throughout the
2097 function because they are used in the last
2098 line of the code block.
2099 It'd be better to have a nice compiler hint to
2100 aid the conservative stack-scanning GC. --03/09/00 gjb */
2101 SCM
2102 scm_return_first (SCM elt, ...)
2103 {
2104 return elt;
2105 }
2106
2107 int
2108 scm_return_first_int (int i, ...)
2109 {
2110 return i;
2111 }
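/* A minimal usage sketch for the comment above (consume_chars is an
 * invented callee): passing STR as a trailing argument keeps it, and hence
 * the storage behind SCM_CHARS (str), alive until the call returns, even
 * though STR is not otherwise used again.
 */
#if 0
extern SCM consume_chars (char *);

SCM
example_caller (void)
{
  SCM str = scm_makstr (10L, 0);
  SCM result = consume_chars (SCM_CHARS (str));
  return scm_return_first (result, str);
}
#endif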
2112
2113
2114 SCM
2115 scm_permanent_object (SCM obj)
2116 {
2117 SCM_REDEFER_INTS;
2118 scm_permobjs = scm_cons (obj, scm_permobjs);
2119 SCM_REALLOW_INTS;
2120 return obj;
2121 }
2122
2123
2124 /* Protect OBJ from the garbage collector. OBJ will not be freed,
2125 even if all other references are dropped, until someone applies
2126 scm_unprotect_object to it. This function returns OBJ.
2127
2128 Calls to scm_protect_object nest. For every object OBJ, there is a
2129 counter which scm_protect_object(OBJ) increments and
2130 scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
2131 an object's counter is greater than zero, the garbage collector
2132 will not free it.
2133
2134 Of course, that's not how it's implemented. scm_protect_object and
2135 scm_unprotect_object just maintain a list of references to things.
2136 Since the GC knows about this list, all objects it mentions stay
2137 alive. scm_protect_object adds its argument to the list;
2138 scm_unprotect_object removes the first occurrence of its argument
2139 from the list. */
2140 SCM
2141 scm_protect_object (SCM obj)
2142 {
2143 scm_protects = scm_cons (obj, scm_protects);
2144
2145 return obj;
2146 }
2147
2148
2149 /* Remove any protection for OBJ established by a prior call to
2150 scm_protect_object. This function returns OBJ.
2151
2152 See scm_protect_object for more information. */
2153 SCM
2154 scm_unprotect_object (SCM obj)
2155 {
2156 SCM *tail_ptr = &scm_protects;
2157
2158 while (SCM_CONSP (*tail_ptr))
2159 if (SCM_CAR (*tail_ptr) == obj)
2160 {
2161 *tail_ptr = SCM_CDR (*tail_ptr);
2162 break;
2163 }
2164 else
2165 tail_ptr = SCM_CDRLOC (*tail_ptr);
2166
2167 return obj;
2168 }
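/* A minimal usage sketch of the protection counting described above (the
 * object and the calls are only an example): protections nest, so the
 * object stays protected until the matching unprotect call.
 */
#if 0
{
  SCM obj = scm_cons (SCM_BOOL_F, SCM_EOL);
  scm_protect_object (obj);    /* one entry on scm_protects */
  scm_protect_object (obj);    /* a second entry */
  scm_unprotect_object (obj);  /* removes one entry; still protected */
  scm_unprotect_object (obj);  /* now OBJ may be collected again */
}
#endif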
2169
2170 int terminating;
2171
2172 /* called on process termination. */
2173 #ifdef HAVE_ATEXIT
2174 static void
2175 cleanup (void)
2176 #else
2177 #ifdef HAVE_ON_EXIT
2178 extern int on_exit (void (*procp) (), int arg);
2179
2180 static void
2181 cleanup (int status, void *arg)
2182 #else
2183 #error Dont know how to setup a cleanup handler on your system.
2184 #endif
2185 #endif
2186 {
2187 terminating = 1;
2188 scm_flush_all_ports ();
2189 }
2190
2191 \f
2192 static int
2193 make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelistp)
2194 {
2195 if (0L == init_heap_size)
2196 init_heap_size = SCM_INIT_HEAP_SIZE;
2197 if (!init_heap_seg ((SCM_CELLPTR) malloc (init_heap_size),
2198 init_heap_size,
2199 freelistp))
2200 {
2201 init_heap_size = SCM_HEAP_SEG_SIZE;
2202 if (!init_heap_seg ((SCM_CELLPTR) malloc (init_heap_size),
2203 init_heap_size,
2204 freelistp))
2205 return 1;
2206 }
2207 else
2208 scm_expmem = 1;
2209
2210 return 0;
2211 }
2212
2213 \f
2214 #ifdef GUILE_NEW_GC_SCHEME
2215 int
2216 scm_init_storage (scm_sizet init_heap_size, int gc_trigger,
2217 scm_sizet init_heap2_size, int gc_trigger2)
2218 #else
2219 int
2220 scm_init_storage (scm_sizet init_heap_size, scm_sizet init_heap2_size)
2221 #endif
2222 {
2223 scm_sizet j;
2224
2225 j = SCM_NUM_PROTECTS;
2226 while (j)
2227 scm_sys_protects[--j] = SCM_BOOL_F;
2228 scm_block_gc = 1;
2229
2230 #ifdef GUILE_NEW_GC_SCHEME
2231 scm_freelist = SCM_EOL;
2232 scm_master_freelist.clusters = SCM_EOL;
2233 scm_master_freelist.triggeredp = 0;
2234 scm_master_freelist.gc_trigger
2235 = gc_trigger ? gc_trigger : SCM_GC_TRIGGER;
2236 scm_master_freelist.span = 1;
2237 scm_master_freelist.collected = 0;
2238 scm_master_freelist.heap_size = 0;
2239 #else
2240 scm_freelist.cells = SCM_EOL;
2241 scm_freelist.span = 1;
2242 scm_freelist.collected = 0;
2243 scm_freelist.heap_size = 0;
2244 #endif
2245
2246 #ifdef GUILE_NEW_GC_SCHEME
2247 scm_freelist2 = SCM_EOL;
2248 scm_master_freelist2.clusters = SCM_EOL;
2249 scm_master_freelist2.triggeredp = 0;
2250 scm_master_freelist2.gc_trigger
2251 = gc_trigger2 ? gc_trigger2 : SCM_GC_TRIGGER2;
2252 scm_master_freelist2.span = 2;
2253 scm_master_freelist2.collected = 0;
2254 scm_master_freelist2.heap_size = 0;
2255 #else
2256 scm_freelist2.cells = SCM_EOL;
2257 scm_freelist2.span = 2;
2258 scm_freelist2.collected = 0;
2259 scm_freelist2.heap_size = 0;
2260 #endif
2261
2262 scm_expmem = 0;
2263
2264 j = SCM_HEAP_SEG_SIZE;
2265 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2266 scm_heap_table = ((struct scm_heap_seg_data *)
2267 scm_must_malloc (sizeof (struct scm_heap_seg_data) * 2, "hplims"));
2268
2269 #ifdef GUILE_NEW_GC_SCHEME
2270 if (make_initial_segment (init_heap_size, &scm_master_freelist) ||
2271 make_initial_segment (init_heap2_size, &scm_master_freelist2))
2272 return 1;
2273 #else
2274 if (make_initial_segment (init_heap_size, &scm_freelist) ||
2275 make_initial_segment (init_heap2_size, &scm_freelist2))
2276 return 1;
2277 #endif
2278
2279 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0]);
2280
2281 /* scm_hplims[0] can change. do not remove scm_heap_org */
2282 scm_weak_vectors = SCM_EOL;
2283
2284 /* Initialise the list of ports. */
2285 scm_port_table = (scm_port **)
2286 malloc (sizeof (scm_port *) * scm_port_table_room);
2287 if (!scm_port_table)
2288 return 1;
2289
2290 #ifdef HAVE_ATEXIT
2291 atexit (cleanup);
2292 #else
2293 #ifdef HAVE_ON_EXIT
2294 on_exit (cleanup, 0);
2295 #endif
2296 #endif
2297
2298 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
2299 SCM_SETCDR (scm_undefineds, scm_undefineds);
2300
2301 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2302 scm_nullstr = scm_makstr (0L, 0);
2303 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
2304 scm_symhash = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2305 scm_weak_symhash = scm_make_weak_key_hash_table ((SCM) SCM_MAKINUM (scm_symhash_dim));
2306 scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2307 scm_stand_in_procs = SCM_EOL;
2308 scm_permobjs = SCM_EOL;
2309 scm_protects = SCM_EOL;
2310 scm_asyncs = SCM_EOL;
2311 scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
2312 scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
2313 #ifdef SCM_BIGDIG
2314 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
2315 #endif
2316 return 0;
2317 }
2318 \f
2319
2320 void
2321 scm_init_gc ()
2322 {
2323 #include "gc.x"
2324 }