1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
52 #include "guardians.h"
74 #define var_start(x, y) va_start(x, y)
77 #define var_start(x, y) va_start(x)
81 /* {heap tuning parameters}
83 * These are parameters for controlling memory allocation. The heap
84 * is the area out of which scm_cons, and object headers are allocated.
86 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
87 * 64 bit machine. The units of the _SIZE parameters are bytes.
88 * Cons pairs and object headers occupy one heap cell.
90 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
91 * allocated initially the heap will grow by half its current size
92 * each subsequent time more heap is needed.
94 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
95 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
96 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
97 * is in scm_init_storage() and alloc_some_heap() in sys.c
99 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
100 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
102 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
105 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
108 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
109 * reclaimed by a GC triggered by must_malloc. If less than this is
110 * reclaimed, the trigger threshold is raised. [I don't know what a
111 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
112 * work around an oscillation that caused almost constant GC.]
/* Tuning for freelist 1 (single-cell objects): initial heap allocation
   in bytes and the number of cells per cluster.  The trigger value is
   negative; presumably this selects a heap-relative (percentage) GC
   trigger rather than a constant one (cf. gc_trigger_fraction) —
   TODO confirm against the trigger-computation code. */
115 #define SCM_INIT_HEAP_SIZE_1 (40000L * sizeof (scm_cell))
116 #define SCM_CLUSTER_SIZE_1 2000L
117 #define SCM_GC_TRIGGER_1 -50
/* Tuning for freelist 2 (span-2, i.e. double-cell objects): initial
   heap allocation in bytes (2500 two-cell objects) and cells per
   cluster. */
119 #define SCM_INIT_HEAP_SIZE_2 (2500L * 2 * sizeof (scm_cell))
120 #define SCM_CLUSTER_SIZE_2 1000L
121 /* The following value may seem large, but note that if we get to GC at
122 * all, this means that we have a numerically intensive application
124 #define SCM_GC_TRIGGER_2 -50
126 #define SCM_MAX_SEGMENT_SIZE 2097000L /* a little less (adm) than 2 Mb */
128 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
130 # define SCM_HEAP_SEG_SIZE 32768L
133 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
135 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
/* Make the heap grow with factor 1.5: each expansion requests half of
 * the current heap size on top of what is already allocated.  The
 * parameter is parenthesized in the expansion so the division is
 * still applied to the whole argument if the macro is ever invoked
 * with a compound expression (macro hygiene; see CERT PRE01-C). */
#define SCM_EXPHEAP(scm_heap_size) ((scm_heap_size) / 2)
/* Initial amount of must_malloc'd storage (in bytes) tolerated before
   a malloc-triggered GC; the trigger threshold is adapted upward when
   a GC reclaims less than SCM_MTRIGGER_HYSTERESIS bytes (see the
   commentary above: 1/10 of the limit was chosen empirically to damp
   an oscillation that caused near-constant GC). */
140 #define SCM_INIT_MALLOC_LIMIT 100000
141 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
143 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
144 bounds for allocated storage */
147 /*in 386 protected mode we must only adjust the offset */
148 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
149 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
152 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
153 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
/* Round a pointer up (CELL_UP) or down (CELL_DN) to the nearest
   boundary of a span-sized group of cells, by masking the low bits of
   the address.  Used by scm_init_heap_seg to find cell-aligned inner
   bounds of an allocated segment.
   NOTE(review): the (long)(p) cast assumes a pointer fits in a long —
   true on ILP32/LP64 targets, not on LLP64 (e.g. 64-bit Windows);
   confirm the set of supported platforms. */
155 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
156 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
/* CLUSTER_SIZE_IN_BYTES: total byte size of one cluster of FREELIST
   (cluster_size cells, each object spanning `span` cells).
   ALIGNMENT_SLACK: extra bytes to request per allocation so the start
   address can always be rounded up to a span-aligned cell boundary
   (cf. CELL_UP). */
159 #define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
160 #define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
167 typedef struct scm_freelist_t
{
168 /* collected cells */
170 #ifdef GUILE_NEW_GC_SCHEME
171 /* number of cells left to collect before cluster is full */
172 unsigned int left_to_collect
;
173 /* a list of freelists, each of size gc_trigger,
174 except the last one which may be shorter */
177 /* this is the number of cells in each cluster, including the spine cell */
179 /* set to grow the heap when we run out of clusters
182 /* minimum number of objects allocated before GC is triggered
185 /* defines gc_trigger as percent of heap size
186 * 0 => constant trigger
188 int gc_trigger_fraction
;
190 /* number of cells per object on this list */
192 /* number of collected cells during last GC */
194 /* total number of cells in heap segments
195 * belonging to this list.
200 #ifdef GUILE_NEW_GC_SCHEME
201 SCM scm_freelist
= SCM_EOL
;
202 scm_freelist_t scm_master_freelist
= {
203 SCM_EOL
, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_1
, 0, 0, 0, 1, 0, 0
205 SCM scm_freelist2
= SCM_EOL
;
206 scm_freelist_t scm_master_freelist2
= {
207 SCM_EOL
, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_2
, 0, 0, 0, 2, 0, 0
210 scm_freelist_t scm_freelist
= { SCM_EOL
, 1, 0, 0 };
211 scm_freelist_t scm_freelist2
= { SCM_EOL
, 2, 0, 0 };
215 * is the number of bytes of must_malloc allocation needed to trigger gc.
217 unsigned long scm_mtrigger
;
221 * If set, don't expand the heap. Set only during gc, during which no allocation
222 * is supposed to take place anyway.
224 int scm_gc_heap_lock
= 0;
227 * Don't pause for collection if this is set -- just
231 int scm_block_gc
= 1;
233 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
234 * collection (GC) more space is allocated for the heap.
/* Parenthesize the parameter before applying `->` so the macro stays
 * correct if ever invoked with a compound expression (macro hygiene;
 * see CERT PRE01-C).  Current call sites pass a plain identifier, so
 * the expansion is unchanged for them. */
#define MIN_GC_YIELD(freelist) ((freelist)->heap_size / 4)
238 /* During collection, this accumulates objects holding
241 SCM scm_weak_vectors
;
243 /* GC Statistics Keeping
245 unsigned long scm_cells_allocated
= 0;
246 long scm_mallocated
= 0;
247 /* unsigned long scm_gc_cells_collected; */
248 unsigned long scm_gc_malloc_collected
;
249 unsigned long scm_gc_ports_collected
;
250 unsigned long scm_gc_rt
;
251 unsigned long scm_gc_time_taken
= 0;
253 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
254 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
255 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
256 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
257 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
258 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
260 typedef struct scm_heap_seg_data_t
262 /* lower and upper bounds of the segment */
263 SCM_CELLPTR bounds
[2];
265 /* address of the head-of-freelist pointer for this segment's cells.
266 All segments usually point to the same one, scm_freelist. */
267 scm_freelist_t
*freelist
;
269 /* number of SCM words per object in this segment */
272 /* If SEG_DATA->valid is non-zero, the conservative marking
273 functions will apply SEG_DATA->valid to the purported pointer and
274 SEG_DATA, and mark the object iff the function returns non-zero.
275 At the moment, I don't think anyone uses this. */
277 } scm_heap_seg_data_t
;
282 static void scm_mark_weak_vector_spines (void);
283 static scm_sizet
init_heap_seg (SCM_CELLPTR
, scm_sizet
, scm_freelist_t
*);
284 static void alloc_some_heap (scm_freelist_t
*);
288 /* Debugging functions. */
290 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
292 /* Return the number of the heap segment containing CELL. */
298 for (i
= 0; i
< scm_n_heap_segs
; i
++)
299 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], (SCM_CELLPTR
) cell
)
300 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], (SCM_CELLPTR
) cell
))
302 fprintf (stderr
, "which_seg: can't find segment containing cell %lx\n",
308 #ifdef GUILE_NEW_GC_SCHEME
310 map_free_list (scm_freelist_t
*master
, SCM freelist
)
312 int last_seg
= -1, count
= 0;
315 for (f
= freelist
; SCM_NIMP (f
); f
= SCM_CDR (f
))
317 int this_seg
= which_seg (f
);
319 if (this_seg
!= last_seg
)
322 fprintf (stderr
, " %5d %d-cells in segment %d\n",
323 count
, master
->span
, last_seg
);
330 fprintf (stderr
, " %5d %d-cells in segment %d\n",
331 count
, master
->span
, last_seg
);
335 map_free_list (scm_freelist_t
*freelist
)
337 int last_seg
= -1, count
= 0;
340 for (f
= freelist
->cells
; SCM_NIMP (f
); f
= SCM_CDR (f
))
342 int this_seg
= which_seg (f
);
344 if (this_seg
!= last_seg
)
347 fprintf (stderr
, " %5d %d-cells in segment %d\n",
348 count
, freelist
->span
, last_seg
);
355 fprintf (stderr
, " %5d %d-cells in segment %d\n",
356 count
, freelist
->span
, last_seg
);
360 SCM_DEFINE (scm_map_free_list
, "map-free-list", 0, 0, 0,
362 "Print debugging information about the free-list.\n"
363 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
364 #define FUNC_NAME s_scm_map_free_list
367 fprintf (stderr
, "%d segments total (%d:%d",
369 scm_heap_table
[0].span
,
370 scm_heap_table
[0].bounds
[1] - scm_heap_table
[0].bounds
[0]);
371 for (i
= 1; i
< scm_n_heap_segs
; i
++)
372 fprintf (stderr
, ", %d:%d",
373 scm_heap_table
[i
].span
,
374 scm_heap_table
[i
].bounds
[1] - scm_heap_table
[i
].bounds
[0]);
375 fprintf (stderr
, ")\n");
376 #ifdef GUILE_NEW_GC_SCHEME
377 map_free_list (&scm_master_freelist
, scm_freelist
);
378 map_free_list (&scm_master_freelist2
, scm_freelist2
);
380 map_free_list (&scm_freelist
);
381 map_free_list (&scm_freelist2
);
385 return SCM_UNSPECIFIED
;
389 #ifdef GUILE_NEW_GC_SCHEME
390 static int last_cluster
;
391 static int last_size
;
394 free_list_length (char *title
, int i
, SCM freelist
)
398 for (ls
= freelist
; SCM_NNULLP (ls
); ls
= SCM_CDR (ls
))
399 if (SCM_UNPACK_CAR (ls
) == scm_tc_free_cell
)
403 fprintf (stderr
, "bad cell in %s at position %d\n", title
, n
);
410 if (last_cluster
== i
- 1)
411 fprintf (stderr
, "\t%d\n", last_size
);
413 fprintf (stderr
, "-%d\t%d\n", i
- 1, last_size
);
416 fprintf (stderr
, "%s %d", title
, i
);
418 fprintf (stderr
, "%s\t%d\n", title
, n
);
426 free_list_lengths (char *title
, scm_freelist_t
*master
, SCM freelist
)
429 int i
= 0, len
, n
= 0;
430 fprintf (stderr
, "%s\n\n", title
);
431 n
+= free_list_length ("free list", -1, freelist
);
432 for (clusters
= master
->clusters
;
433 SCM_NNULLP (clusters
);
434 clusters
= SCM_CDR (clusters
))
436 len
= free_list_length ("cluster", i
++, SCM_CAR (clusters
));
439 if (last_cluster
== i
- 1)
440 fprintf (stderr
, "\t%d\n", last_size
);
442 fprintf (stderr
, "-%d\t%d\n", i
- 1, last_size
);
443 fprintf (stderr
, "\ntotal %d objects\n\n", n
);
446 SCM_DEFINE (scm_free_list_length
, "free-list-length", 0, 0, 0,
448 "Print debugging information about the free-list.\n"
449 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
450 #define FUNC_NAME s_scm_free_list_length
452 free_list_lengths ("1-words", &scm_master_freelist
, scm_freelist
);
453 free_list_lengths ("2-words", &scm_master_freelist2
, scm_freelist2
);
454 return SCM_UNSPECIFIED
;
461 #ifdef GUILE_DEBUG_FREELIST
463 /* Number of calls to SCM_NEWCELL since startup. */
464 static unsigned long scm_newcell_count
;
465 static unsigned long scm_newcell2_count
;
467 /* Search freelist for anything that isn't marked as a free cell.
468 Abort if we find something. */
469 #ifdef GUILE_NEW_GC_SCHEME
471 scm_check_freelist (SCM freelist
)
476 for (f
= freelist
; SCM_NIMP (f
); f
= SCM_CDR (f
), i
++)
477 if (SCM_CAR (f
) != (SCM
) scm_tc_free_cell
)
479 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %d'th elt\n",
480 scm_newcell_count
, i
);
487 scm_check_freelist (scm_freelist_t
*freelist
)
492 for (f
= freelist
->cells
; SCM_NIMP (f
); f
= SCM_CDR (f
), i
++)
493 if (SCM_CAR (f
) != (SCM
) scm_tc_free_cell
)
495 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %d'th elt\n",
496 scm_newcell_count
, i
);
503 static int scm_debug_check_freelist
= 0;
505 SCM_DEFINE (scm_gc_set_debug_check_freelist_x
, "gc-set-debug-check-freelist!", 1, 0, 0,
507 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
508 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
509 "compile-time flag was selected.\n")
510 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
512 SCM_VALIDATE_BOOL_COPY (1, flag
, scm_debug_check_freelist
);
513 return SCM_UNSPECIFIED
;
518 #ifdef GUILE_NEW_GC_SCHEME
521 scm_debug_newcell (void)
526 if (scm_debug_check_freelist
)
528 scm_check_freelist (scm_freelist
);
532 /* The rest of this is supposed to be identical to the SCM_NEWCELL
534 if (SCM_IMP (scm_freelist
))
535 new = scm_gc_for_newcell (&scm_master_freelist
, &scm_freelist
);
539 scm_freelist
= SCM_CDR (scm_freelist
);
540 SCM_SETCAR (new, scm_tc16_allocated
);
547 scm_debug_newcell2 (void)
551 scm_newcell2_count
++;
552 if (scm_debug_check_freelist
)
554 scm_check_freelist (scm_freelist2
);
558 /* The rest of this is supposed to be identical to the SCM_NEWCELL
560 if (SCM_IMP (scm_freelist2
))
561 new = scm_gc_for_newcell (&scm_master_freelist2
, &scm_freelist2
);
565 scm_freelist2
= SCM_CDR (scm_freelist2
);
566 SCM_SETCAR (new, scm_tc16_allocated
);
572 #else /* GUILE_NEW_GC_SCHEME */
575 scm_debug_newcell (void)
580 if (scm_debug_check_freelist
)
582 scm_check_freelist (&scm_freelist
);
586 /* The rest of this is supposed to be identical to the SCM_NEWCELL
588 if (SCM_IMP (scm_freelist
.cells
))
589 new = scm_gc_for_newcell (&scm_freelist
);
592 new = scm_freelist
.cells
;
593 scm_freelist
.cells
= SCM_CDR (scm_freelist
.cells
);
594 SCM_SETCAR (new, scm_tc16_allocated
);
595 ++scm_cells_allocated
;
602 scm_debug_newcell2 (void)
606 scm_newcell2_count
++;
607 if (scm_debug_check_freelist
) {
608 scm_check_freelist (&scm_freelist2
);
612 /* The rest of this is supposed to be identical to the SCM_NEWCELL2
614 if (SCM_IMP (scm_freelist2
.cells
))
615 new = scm_gc_for_newcell (&scm_freelist2
);
618 new = scm_freelist2
.cells
;
619 scm_freelist2
.cells
= SCM_CDR (scm_freelist2
.cells
);
620 SCM_SETCAR (new, scm_tc16_allocated
);
621 scm_cells_allocated
+= 2;
627 #endif /* GUILE_NEW_GC_SCHEME */
628 #endif /* GUILE_DEBUG_FREELIST */
632 /* {Scheme Interface to GC}
635 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
637 "Returns an association list of statistics about Guile's current use of storage. ")
638 #define FUNC_NAME s_scm_gc_stats
643 long int local_scm_mtrigger
;
644 long int local_scm_mallocated
;
645 long int local_scm_heap_size
;
646 long int local_scm_cells_allocated
;
647 long int local_scm_gc_time_taken
;
655 for (i
= scm_n_heap_segs
; i
--; )
656 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
657 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
659 if (scm_n_heap_segs
!= n
)
664 local_scm_mtrigger
= scm_mtrigger
;
665 local_scm_mallocated
= scm_mallocated
;
666 #ifdef GUILE_NEW_GC_SCHEME
667 local_scm_heap_size
= scm_master_freelist
.heap_size
; /*fixme*/
669 local_scm_heap_size
= scm_freelist
.heap_size
; /*fixme*/
671 local_scm_cells_allocated
= scm_cells_allocated
;
672 local_scm_gc_time_taken
= scm_gc_time_taken
;
674 answer
= scm_listify (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
675 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
676 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
677 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
678 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
679 scm_cons (sym_heap_segments
, heap_segs
),
688 scm_gc_start (const char *what
)
690 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ());
691 /* scm_gc_cells_collected = 0; */
692 scm_gc_malloc_collected
= 0;
693 scm_gc_ports_collected
= 0;
699 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt
;
700 scm_gc_time_taken
+= scm_gc_rt
;
701 scm_system_async_mark (scm_gc_async
);
705 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
707 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
708 "returned by this function for @var{obj}")
709 #define FUNC_NAME s_scm_object_address
711 return scm_ulong2num ((unsigned long) obj
);
716 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
718 "Scans all of SCM objects and reclaims for further use those that are\n"
719 "no longer accessible.")
720 #define FUNC_NAME s_scm_gc
725 return SCM_UNSPECIFIED
;
731 /* {C Interface For When GC is Triggered}
734 #ifdef GUILE_NEW_GC_SCHEME
736 /* When we get POSIX threads support, the master will be global and
737 * common while the freelist will be individual for each thread.
741 scm_gc_for_newcell (scm_freelist_t
*master
, SCM
*freelist
)
747 if (SCM_NULLP (master
->clusters
))
749 if (master
->grow_heap_p
)
751 master
->grow_heap_p
= 0;
752 alloc_some_heap (master
);
757 cell
= SCM_CAR (master
->clusters
);
758 master
->clusters
= SCM_CDR (master
->clusters
);
760 while (SCM_NULLP (cell
));
762 *freelist
= SCM_CDR (cell
);
763 SCM_SETCAR (cell
, scm_tc16_allocated
);
768 /* This is a support routine which can be used to reserve a cluster
769 * for some special use, such as debugging. It won't be useful until
770 * free cells are preserved between garbage collections.
774 scm_alloc_cluster (scm_freelist_t
*master
)
777 cell
= scm_gc_for_newcell (master
, &freelist
);
778 SCM_SETCDR (cell
, freelist
);
783 #else /* GUILE_NEW_GC_SCHEME */
786 scm_gc_for_alloc (scm_freelist_t
*freelist
)
790 #ifdef GUILE_DEBUG_FREELIST
791 fprintf (stderr
, "Collected: %d, min_yield: %d\n",
792 freelist
->collected
, MIN_GC_YIELD (freelist
));
794 if ((freelist
->collected
< MIN_GC_YIELD (freelist
))
795 || SCM_IMP (freelist
->cells
))
796 alloc_some_heap (freelist
);
802 scm_gc_for_newcell (scm_freelist_t
*freelist
)
805 scm_gc_for_alloc (freelist
);
806 fl
= freelist
->cells
;
807 freelist
->cells
= SCM_CDR (fl
);
808 SCM_SETCAR (fl
, scm_tc16_allocated
);
812 #endif /* GUILE_NEW_GC_SCHEME */
815 scm_igc (const char *what
)
821 SCM_NULLP (scm_freelist
)
823 : (SCM_NULLP (scm_freelist2
) ? "o" : "m"));
826 /* During the critical section, only the current thread may run. */
827 SCM_THREAD_CRITICAL_SECTION_START
;
830 /* fprintf (stderr, "gc: %s\n", what); */
834 if (!scm_stack_base
|| scm_block_gc
)
840 if (scm_mallocated
< 0)
841 /* The byte count of allocated objects has underflowed. This is
842 probably because you forgot to report the sizes of objects you
843 have allocated, by calling scm_done_malloc or some such. When
844 the GC freed them, it subtracted their size from
845 scm_mallocated, which underflowed. */
848 if (scm_gc_heap_lock
)
849 /* We've invoked the collector while a GC is already in progress.
850 That should never happen. */
855 scm_weak_vectors
= SCM_EOL
;
857 scm_guardian_gc_init ();
859 /* unprotect any struct types with no instances */
865 pos
= &scm_type_obj_list
;
866 type_list
= scm_type_obj_list
;
867 while (type_list
!= SCM_EOL
)
868 if (SCM_VELTS (SCM_CAR (type_list
))[scm_struct_i_refcnt
])
870 pos
= SCM_CDRLOC (type_list
);
871 type_list
= SCM_CDR (type_list
);
875 *pos
= SCM_CDR (type_list
);
876 type_list
= SCM_CDR (type_list
);
881 /* flush dead entries from the continuation stack */
886 elts
= SCM_VELTS (scm_continuation_stack
);
887 bound
= SCM_LENGTH (scm_continuation_stack
);
888 x
= SCM_INUM (scm_continuation_stack_ptr
);
891 elts
[x
] = SCM_BOOL_F
;
898 /* Protect from the C stack. This must be the first marking
899 * done because it provides information about what objects
900 * are "in-use" by the C code. "in-use" objects are those
901 * for which the values from SCM_LENGTH and SCM_CHARS must remain
902 * usable. This requirement is stricter than a liveness
903 * requirement -- in particular, it constrains the implementation
904 * of scm_vector_set_length_x.
906 SCM_FLUSH_REGISTER_WINDOWS
;
907 /* This assumes that all registers are saved into the jmp_buf */
908 setjmp (scm_save_regs_gc_mark
);
909 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
910 ( (scm_sizet
) (sizeof (SCM_STACKITEM
) - 1 +
911 sizeof scm_save_regs_gc_mark
)
912 / sizeof (SCM_STACKITEM
)));
915 /* stack_len is long rather than scm_sizet in order to guarantee that
916 &stack_len is long aligned */
917 #ifdef SCM_STACK_GROWS_UP
919 long stack_len
= (SCM_STACKITEM
*) (&stack_len
) - scm_stack_base
;
921 long stack_len
= scm_stack_size (scm_stack_base
);
923 scm_mark_locations (scm_stack_base
, (scm_sizet
) stack_len
);
926 long stack_len
= scm_stack_base
- (SCM_STACKITEM
*) (&stack_len
);
928 long stack_len
= scm_stack_size (scm_stack_base
);
930 scm_mark_locations ((scm_stack_base
- stack_len
), (scm_sizet
) stack_len
);
934 #else /* USE_THREADS */
936 /* Mark every thread's stack and registers */
937 scm_threads_mark_stacks ();
939 #endif /* USE_THREADS */
941 /* FIXME: insert a phase to un-protect string-data preserved
942 * in scm_vector_set_length_x.
945 j
= SCM_NUM_PROTECTS
;
947 scm_gc_mark (scm_sys_protects
[j
]);
949 /* FIXME: we should have a means to register C functions to be run
950 * in different phases of GC
952 scm_mark_subr_table ();
955 scm_gc_mark (scm_root
->handle
);
958 scm_mark_weak_vector_spines ();
960 scm_guardian_zombify ();
968 SCM_THREAD_CRITICAL_SECTION_END
;
978 /* Mark an object precisely.
993 if (SCM_NCELLP (ptr
))
994 scm_wta (ptr
, "rogue pointer in heap", NULL
);
996 switch (SCM_TYP7 (ptr
))
998 case scm_tcs_cons_nimcar
:
999 if (SCM_GCMARKP (ptr
))
1001 SCM_SETGCMARK (ptr
);
1002 if (SCM_IMP (SCM_CDR (ptr
))) /* SCM_IMP works even with a GC mark */
1004 ptr
= SCM_CAR (ptr
);
1007 scm_gc_mark (SCM_CAR (ptr
));
1008 ptr
= SCM_GCCDR (ptr
);
1010 case scm_tcs_cons_imcar
:
1011 if (SCM_GCMARKP (ptr
))
1013 SCM_SETGCMARK (ptr
);
1014 ptr
= SCM_GCCDR (ptr
);
1017 if (SCM_GCMARKP (ptr
))
1019 SCM_SETGCMARK (ptr
);
1020 scm_gc_mark (SCM_CELL_WORD (ptr
, 2));
1021 ptr
= SCM_GCCDR (ptr
);
1023 case scm_tcs_cons_gloc
:
1024 if (SCM_GCMARKP (ptr
))
1026 SCM_SETGCMARK (ptr
);
1029 vcell
= SCM_CAR (ptr
) - 1L;
1030 switch (SCM_UNPACK (SCM_CDR (vcell
)))
1033 scm_gc_mark (vcell
);
1034 ptr
= SCM_GCCDR (ptr
);
1046 vtable_data
= (SCM
*)vcell
;
1047 layout
= vtable_data
[scm_vtable_index_layout
];
1048 len
= SCM_LENGTH (layout
);
1049 fields_desc
= SCM_CHARS (layout
);
1050 /* We're using SCM_GCCDR here like STRUCT_DATA, except
1051 that it removes the mark */
1052 mem
= (SCM
*)SCM_GCCDR (ptr
);
1054 if (SCM_UNPACK (vtable_data
[scm_struct_i_flags
]) & SCM_STRUCTF_ENTITY
)
1056 scm_gc_mark (mem
[scm_struct_i_procedure
]);
1057 scm_gc_mark (mem
[scm_struct_i_setter
]);
1061 for (x
= 0; x
< len
- 2; x
+= 2, ++mem
)
1062 if (fields_desc
[x
] == 'p')
1064 if (fields_desc
[x
] == 'p')
1067 if (SCM_LAYOUT_TAILP (fields_desc
[x
+ 1]))
1068 for (j
= (long int) *mem
; x
; --x
)
1069 scm_gc_mark (*++mem
);
1074 if (!SCM_CDR (vcell
))
1076 SCM_SETGCMARK (vcell
);
1077 ptr
= vtable_data
[scm_vtable_index_vtable
];
1084 case scm_tcs_closures
:
1085 if (SCM_GCMARKP (ptr
))
1087 SCM_SETGCMARK (ptr
);
1088 if (SCM_IMP (SCM_CDR (ptr
)))
1090 ptr
= SCM_CLOSCAR (ptr
);
1093 scm_gc_mark (SCM_CLOSCAR (ptr
));
1094 ptr
= SCM_GCCDR (ptr
);
1096 case scm_tc7_vector
:
1097 case scm_tc7_lvector
:
1101 if (SCM_GC8MARKP (ptr
))
1103 SCM_SETGC8MARK (ptr
);
1104 i
= SCM_LENGTH (ptr
);
1108 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
1109 scm_gc_mark (SCM_VELTS (ptr
)[i
]);
1110 ptr
= SCM_VELTS (ptr
)[0];
1112 case scm_tc7_contin
:
1115 SCM_SETGC8MARK (ptr
);
1116 if (SCM_VELTS (ptr
))
1117 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr
),
1120 (sizeof (SCM_STACKITEM
) + -1 +
1121 sizeof (scm_contregs
)) /
1122 sizeof (SCM_STACKITEM
)));
1126 case scm_tc7_byvect
:
1133 #ifdef HAVE_LONG_LONGS
1134 case scm_tc7_llvect
:
1137 case scm_tc7_string
:
1138 SCM_SETGC8MARK (ptr
);
1141 case scm_tc7_substring
:
1142 if (SCM_GC8MARKP(ptr
))
1144 SCM_SETGC8MARK (ptr
);
1145 ptr
= SCM_CDR (ptr
);
1149 if (SCM_GC8MARKP(ptr
))
1151 SCM_WVECT_GC_CHAIN (ptr
) = scm_weak_vectors
;
1152 scm_weak_vectors
= ptr
;
1153 SCM_SETGC8MARK (ptr
);
1154 if (SCM_IS_WHVEC_ANY (ptr
))
1161 len
= SCM_LENGTH (ptr
);
1162 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
1163 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
1165 for (x
= 0; x
< len
; ++x
)
1168 alist
= SCM_VELTS (ptr
)[x
];
1170 /* mark everything on the alist except the keys or
1171 * values, according to weak_values and weak_keys. */
1172 while ( SCM_CONSP (alist
)
1173 && !SCM_GCMARKP (alist
)
1174 && SCM_CONSP (SCM_CAR (alist
)))
1179 kvpair
= SCM_CAR (alist
);
1180 next_alist
= SCM_CDR (alist
);
1183 * SCM_SETGCMARK (alist);
1184 * SCM_SETGCMARK (kvpair);
1186 * It may be that either the key or value is protected by
1187 * an escaped reference to part of the spine of this alist.
1188 * If we mark the spine here, and only mark one or neither of the
1189 * key and value, they may never be properly marked.
1190 * This leads to a horrible situation in which an alist containing
1191 * freelist cells is exported.
1193 * So only mark the spines of these arrays last of all marking.
1194 * If somebody confuses us by constructing a weak vector
1195 * with a circular alist then we are hosed, but at least we
1196 * won't prematurely drop table entries.
1199 scm_gc_mark (SCM_CAR (kvpair
));
1201 scm_gc_mark (SCM_GCCDR (kvpair
));
1204 if (SCM_NIMP (alist
))
1205 scm_gc_mark (alist
);
1210 case scm_tc7_msymbol
:
1211 if (SCM_GC8MARKP(ptr
))
1213 SCM_SETGC8MARK (ptr
);
1214 scm_gc_mark (SCM_SYMBOL_FUNC (ptr
));
1215 ptr
= SCM_SYMBOL_PROPS (ptr
);
1217 case scm_tc7_ssymbol
:
1218 if (SCM_GC8MARKP(ptr
))
1220 SCM_SETGC8MARK (ptr
);
1225 i
= SCM_PTOBNUM (ptr
);
1226 if (!(i
< scm_numptob
))
1228 if (SCM_GC8MARKP (ptr
))
1230 SCM_SETGC8MARK (ptr
);
1231 if (SCM_PTAB_ENTRY(ptr
))
1232 scm_gc_mark (SCM_PTAB_ENTRY(ptr
)->file_name
);
1233 if (scm_ptobs
[i
].mark
)
1235 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
1242 if (SCM_GC8MARKP (ptr
))
1244 SCM_SETGC8MARK (ptr
);
1245 switch (SCM_GCTYP16 (ptr
))
1246 { /* should be faster than going through scm_smobs */
1247 case scm_tc_free_cell
:
1248 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1249 case scm_tc16_allocated
:
1252 case scm_tc16_complex
:
1255 i
= SCM_SMOBNUM (ptr
);
1256 if (!(i
< scm_numsmob
))
1258 if (scm_smobs
[i
].mark
)
1260 ptr
= (scm_smobs
[i
].mark
) (ptr
);
1268 def
:scm_wta (ptr
, "unknown type in ", "gc_mark");
1273 /* Mark a Region Conservatively
1277 scm_mark_locations (SCM_STACKITEM x
[], scm_sizet n
)
1279 register long m
= n
;
1281 register SCM_CELLPTR ptr
;
1284 if (SCM_CELLP (*(SCM
**) (& x
[m
])))
1286 ptr
= (SCM_CELLPTR
) SCM2PTR ((*(SCM
**) & x
[m
]));
1288 j
= scm_n_heap_segs
- 1;
1289 if ( SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1290 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1297 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
1299 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1307 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
1311 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
1316 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1320 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1326 if ( !scm_heap_table
[seg_id
].valid
1327 || scm_heap_table
[seg_id
].valid (ptr
,
1328 &scm_heap_table
[seg_id
]))
1329 if ( scm_heap_table
[seg_id
].span
== 1
1330 || SCM_DOUBLE_CELLP (*(SCM
**) (& x
[m
])))
1331 scm_gc_mark (*(SCM
*) & x
[m
]);
1340 /* The following is a C predicate which determines if an SCM value can be
1341 regarded as a pointer to a cell on the heap. The code is duplicated
1342 from scm_mark_locations. */
1346 scm_cellp (SCM value
)
1349 register SCM_CELLPTR ptr
;
1351 if SCM_CELLP (*(SCM
**) (& value
))
1353 ptr
= (SCM_CELLPTR
) SCM2PTR ((*(SCM
**) & value
));
1355 j
= scm_n_heap_segs
- 1;
1356 if ( SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1357 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1364 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
1366 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1374 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
1378 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
1383 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1387 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1393 if ( !scm_heap_table
[seg_id
].valid
1394 || scm_heap_table
[seg_id
].valid (ptr
,
1395 &scm_heap_table
[seg_id
]))
1407 scm_mark_weak_vector_spines ()
1411 for (w
= scm_weak_vectors
; w
!= SCM_EOL
; w
= SCM_WVECT_GC_CHAIN (w
))
1413 if (SCM_IS_WHVEC_ANY (w
))
1421 ptr
= SCM_VELTS (w
);
1423 for (j
= 0; j
< n
; ++j
)
1428 while ( SCM_CONSP (alist
)
1429 && !SCM_GCMARKP (alist
)
1430 && SCM_CONSP (SCM_CAR (alist
)))
1432 SCM_SETGCMARK (alist
);
1433 SCM_SETGCMARK (SCM_CAR (alist
));
1434 alist
= SCM_GCCDR (alist
);
1442 #ifdef GUILE_NEW_GC_SCHEME
1444 gc_sweep_freelist_start (scm_freelist_t
*freelist
)
1446 freelist
->cells
= SCM_EOL
;
1447 freelist
->left_to_collect
= freelist
->cluster_size
;
1448 freelist
->clusters
= SCM_EOL
;
1449 freelist
->clustertail
= &freelist
->clusters
;
1450 freelist
->collected
= 0;
1454 gc_sweep_freelist_finish (scm_freelist_t
*freelist
)
1456 *freelist
->clustertail
= freelist
->cells
;
1457 if (SCM_NNULLP (freelist
->cells
))
1459 SCM c
= freelist
->cells
;
1460 SCM_SETCAR (c
, SCM_CDR (c
));
1461 SCM_SETCDR (c
, SCM_EOL
);
1462 freelist
->collected
+=
1463 freelist
->span
* (freelist
->cluster_size
- freelist
->left_to_collect
);
1466 freelist
->grow_heap_p
= (freelist
->collected
< freelist
->gc_trigger
);
1473 register SCM_CELLPTR ptr
;
1474 #ifdef SCM_POINTERS_MUNGED
1475 register SCM scmptr
;
1478 #define scmptr (SCM)ptr
1480 register SCM nfreelist
;
1481 register scm_freelist_t
*freelist
;
1489 #ifdef GUILE_NEW_GC_SCHEME
1490 gc_sweep_freelist_start (&scm_master_freelist
);
1491 gc_sweep_freelist_start (&scm_master_freelist2
);
1493 /* Reset all free list pointers. We'll reconstruct them completely
1495 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1496 scm_heap_table
[i
].freelist
->cells
= SCM_EOL
;
1499 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1501 #ifdef GUILE_NEW_GC_SCHEME
1502 register unsigned int left_to_collect
;
1504 register scm_sizet n
= 0;
1506 register scm_sizet j
;
1508 /* Unmarked cells go onto the front of the freelist this heap
1509 segment points to. Rather than updating the real freelist
1510 pointer as we go along, we accumulate the new head in
1511 nfreelist. Then, if it turns out that the entire segment is
1512 free, we free (i.e., malloc's free) the whole segment, and
1513 simply don't assign nfreelist back into the real freelist. */
1514 freelist
= scm_heap_table
[i
].freelist
;
1515 nfreelist
= freelist
->cells
;
1516 #ifdef GUILE_NEW_GC_SCHEME
1517 left_to_collect
= freelist
->left_to_collect
;
1519 span
= scm_heap_table
[i
].span
;
1521 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0], span
);
1522 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1], span
) - ptr
;
1523 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
1525 #ifdef SCM_POINTERS_MUNGED
1526 scmptr
= PTR2SCM (ptr
);
1528 switch SCM_TYP7 (scmptr
)
1530 case scm_tcs_cons_gloc
:
1531 if (SCM_GCMARKP (scmptr
))
1533 if (SCM_CDR (SCM_CAR (scmptr
) - 1) == (SCM
)1)
1534 SCM_SETCDR (SCM_CAR (scmptr
) - 1, (SCM
) 0);
1539 vcell
= SCM_CAR (scmptr
) - 1L;
1541 if ((SCM_CDR (vcell
) == 0) || (SCM_UNPACK (SCM_CDR (vcell
)) == 1))
1543 scm_struct_free_t free
1544 = (scm_struct_free_t
) ((SCM
*) vcell
)[scm_struct_i_free
];
1545 m
+= free ((SCM
*) vcell
, (SCM
*) SCM_GCCDR (scmptr
));
1549 case scm_tcs_cons_imcar
:
1550 case scm_tcs_cons_nimcar
:
1551 case scm_tcs_closures
:
1553 if (SCM_GCMARKP (scmptr
))
1557 if (SCM_GC8MARKP (scmptr
))
1563 m
+= (2 + SCM_LENGTH (scmptr
)) * sizeof (SCM
);
1564 scm_must_free ((char *)(SCM_VELTS (scmptr
) - 2));
1568 case scm_tc7_vector
:
1569 case scm_tc7_lvector
:
1573 if (SCM_GC8MARKP (scmptr
))
1576 m
+= (SCM_LENGTH (scmptr
) * sizeof (SCM
));
1578 scm_must_free (SCM_CHARS (scmptr
));
1579 /* SCM_SETCHARS(scmptr, 0);*/
1583 if SCM_GC8MARKP (scmptr
)
1585 m
+= sizeof (long) * ((SCM_HUGE_LENGTH (scmptr
) + SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
1587 case scm_tc7_byvect
:
1588 if SCM_GC8MARKP (scmptr
)
1590 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (char);
1594 if SCM_GC8MARKP (scmptr
)
1596 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long);
1599 if SCM_GC8MARKP (scmptr
)
1601 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (short);
1603 #ifdef HAVE_LONG_LONGS
1604 case scm_tc7_llvect
:
1605 if SCM_GC8MARKP (scmptr
)
1607 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long_long
);
1611 if SCM_GC8MARKP (scmptr
)
1613 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (float);
1616 if SCM_GC8MARKP (scmptr
)
1618 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (double);
1621 if SCM_GC8MARKP (scmptr
)
1623 m
+= SCM_HUGE_LENGTH (scmptr
) * 2 * sizeof (double);
1626 case scm_tc7_substring
:
1627 if (SCM_GC8MARKP (scmptr
))
1630 case scm_tc7_string
:
1631 if (SCM_GC8MARKP (scmptr
))
1633 m
+= SCM_HUGE_LENGTH (scmptr
) + 1;
1635 case scm_tc7_msymbol
:
1636 if (SCM_GC8MARKP (scmptr
))
1638 m
+= ( SCM_LENGTH (scmptr
)
1640 + sizeof (SCM
) * ((SCM
*)SCM_CHARS (scmptr
) - SCM_SLOTS(scmptr
)));
1641 scm_must_free ((char *)SCM_SLOTS (scmptr
));
1643 case scm_tc7_contin
:
1644 if SCM_GC8MARKP (scmptr
)
1646 m
+= SCM_LENGTH (scmptr
) * sizeof (SCM_STACKITEM
) + sizeof (scm_contregs
);
1647 if (SCM_VELTS (scmptr
))
1649 case scm_tc7_ssymbol
:
1650 if SCM_GC8MARKP(scmptr
)
1656 if SCM_GC8MARKP (scmptr
)
1658 if SCM_OPENP (scmptr
)
1660 int k
= SCM_PTOBNUM (scmptr
);
1661 if (!(k
< scm_numptob
))
1663 /* Keep "revealed" ports alive. */
1664 if (scm_revealed_count (scmptr
) > 0)
1666 /* Yes, I really do mean scm_ptobs[k].free */
1667 /* rather than ftobs[k].close. .close */
1668 /* is for explicit CLOSE-PORT by user */
1669 m
+= (scm_ptobs
[k
].free
) (scmptr
);
1670 SCM_SETSTREAM (scmptr
, 0);
1671 scm_remove_from_port_table (scmptr
);
1672 scm_gc_ports_collected
++;
1673 SCM_SETAND_CAR (scmptr
, ~SCM_OPN
);
1677 switch SCM_GCTYP16 (scmptr
)
1679 case scm_tc_free_cell
:
1681 if SCM_GC8MARKP (scmptr
)
1686 if SCM_GC8MARKP (scmptr
)
1688 m
+= (SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
/ SCM_CHAR_BIT
);
1690 #endif /* def SCM_BIGDIG */
1691 case scm_tc16_complex
:
1692 if SCM_GC8MARKP (scmptr
)
1694 m
+= 2 * sizeof (double);
1697 if SCM_GC8MARKP (scmptr
)
1702 k
= SCM_SMOBNUM (scmptr
);
1703 if (!(k
< scm_numsmob
))
1705 m
+= (scm_smobs
[k
].free
) ((SCM
) scmptr
);
1711 sweeperr
:scm_wta (scmptr
, "unknown type in ", "gc_sweep");
1714 if (SCM_CAR (scmptr
) == (SCM
) scm_tc_free_cell
)
1717 #ifndef GUILE_NEW_GC_SCHEME
1720 if (!--left_to_collect
)
1722 SCM_SETCAR (scmptr
, nfreelist
);
1723 *freelist
->clustertail
= scmptr
;
1724 freelist
->clustertail
= SCM_CDRLOC (scmptr
);
1726 nfreelist
= SCM_EOL
;
1727 freelist
->collected
+= span
* freelist
->cluster_size
;
1728 left_to_collect
= freelist
->cluster_size
;
1733 /* Stick the new cell on the front of nfreelist. It's
1734 critical that we mark this cell as freed; otherwise, the
1735 conservative collector might trace it as some other type
1737 SCM_SETCAR (scmptr
, scm_tc_free_cell
);
1738 SCM_SETCDR (scmptr
, nfreelist
);
1744 SCM_CLRGC8MARK (scmptr
);
1747 SCM_CLRGCMARK (scmptr
);
1749 #ifdef GC_FREE_SEGMENTS
1754 freelist
->heap_size
-= seg_size
;
1755 free ((char *) scm_heap_table
[i
].bounds
[0]);
1756 scm_heap_table
[i
].bounds
[0] = 0;
1757 for (j
= i
+ 1; j
< scm_n_heap_segs
; j
++)
1758 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1759 scm_n_heap_segs
-= 1;
1760 i
--; /* We need to scan the segment just moved. */
1763 #endif /* ifdef GC_FREE_SEGMENTS */
1765 /* Update the real freelist pointer to point to the head of
1766 the list of free cells we've built for this segment. */
1767 freelist
->cells
= nfreelist
;
1768 #ifdef GUILE_NEW_GC_SCHEME
1769 freelist
->left_to_collect
= left_to_collect
;
1773 #ifndef GUILE_NEW_GC_SCHEME
1774 freelist
->collected
+= n
;
1775 scm_cells_allocated
+= freelist
->heap_size
- freelist
->collected
;
1778 #ifdef GUILE_DEBUG_FREELIST
1779 #ifdef GUILE_NEW_GC_SCHEME
1780 scm_check_freelist (freelist
== &scm_master_freelist
1784 scm_check_freelist (freelist
);
1786 scm_map_free_list ();
1790 #ifdef GUILE_NEW_GC_SCHEME
1791 gc_sweep_freelist_finish (&scm_master_freelist
);
1792 gc_sweep_freelist_finish (&scm_master_freelist2
);
1794 /* When we move to POSIX threads private freelists should probably
1795 be GC-protected instead. */
1796 scm_freelist
= SCM_EOL
;
1797 scm_freelist2
= SCM_EOL
;
1800 /* Scan weak vectors. */
1803 for (w
= scm_weak_vectors
; w
!= SCM_EOL
; w
= SCM_WVECT_GC_CHAIN (w
))
1805 if (!SCM_IS_WHVEC_ANY (w
))
1809 ptr
= SCM_VELTS (w
);
1811 for (j
= 0; j
< n
; ++j
)
1812 if (SCM_FREEP (ptr
[j
]))
1813 ptr
[j
] = SCM_BOOL_F
;
1815 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1818 register long n
= SCM_LENGTH (w
);
1821 ptr
= SCM_VELTS (w
);
1823 for (j
= 0; j
< n
; ++j
)
1830 weak_keys
= SCM_IS_WHVEC (obj
) || SCM_IS_WHVEC_B (obj
);
1831 weak_values
= SCM_IS_WHVEC_V (obj
) || SCM_IS_WHVEC_B (obj
);
1836 while ( SCM_CONSP (alist
)
1837 && SCM_CONSP (SCM_CAR (alist
)))
1842 key
= SCM_CAAR (alist
);
1843 value
= SCM_CDAR (alist
);
1844 if ( (weak_keys
&& SCM_FREEP (key
))
1845 || (weak_values
&& SCM_FREEP (value
)))
1847 *fixup
= SCM_CDR (alist
);
1850 fixup
= SCM_CDRLOC (alist
);
1851 alist
= SCM_CDR (alist
);
1857 scm_mallocated
-= m
;
1858 scm_gc_malloc_collected
= m
;
1864 /* {Front end to malloc}
1866 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1866 * These functions provide services comparable to malloc, realloc, and
1869 * free. They are for allocating malloced parts of scheme objects.
1870 * The primary purpose of the front end is to impose calls to gc.
1874 * Return newly malloced storage or throw an error.
1876 * The parameter WHAT is a string for error reporting.
1877 * If the threshold scm_mtrigger will be passed by this
1878 * allocation, or if the first call to malloc fails,
1879 * garbage collect -- on the presumption that some objects
1880 * using malloced storage may be collected.
1882 * The limit scm_mtrigger may be raised by this allocation.
1885 scm_must_malloc (scm_sizet size
, const char *what
)
1888 unsigned long nm
= scm_mallocated
+ size
;
1890 if (nm
<= scm_mtrigger
)
1892 SCM_SYSCALL (ptr
= malloc (size
));
1895 scm_mallocated
= nm
;
1902 nm
= scm_mallocated
+ size
;
1903 SCM_SYSCALL (ptr
= malloc (size
));
1906 scm_mallocated
= nm
;
1907 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1908 if (nm
> scm_mtrigger
)
1909 scm_mtrigger
= nm
+ nm
/ 2;
1911 scm_mtrigger
+= scm_mtrigger
/ 2;
1916 scm_wta (SCM_MAKINUM (size
), (char *) SCM_NALLOC
, what
);
1917 return 0; /* never reached */
1922 * is similar to scm_must_malloc.
1925 scm_must_realloc (void *where
,
1931 scm_sizet nm
= scm_mallocated
+ size
- old_size
;
1933 if (nm
<= scm_mtrigger
)
1935 SCM_SYSCALL (ptr
= realloc (where
, size
));
1938 scm_mallocated
= nm
;
1945 nm
= scm_mallocated
+ size
- old_size
;
1946 SCM_SYSCALL (ptr
= realloc (where
, size
));
1949 scm_mallocated
= nm
;
1950 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1951 if (nm
> scm_mtrigger
)
1952 scm_mtrigger
= nm
+ nm
/ 2;
1954 scm_mtrigger
+= scm_mtrigger
/ 2;
1959 scm_wta (SCM_MAKINUM (size
), (char *) SCM_NALLOC
, what
);
1960 return 0; /* never reached */
1964 scm_must_free (void *obj
)
1969 scm_wta (SCM_INUM0
, "already free", "");
1972 /* Announce that there has been some malloc done that will be freed
1973 * during gc. A typical use is for a smob that uses some malloced
1974 * memory but can not get it from scm_must_malloc (for whatever
1975 * reason). When a new object of this smob is created you call
1976 * scm_done_malloc with the size of the object. When your smob free
1977 * function is called, be sure to include this size in the return
1981 scm_done_malloc (long size
)
1983 scm_mallocated
+= size
;
1985 if (scm_mallocated
> scm_mtrigger
)
1987 scm_igc ("foreign mallocs");
1988 if (scm_mallocated
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
)
1990 if (scm_mallocated
> scm_mtrigger
)
1991 scm_mtrigger
= scm_mallocated
+ scm_mallocated
/ 2;
1993 scm_mtrigger
+= scm_mtrigger
/ 2;
1999 #ifdef GUILE_NEW_GC_SCHEME
2001 adjust_gc_trigger (scm_freelist_t
*freelist
)
2003 /* Adjust GC trigger based on total heap size */
2004 if (freelist
->gc_trigger_fraction
)
2005 freelist
->gc_trigger
= ((scm_master_freelist
.heap_size
2006 + scm_master_freelist2
.heap_size
)
2007 * freelist
->gc_trigger_fraction
2017 * Each heap segment is an array of objects of a particular size.
2018 * Every segment has an associated (possibly shared) freelist.
2019 * A table of segment records is kept that records the upper and
2020 * lower extents of the segment; this is used during the conservative
2021 * phase of gc to identify probable gc roots (because they point
2022 * into valid segments at reasonable offsets). */
2025 * is true if the first segment was smaller than INIT_HEAP_SEG.
2026 * If scm_expmem is set to one, subsequent segment allocations will
2027 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2031 scm_sizet scm_max_segment_size
;
2034 * is the lowest base address of any heap segment.
2036 SCM_CELLPTR scm_heap_org
;
2038 scm_heap_seg_data_t
* scm_heap_table
= 0;
2039 int scm_n_heap_segs
= 0;
2042 * initializes a new heap segment and returns the number of objects it contains.
2044 * The segment origin, segment size in bytes, and the span of objects
2045 * in cells are input parameters. The freelist is both input and output.
2047 * This function presumes that the scm_heap_table has already been expanded
2048 * to accommodate a new segment record.
2053 init_heap_seg (SCM_CELLPTR seg_org
, scm_sizet size
, scm_freelist_t
*freelist
)
2055 register SCM_CELLPTR ptr
;
2056 #ifdef SCM_POINTERS_MUNGED
2057 register SCM scmptr
;
2062 SCM_CELLPTR seg_end
;
2065 int span
= freelist
->span
;
2067 if (seg_org
== NULL
)
2070 ptr
= CELL_UP (seg_org
, span
);
2072 /* Compute the ceiling on valid object pointers w/in this segment.
2074 seg_end
= CELL_DN ((char *) seg_org
+ size
, span
);
2076 /* Find the right place and insert the segment record.
2079 for (new_seg_index
= 0;
2080 ( (new_seg_index
< scm_n_heap_segs
)
2081 && SCM_PTR_LE (scm_heap_table
[new_seg_index
].bounds
[0], seg_org
));
2087 for (i
= scm_n_heap_segs
; i
> new_seg_index
; --i
)
2088 scm_heap_table
[i
] = scm_heap_table
[i
- 1];
2093 scm_heap_table
[new_seg_index
].valid
= 0;
2094 scm_heap_table
[new_seg_index
].span
= span
;
2095 scm_heap_table
[new_seg_index
].freelist
= freelist
;
2096 scm_heap_table
[new_seg_index
].bounds
[0] = (SCM_CELLPTR
)ptr
;
2097 scm_heap_table
[new_seg_index
].bounds
[1] = (SCM_CELLPTR
)seg_end
;
2100 /* Compute the least valid object pointer w/in this segment
2102 ptr
= CELL_UP (ptr
, span
);
2106 n_new_cells
= seg_end
- ptr
;
2108 #ifdef GUILE_NEW_GC_SCHEME
2110 freelist
->heap_size
+= n_new_cells
;
2112 /* Partition objects in this segment into clusters */
2115 SCM
*clusterp
= &clusters
;
2116 int n_cluster_cells
= span
* freelist
->cluster_size
;
2118 while (n_new_cells
> span
) /* at least one spine + one freecell */
2120 /* Determine end of cluster
2122 if (n_new_cells
>= n_cluster_cells
)
2124 seg_end
= ptr
+ n_cluster_cells
;
2125 n_new_cells
-= n_cluster_cells
;
2128 /* [cmm] looks like the segment size doesn't divide cleanly by
2129 cluster size. bad cmm! */
2132 /* Allocate cluster spine
2134 *clusterp
= PTR2SCM (ptr
);
2135 SCM_SETCAR (*clusterp
, PTR2SCM (ptr
+ span
));
2136 clusterp
= SCM_CDRLOC (*clusterp
);
2139 while (ptr
< seg_end
)
2141 #ifdef SCM_POINTERS_MUNGED
2142 scmptr
= PTR2SCM (ptr
);
2144 SCM_SETCAR (scmptr
, scm_tc_free_cell
);
2145 SCM_SETCDR (scmptr
, PTR2SCM (ptr
+ span
));
2149 SCM_SETCDR (PTR2SCM (ptr
- span
), SCM_EOL
);
2152 /* Patch up the last cluster pointer in the segment
2153 * to join it to the input freelist.
2155 *clusterp
= freelist
->clusters
;
2156 freelist
->clusters
= clusters
;
2159 adjust_gc_trigger (&scm_master_freelist
);
2160 adjust_gc_trigger (&scm_master_freelist2
);
2162 #else /* GUILE_NEW_GC_SCHEME */
2164 /* Prepend objects in this segment to the freelist.
2166 while (ptr
< seg_end
)
2168 #ifdef SCM_POINTERS_MUNGED
2169 scmptr
= PTR2SCM (ptr
);
2171 SCM_SETCAR (scmptr
, (SCM
) scm_tc_free_cell
);
2172 SCM_SETCDR (scmptr
, PTR2SCM (ptr
+ span
));
2178 /* Patch up the last freelist pointer in the segment
2179 * to join it to the input freelist.
2181 SCM_SETCDR (PTR2SCM (ptr
), freelist
->cells
);
2182 freelist
->cells
= PTR2SCM (CELL_UP (seg_org
, span
));
2184 freelist
->heap_size
+= n_new_cells
;
2186 #endif /* GUILE_NEW_GC_SCHEME */
2189 fprintf (stderr
, "H");
2197 #ifndef GUILE_NEW_GC_SCHEME
2198 #define round_to_cluster_size(freelist, len) len
2202 round_to_cluster_size (scm_freelist_t
*freelist
, scm_sizet len
)
2204 scm_sizet cluster_size_in_bytes
= CLUSTER_SIZE_IN_BYTES (freelist
);
2207 (len
+ cluster_size_in_bytes
- 1) / cluster_size_in_bytes
* cluster_size_in_bytes
2208 + ALIGNMENT_SLACK (freelist
);
2214 alloc_some_heap (scm_freelist_t
*freelist
)
2216 scm_heap_seg_data_t
* tmptable
;
2220 /* Critical code sections (such as the garbage collector)
2221 * aren't supposed to add heap segments.
2223 if (scm_gc_heap_lock
)
2224 scm_wta (SCM_UNDEFINED
, "need larger initial", "heap");
2226 /* Expand the heap tables to have room for the new segment.
2227 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
2228 * only if the allocation of the segment itself succeeds.
2230 len
= (1 + scm_n_heap_segs
) * sizeof (scm_heap_seg_data_t
);
2232 SCM_SYSCALL (tmptable
= ((scm_heap_seg_data_t
*)
2233 realloc ((char *)scm_heap_table
, len
)));
2235 scm_wta (SCM_UNDEFINED
, "could not grow", "hplims");
2237 scm_heap_table
= tmptable
;
2240 /* Pick a size for the new heap segment.
2241 * The rule for picking the size of a segment is explained in
2244 #ifdef GUILE_NEW_GC_SCHEME
2246 /* Assure that the new segment is large enough for the new trigger */
2247 int slack
= freelist
->gc_trigger
- freelist
->collected
;
2248 int min_cells
= 100 * slack
/ (99 - freelist
->gc_trigger_fraction
);
2249 len
= SCM_EXPHEAP (freelist
->heap_size
);
2251 fprintf (stderr
, "(%d < %d)", len
, min_cells
);
2253 if (len
< min_cells
)
2254 len
= min_cells
+ 1;
2255 len
*= sizeof (scm_cell
);
2258 if (len
> scm_max_segment_size
)
2259 len
= scm_max_segment_size
;
2263 len
= (scm_sizet
) SCM_EXPHEAP (freelist
->heap_size
* sizeof (scm_cell
));
2264 if ((scm_sizet
) SCM_EXPHEAP (freelist
->heap_size
* sizeof (scm_cell
))
2269 len
= SCM_HEAP_SEG_SIZE
;
2270 #endif /* GUILE_NEW_GC_SCHEME */
2275 #ifndef GUILE_NEW_GC_SCHEME
2276 smallest
= (freelist
->span
* sizeof (scm_cell
));
2278 smallest
= CLUSTER_SIZE_IN_BYTES (freelist
);
2284 /* Allocate with decaying ambition. */
2285 while ((len
>= SCM_MIN_HEAP_SEG_SIZE
)
2286 && (len
>= smallest
))
2288 scm_sizet rounded_len
= round_to_cluster_size(freelist
, len
);
2289 SCM_SYSCALL (ptr
= (SCM_CELLPTR
) malloc (rounded_len
));
2292 init_heap_seg (ptr
, rounded_len
, freelist
);
2299 scm_wta (SCM_UNDEFINED
, "could not grow", "heap");
2304 SCM_DEFINE (scm_unhash_name
, "unhash-name", 1, 0, 0,
2307 #define FUNC_NAME s_scm_unhash_name
2311 SCM_VALIDATE_SYMBOL (1,name
);
2313 bound
= scm_n_heap_segs
;
2314 for (x
= 0; x
< bound
; ++x
)
2318 p
= (SCM_CELLPTR
)scm_heap_table
[x
].bounds
[0];
2319 pbound
= (SCM_CELLPTR
)scm_heap_table
[x
].bounds
[1];
2324 if (1 == (7 & (int)incar
))
2327 if ( ((name
== SCM_BOOL_T
) || (SCM_CAR (incar
) == name
))
2328 && (SCM_CDR (incar
) != 0)
2329 && (SCM_UNPACK (SCM_CDR (incar
)) != 1))
2344 /* {GC Protection Helper Functions}
2349 scm_remember (SCM
*ptr
)
2354 These crazy functions prevent garbage collection
2355 of arguments after the first argument by
2356 ensuring they remain live throughout the
2357 function because they are used in the last
2358 line of the code block.
2359 It'd be better to have a nice compiler hint to
2360 aid the conservative stack-scanning GC. --03/09/00 gjb */
2362 scm_return_first (SCM elt
, ...)
2368 scm_return_first_int (int i
, ...)
2375 scm_permanent_object (SCM obj
)
2378 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
2384 /* Protect OBJ from the garbage collector. OBJ will not be freed,
2385 even if all other references are dropped, until someone applies
2386 scm_unprotect_object to it. This function returns OBJ.
2388 Calls to scm_protect_object nest. For every object OBJ, there is a
2389 counter which scm_protect_object(OBJ) increments and
2390 scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
2391 an object's counter is greater than zero, the garbage collector
2394 Of course, that's not how it's implemented. scm_protect_object and
2395 scm_unprotect_object just maintain a list of references to things.
2396 Since the GC knows about this list, all objects it mentions stay
2397 alive. scm_protect_object adds its argument to the list;
2398 scm_unprotect_object removes the first occurrence of its argument
2401 scm_protect_object (SCM obj
)
2403 scm_protects
= scm_cons (obj
, scm_protects
);
2409 /* Remove any protection for OBJ established by a prior call to
2410 scm_protect_object. This function returns OBJ.
2412 See scm_protect_object for more information. */
2414 scm_unprotect_object (SCM obj
)
2416 SCM
*tail_ptr
= &scm_protects
;
2418 while (SCM_CONSP (*tail_ptr
))
2419 if (SCM_CAR (*tail_ptr
) == obj
)
2421 *tail_ptr
= SCM_CDR (*tail_ptr
);
2425 tail_ptr
= SCM_CDRLOC (*tail_ptr
);
2432 /* called on process termination. */
2438 extern int on_exit (void (*procp
) (), int arg
);
2441 cleanup (int status
, void *arg
)
2443 #error Dont know how to setup a cleanup handler on your system.
2448 scm_flush_all_ports ();
2453 make_initial_segment (scm_sizet init_heap_size
, scm_freelist_t
*freelist
)
2455 scm_sizet rounded_size
= round_to_cluster_size (freelist
, init_heap_size
);
2456 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2460 rounded_size
= round_to_cluster_size (freelist
, SCM_HEAP_SEG_SIZE
);
2461 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2469 freelist
->grow_heap_p
= (freelist
->heap_size
< freelist
->gc_trigger
);
2475 #ifdef GUILE_NEW_GC_SCHEME
2477 init_freelist (scm_freelist_t
*freelist
,
2482 freelist
->clusters
= SCM_EOL
;
2483 freelist
->cluster_size
= cluster_size
+ 1;
2485 freelist
->gc_trigger_fraction
= - gc_trigger
;
2488 freelist
->gc_trigger
= gc_trigger
;
2489 freelist
->gc_trigger_fraction
= 0;
2491 freelist
->span
= span
;
2492 freelist
->collected
= 0;
2493 freelist
->heap_size
= 0;
2497 scm_init_storage (scm_sizet init_heap_size_1
, int gc_trigger_1
,
2498 scm_sizet init_heap_size_2
, int gc_trigger_2
,
2499 scm_sizet max_segment_size
)
2502 scm_init_storage (scm_sizet init_heap_size
, scm_sizet init_heap2_size
)
2507 if (!init_heap_size_1
)
2508 init_heap_size_1
= SCM_INIT_HEAP_SIZE_1
;
2509 if (!init_heap_size_2
)
2510 init_heap_size_2
= SCM_INIT_HEAP_SIZE_2
;
2512 j
= SCM_NUM_PROTECTS
;
2514 scm_sys_protects
[--j
] = SCM_BOOL_F
;
2517 #ifdef GUILE_NEW_GC_SCHEME
2518 scm_freelist
= SCM_EOL
;
2519 scm_freelist2
= SCM_EOL
;
2520 init_freelist (&scm_master_freelist
,
2521 1, SCM_CLUSTER_SIZE_1
,
2522 gc_trigger_1
? gc_trigger_1
: SCM_GC_TRIGGER_1
);
2523 init_freelist (&scm_master_freelist2
,
2524 2, SCM_CLUSTER_SIZE_2
,
2525 gc_trigger_2
? gc_trigger_2
: SCM_GC_TRIGGER_2
);
2526 scm_max_segment_size
2527 = max_segment_size
? max_segment_size
: SCM_MAX_SEGMENT_SIZE
;
2529 scm_freelist
.cells
= SCM_EOL
;
2530 scm_freelist
.span
= 1;
2531 scm_freelist
.collected
= 0;
2532 scm_freelist
.heap_size
= 0;
2534 scm_freelist2
.cells
= SCM_EOL
;
2535 scm_freelist2
.span
= 2;
2536 scm_freelist2
.collected
= 0;
2537 scm_freelist2
.heap_size
= 0;
2542 j
= SCM_HEAP_SEG_SIZE
;
2543 scm_mtrigger
= SCM_INIT_MALLOC_LIMIT
;
2544 scm_heap_table
= ((scm_heap_seg_data_t
*)
2545 scm_must_malloc (sizeof (scm_heap_seg_data_t
) * 2, "hplims"));
2547 #ifdef GUILE_NEW_GC_SCHEME
2548 if (make_initial_segment (init_heap_size_1
, &scm_master_freelist
) ||
2549 make_initial_segment (init_heap_size_2
, &scm_master_freelist2
))
2552 if (make_initial_segment (init_heap_size_1
, &scm_freelist
) ||
2553 make_initial_segment (init_heap_size_2
, &scm_freelist2
))
2557 scm_heap_org
= CELL_UP (scm_heap_table
[0].bounds
[0], 1);
2559 /* scm_hplims[0] can change. do not remove scm_heap_org */
2560 scm_weak_vectors
= SCM_EOL
;
2562 /* Initialise the list of ports. */
2563 scm_port_table
= (scm_port
**)
2564 malloc (sizeof (scm_port
*) * scm_port_table_room
);
2565 if (!scm_port_table
)
2572 on_exit (cleanup
, 0);
2576 scm_undefineds
= scm_cons (SCM_UNDEFINED
, SCM_EOL
);
2577 SCM_SETCDR (scm_undefineds
, scm_undefineds
);
2579 scm_listofnull
= scm_cons (SCM_EOL
, SCM_EOL
);
2580 scm_nullstr
= scm_makstr (0L, 0);
2581 scm_nullvect
= scm_make_vector (SCM_INUM0
, SCM_UNDEFINED
);
2582 scm_symhash
= scm_make_vector ((SCM
) SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
);
2583 scm_weak_symhash
= scm_make_weak_key_hash_table ((SCM
) SCM_MAKINUM (scm_symhash_dim
));
2584 scm_symhash_vars
= scm_make_vector ((SCM
) SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
);
2585 scm_stand_in_procs
= SCM_EOL
;
2586 scm_permobjs
= SCM_EOL
;
2587 scm_protects
= SCM_EOL
;
2588 scm_asyncs
= SCM_EOL
;
2589 scm_sysintern ("most-positive-fixnum", (SCM
) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM
));
2590 scm_sysintern ("most-negative-fixnum", (SCM
) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM
));
2592 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD
));