1 /* Copyright (C) 1995,1996 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
17 * As a special exception, the Free Software Foundation gives permission
18 * for additional uses of the text contained in its release of GUILE.
20 * The exception is that, if you link the GUILE library with other files
21 * to produce an executable, this does not by itself cause the
22 * resulting executable to be covered by the GNU General Public License.
23 * Your use of that executable is in no way restricted on account of
24 * linking the GUILE library code into it.
26 * This exception does not however invalidate any other reasons why
27 * the executable file might be covered by the GNU General Public License.
29 * This exception applies only to the code released by the
30 * Free Software Foundation under the name GUILE. If you copy
31 * code from other Free Software Foundation releases into a copy of
32 * GUILE, as the General Public License permits, the exception does
33 * not apply to the code that you add in this way. To avoid misleading
34 * anyone as to the status of such modified files, you must delete
35 * this exception notice from them.
37 * If you write modifications of your own for GUILE, it is your choice
38 * whether to permit this exception to apply to your modifications.
39 * If you do not wish that, delete this exception notice.
54 /* {heap tuning parameters}
56 * These are parameters for controlling memory allocation. The heap
57 * is the area out of which scm_cons, and object headers are allocated.
59 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
60 * 64 bit machine. The units of the _SIZE parameters are bytes.
61 * Cons pairs and object headers occupy one heap cell.
63 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
64 * allocated initially the heap will grow by half its current size
65 * each subsequent time more heap is needed.
67 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
68 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
69 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
70 * is in scm_init_storage() and alloc_some_heap() in sys.c
72 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
73 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
75 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
78 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
/* Initial heap allocation request, in bytes.  If this much heap can be
 * allocated at start-up, the heap afterwards grows by
 * SCM_EXPHEAP(scm_heap_size) each time more heap is needed; otherwise
 * SCM_HEAP_SEG_SIZE increments are used (see comments above). */
#define SCM_INIT_HEAP_SIZE (32768L * sizeof (scm_cell))

/* Smallest heap segment, in bytes, ever accepted when growing the heap. */
#define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
85 # define SCM_HEAP_SEG_SIZE 32768L
88 # define SCM_HEAP_SEG_SIZE (7000L*sizeof(scm_cell))
90 # define SCM_HEAP_SEG_SIZE (16384L*sizeof(scm_cell))
/* Growth increment used once SCM_INIT_HEAP_SIZE has been satisfied: the
 * heap grows by SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 * The argument is parenthesized in the expansion so that expression
 * arguments (e.g. `a + b`) multiply correctly; existing call sites pass
 * a bare identifier, so behavior there is unchanged. */
#define SCM_EXPHEAP(scm_heap_size) ((scm_heap_size) * 2)

/* Initial number of bytes of must_malloc allocation that triggers a gc. */
#define SCM_INIT_MALLOC_LIMIT 100000
96 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
97 bounds for allocated storage */
100 /*in 386 protected mode we must only adjust the offset */
101 # define CELL_UP(p) MK_FP(FP_SEG(p), ~7&(FP_OFF(p)+7))
102 # define CELL_DN(p) MK_FP(FP_SEG(p), ~7&FP_OFF(p))
105 # define CELL_UP(p) (SCM_CELLPTR)(~1L & ((long)(p)+1L))
106 # define CELL_DN(p) (SCM_CELLPTR)(~1L & (long)(p))
108 # define CELL_UP(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & ((long)(p)+sizeof(scm_cell)-1L))
109 # define CELL_DN(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & (long)(p))
116 * is the head of freelist of cons pairs.
118 SCM scm_freelist
= SCM_EOL
;
121 * is the number of bytes of must_malloc allocation needed to trigger gc.
127 * If set, don't expand the heap. Set only during gc, during which no allocation
128 * is supposed to take place anyway.
/* If set, don't expand the heap.  Set only during gc, during which no
 * allocation is supposed to take place anyway. */
int scm_gc_heap_lock = 0;
133 * Don't pause for collection if this is set -- just
/* While nonzero, garbage collection is suppressed: don't pause for
 * collection when this is set.  Starts set; cleared once the storage
 * system is fully initialized. */
int scm_block_gc = 1;
139 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
140 * collection (GC) more space is allocated for the heap.
142 #define MIN_GC_YIELD (scm_heap_size/4)
144 /* During collection, this accumulates objects holding
147 SCM
*scm_weak_vectors
;
151 /* GC Statistics Keeping
/* GC statistics, reported by gc-stats. */
unsigned long scm_cells_allocated = 0;    /* cells in use after the last sweep */
unsigned long scm_mallocated = 0;         /* bytes of must_malloc storage in use */
unsigned long scm_gc_cells_collected;     /* cells reclaimed by the last collection */
unsigned long scm_gc_malloc_collected;    /* malloc bytes reclaimed by the last collection */
unsigned long scm_gc_ports_collected;     /* ports closed by the last collection */
unsigned long scm_gc_rt;                  /* run-time stamp used to time one gc pass */
unsigned long scm_gc_time_taken = 0;      /* total run time spent in gc so far */
161 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
162 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
163 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
164 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
165 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
166 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
169 struct scm_heap_seg_data
171 SCM_CELLPTR bounds
[2]; /* lower and upper */
172 SCM
*freelistp
; /* the value of this may be shared */
173 int ncells
; /* per object in this segment */
180 static void scm_mark_weak_vector_spines
PROTO ((void));
181 static scm_sizet init_heap_seg
PROTO ((SCM_CELLPTR
, scm_sizet
, int, SCM
*));
182 static void alloc_some_heap
PROTO ((int, SCM
*));
187 /* {Scheme Interface to GC}
190 SCM_PROC (s_gc_stats
, "gc-stats", 0, 0, 0, scm_gc_stats
);
197 SCM local_scm_mtrigger
;
198 SCM local_scm_mallocated
;
199 SCM local_scm_heap_size
;
200 SCM local_scm_cells_allocated
;
201 SCM local_scm_gc_time_taken
;
209 for (i
= scm_n_heap_segs
; i
--; )
210 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
211 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
213 if (scm_n_heap_segs
!= n
)
217 local_scm_mtrigger
= scm_mtrigger
;
218 local_scm_mallocated
= scm_mallocated
;
219 local_scm_heap_size
= scm_heap_size
;
220 local_scm_cells_allocated
= scm_cells_allocated
;
221 local_scm_gc_time_taken
= scm_gc_time_taken
;
223 answer
= scm_listify (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
224 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
225 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
226 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
227 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
228 scm_cons (sym_heap_segments
, heap_segs
),
239 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ());
240 scm_gc_cells_collected
= 0;
241 scm_gc_malloc_collected
= 0;
242 scm_gc_ports_collected
= 0;
248 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt
;
249 scm_gc_time_taken
= scm_gc_time_taken
+ scm_gc_rt
;
250 scm_take_signal (SCM_GC_SIGNAL
);
254 SCM_PROC(s_object_address
, "object-address", 1, 0, 0, scm_object_addr
);
256 scm_object_addr (obj
)
259 return scm_ulong2num ((unsigned long)obj
);
263 SCM_PROC(s_gc
, "gc", 0, 0, 0, scm_gc
);
270 return SCM_UNSPECIFIED
;
275 /* {C Interface For When GC is Triggered}
279 scm_gc_for_alloc (ncells
, freelistp
)
285 if ((scm_gc_cells_collected
< MIN_GC_YIELD
) || SCM_IMP (*freelistp
))
287 alloc_some_heap (ncells
, freelistp
);
294 scm_gc_for_newcell ()
297 scm_gc_for_alloc (1, &scm_freelist
);
299 scm_freelist
= SCM_CDR (fl
);
310 if (!scm_stack_base
|| scm_block_gc
)
319 /* unprotect any struct types with no instances */
325 pos
= &scm_type_obj_list
;
326 type_list
= scm_type_obj_list
;
327 while (type_list
!= SCM_EOL
)
328 if (SCM_VELTS (SCM_CAR (type_list
))[scm_struct_i_refcnt
])
330 pos
= &SCM_CDR (type_list
);
331 type_list
= SCM_CDR (type_list
);
335 *pos
= SCM_CDR (type_list
);
336 type_list
= SCM_CDR (type_list
);
341 /* flush dead entries from the continuation stack */
346 elts
= SCM_VELTS (scm_continuation_stack
);
347 bound
= SCM_LENGTH (scm_continuation_stack
);
348 x
= SCM_INUM (scm_continuation_stack_ptr
);
351 elts
[x
] = SCM_BOOL_F
;
356 /* Protect from the C stack. This must be the first marking
357 * done because it provides information about what objects
358 * are "in-use" by the C code. "in-use" objects are those
359 * for which the values from SCM_LENGTH and SCM_CHARS must remain
360 * usable. This requirement is stricter than a liveness
361 * requirement -- in particular, it constrains the implementation
362 * of scm_vector_set_length_x.
364 SCM_FLUSH_REGISTER_WINDOWS
;
365 /* This assumes that all registers are saved into the jmp_buf */
366 setjmp (scm_save_regs_gc_mark
);
367 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
368 ( (scm_sizet
) sizeof scm_save_regs_gc_mark
369 / sizeof (SCM_STACKITEM
)));
372 /* stack_len is long rather than scm_sizet in order to guarantee that
373 &stack_len is long aligned */
374 #ifdef SCM_STACK_GROWS_UP
376 long stack_len
= (SCM_STACKITEM
*) (&stack_len
) - scm_stack_base
;
378 long stack_len
= scm_stack_size (scm_stack_base
);
380 scm_mark_locations (scm_stack_base
, (scm_sizet
) stack_len
);
383 long stack_len
= scm_stack_base
- (SCM_STACKITEM
*) (&stack_len
);
385 long stack_len
= scm_stack_size (scm_stack_base
);
387 scm_mark_locations ((scm_stack_base
- stack_len
), (scm_sizet
) stack_len
);
392 /* FIXME: insert a phase to un-protect string-data preserved
393 * in scm_vector_set_length_x.
396 j
= SCM_NUM_PROTECTS
;
398 scm_gc_mark (scm_sys_protects
[j
]);
400 scm_gc_mark (scm_rootcont
);
401 scm_gc_mark (scm_dynwinds
);
402 scm_gc_mark (scm_continuation_stack
);
403 scm_gc_mark (scm_continuation_stack_ptr
);
404 scm_gc_mark (scm_progargs
);
405 scm_gc_mark (scm_exitval
);
406 scm_gc_mark (scm_cur_inp
);
407 scm_gc_mark (scm_cur_outp
);
408 scm_gc_mark (scm_cur_errp
);
409 scm_gc_mark (scm_def_inp
);
410 scm_gc_mark (scm_def_outp
);
411 scm_gc_mark (scm_def_errp
);
412 scm_gc_mark (scm_top_level_lookup_thunk_var
);
413 scm_gc_mark (scm_system_transformer
);
415 scm_mark_weak_vector_spines ();
429 /* Mark an object precisely.
445 if (SCM_NCELLP (ptr
))
446 scm_wta (ptr
, "rogue pointer in ", "heap");
448 switch (SCM_TYP7 (ptr
))
450 case scm_tcs_cons_nimcar
:
451 if (SCM_GCMARKP (ptr
))
454 if (SCM_IMP (SCM_CDR (ptr
))) /* SCM_IMP works even with a GC mark */
459 scm_gc_mark (SCM_CAR (ptr
));
460 ptr
= SCM_GCCDR (ptr
);
462 case scm_tcs_cons_imcar
:
463 if (SCM_GCMARKP (ptr
))
466 ptr
= SCM_GCCDR (ptr
);
468 case scm_tcs_cons_gloc
:
469 if (SCM_GCMARKP (ptr
))
474 vcell
= SCM_CAR (ptr
) - 1L;
475 switch (SCM_CDR (vcell
))
479 ptr
= SCM_GCCDR (ptr
);
491 vtable_data
= (SCM
*)vcell
;
492 layout
= vtable_data
[scm_struct_i_layout
];
493 len
= SCM_LENGTH (layout
);
494 fields_desc
= SCM_CHARS (layout
);
495 mem
= (SCM
*)SCM_GCCDR (ptr
); /* like struct_data but removes mark */
497 for (x
= 0; x
< len
; x
+= 2)
498 if (fields_desc
[x
] == 'p')
499 scm_gc_mark (mem
[x
/ 2]);
500 if (!SCM_CDR (vcell
))
502 SCM_SETGCMARK (vcell
);
503 ptr
= vtable_data
[scm_struct_i_vtable
];
510 case scm_tcs_closures
:
511 if (SCM_GCMARKP (ptr
))
514 if (SCM_IMP (SCM_CDR (ptr
)))
516 ptr
= SCM_CLOSCAR (ptr
);
519 scm_gc_mark (SCM_CLOSCAR (ptr
));
520 ptr
= SCM_GCCDR (ptr
);
523 case scm_tc7_lvector
:
527 if (SCM_GC8MARKP (ptr
))
529 SCM_SETGC8MARK (ptr
);
530 i
= SCM_LENGTH (ptr
);
534 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
535 scm_gc_mark (SCM_VELTS (ptr
)[i
]);
536 ptr
= SCM_VELTS (ptr
)[0];
541 SCM_SETGC8MARK (ptr
);
542 scm_mark_locations (SCM_VELTS (ptr
),
543 (scm_sizet
) (SCM_LENGTH (ptr
) + sizeof (regs
) / sizeof (SCM_STACKITEM
)));
558 case scm_tc7_mb_string
:
559 SCM_SETGC8MARK (ptr
);
562 case scm_tc7_substring
:
563 case scm_tc7_mb_substring
:
564 if (SCM_GC8MARKP(ptr
))
566 SCM_SETGC8MARK (ptr
);
571 if (SCM_GC8MARKP(ptr
))
573 scm_weak_vectors
[scm_n_weak
++] = ptr
;
574 if (scm_n_weak
>= scm_weak_size
)
576 SCM_SYSCALL (scm_weak_vectors
=
577 (SCM
*) realloc ((char *) scm_weak_vectors
,
578 sizeof (SCM
*) * (scm_weak_size
*= 2)));
579 if (scm_weak_vectors
== NULL
)
581 scm_gen_puts (scm_regular_string
,
584 scm_gen_puts (scm_regular_string
,
585 "\nFATAL ERROR DURING CRITICAL SCM_CODE SECTION\n",
587 exit(SCM_EXIT_FAILURE
);
590 SCM_SETGC8MARK (ptr
);
591 if (SCM_IS_WHVEC_ANY (ptr
))
598 len
= SCM_LENGTH (ptr
);
599 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
600 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
602 for (x
= 0; x
< len
; ++x
)
605 alist
= SCM_VELTS (ptr
)[x
];
606 /* mark everything on the alist
607 * except the keys or values, according to weak_values and weak_keys.
609 while ( SCM_NIMP (alist
)
611 && !SCM_GCMARKP (alist
)
612 && SCM_NIMP (SCM_CAR (alist
))
613 && SCM_CONSP (SCM_CAR (alist
)))
618 kvpair
= SCM_CAR (alist
);
619 next_alist
= SCM_CDR (alist
);
622 * SCM_SETGCMARK (alist);
623 * SCM_SETGCMARK (kvpair);
625 * It may be that either the key or value is protected by
626 * an escaped reference to part of the spine of this alist.
627 * If we mark the spine here, and only mark one or neither of the
628 * key and value, they may never be properly marked.
629 * This leads to a horrible situation in which an alist containing
630 * freelist cells is exported.
632 * So only mark the spines of these arrays last of all marking.
633 * If somebody confuses us by constructing a weak vector
634 * with a circular alist then we are hosed, but at least we
635 * won't prematurely drop table entries.
638 scm_gc_mark (SCM_CAR (kvpair
));
640 scm_gc_mark (SCM_GCCDR (kvpair
));
643 if (SCM_NIMP (alist
))
649 case scm_tc7_msymbol
:
650 if (SCM_GC8MARKP(ptr
))
652 SCM_SETGC8MARK (ptr
);
653 scm_gc_mark (SCM_SYMBOL_FUNC (ptr
));
654 ptr
= SCM_SYMBOL_PROPS (ptr
);
656 case scm_tc7_ssymbol
:
657 if (SCM_GC8MARKP(ptr
))
659 SCM_SETGC8MARK (ptr
);
662 ptr
= (SCM
)(scm_heap_org
+ (((unsigned long)SCM_CAR (ptr
)) >> 8));
665 i
= SCM_PTOBNUM (ptr
);
666 if (!(i
< scm_numptob
))
668 if (SCM_GC8MARKP (ptr
))
670 if (SCM_PTAB_ENTRY(ptr
))
671 scm_gc_mark (SCM_PTAB_ENTRY(ptr
)->file_name
);
672 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
676 if (SCM_GC8MARKP (ptr
))
678 switch SCM_TYP16 (ptr
)
679 { /* should be faster than going through scm_smobs */
680 case scm_tc_free_cell
:
681 /* printf("found free_cell %X ", ptr); fflush(stdout); */
682 SCM_SETGC8MARK (ptr
);
683 SCM_CDR (ptr
) = SCM_EOL
;
685 case scm_tcs_bignums
:
687 SCM_SETGC8MARK (ptr
);
690 i
= SCM_SMOBNUM (ptr
);
691 if (!(i
< scm_numsmob
))
693 ptr
= (scm_smobs
[i
].mark
) (ptr
);
698 def
:scm_wta (ptr
, "unknown type in ", "gc_mark");
703 /* Mark a Region Conservatively
707 scm_mark_locations (x
, n
)
713 register SCM_CELLPTR ptr
;
716 if SCM_CELLP (*(SCM
**) & x
[m
])
718 ptr
= (SCM_CELLPTR
) SCM2PTR ((*(SCM
**) & x
[m
]));
720 j
= scm_n_heap_segs
- 1;
721 if ( SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
722 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
729 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
731 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
739 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
743 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
748 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
752 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
758 if ( !scm_heap_table
[seg_id
].valid
759 || scm_heap_table
[seg_id
].valid (ptr
,
760 &scm_heap_table
[seg_id
]))
761 scm_gc_mark (*(SCM
*) & x
[m
]);
771 scm_mark_weak_vector_spines ()
775 for (i
= 0; i
< scm_n_weak
; ++i
)
777 if (SCM_IS_WHVEC_ANY (scm_weak_vectors
[i
]))
784 obj
= scm_weak_vectors
[i
];
785 ptr
= SCM_VELTS (scm_weak_vectors
[i
]);
786 n
= SCM_LENGTH (scm_weak_vectors
[i
]);
787 for (j
= 0; j
< n
; ++j
)
792 while ( SCM_NIMP (alist
)
794 && !SCM_GCMARKP (alist
)
795 && SCM_NIMP (SCM_CAR (alist
))
796 && SCM_CONSP (SCM_CAR (alist
)))
798 SCM_SETGCMARK (alist
);
799 SCM_SETGCMARK (SCM_CAR (alist
));
800 alist
= SCM_GCCDR (alist
);
812 register SCM_CELLPTR ptr
;
813 #ifdef SCM_POINTERS_MUNGED
817 #define scmptr (SCM)ptr
819 register SCM nfreelist
;
820 register SCM
*hp_freelist
;
823 register scm_sizet j
;
832 while (i
< scm_n_heap_segs
)
834 hp_freelist
= scm_heap_table
[i
].freelistp
;
836 span
= scm_heap_table
[i
].ncells
;
837 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0]);
838 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1]) - ptr
;
840 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
842 #ifdef SCM_POINTERS_MUNGED
843 scmptr
= PTR2SCM (ptr
);
845 switch SCM_TYP7 (scmptr
)
847 case scm_tcs_cons_gloc
:
848 if (SCM_GCMARKP (scmptr
))
850 if (SCM_CDR (SCM_CAR (scmptr
) - 1) == (SCM
)1)
851 SCM_CDR (SCM_CAR (scmptr
) - 1) = (SCM
)0;
856 vcell
= SCM_CAR (scmptr
) - 1L;
858 if ((SCM_CDR (vcell
) == 0) || (SCM_CDR (vcell
) == 1))
862 mem
= (SCM
*)SCM_CDR (scmptr
);
865 m
+= amt
* sizeof (SCM
);
869 case scm_tcs_cons_imcar
:
870 case scm_tcs_cons_nimcar
:
871 case scm_tcs_closures
:
872 if (SCM_GCMARKP (scmptr
))
876 if (SCM_GC8MARKP (scmptr
))
882 m
+= (1 + SCM_LENGTH (scmptr
)) * sizeof (SCM
);
883 scm_must_free ((char *)(SCM_VELTS (scmptr
) - 1));
888 case scm_tc7_lvector
:
892 if (SCM_GC8MARKP (scmptr
))
895 m
+= (SCM_LENGTH (scmptr
) * sizeof (SCM
));
897 scm_must_free (SCM_CHARS (scmptr
));
898 /* SCM_SETCHARS(scmptr, 0);*/
901 if SCM_GC8MARKP (scmptr
)
903 m
+= sizeof (long) * ((SCM_HUGE_LENGTH (scmptr
) + SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
906 if SCM_GC8MARKP (scmptr
)
908 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (char);
912 if SCM_GC8MARKP (scmptr
)
914 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long);
917 if SCM_GC8MARKP (scmptr
)
919 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (short);
923 if SCM_GC8MARKP (scmptr
)
925 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long_long
);
929 if SCM_GC8MARKP (scmptr
)
931 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (float);
934 if SCM_GC8MARKP (scmptr
)
936 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (double);
939 if SCM_GC8MARKP (scmptr
)
941 m
+= SCM_HUGE_LENGTH (scmptr
) * 2 * sizeof (double);
943 case scm_tc7_substring
:
944 case scm_tc7_mb_substring
:
945 if (SCM_GC8MARKP (scmptr
))
949 case scm_tc7_mb_string
:
950 if (SCM_GC8MARKP (scmptr
))
952 m
+= SCM_HUGE_LENGTH (scmptr
) + 1;
954 case scm_tc7_msymbol
:
955 if (SCM_GC8MARKP (scmptr
))
957 m
+= ( SCM_LENGTH (scmptr
)
959 + sizeof (SCM
) * ((SCM
*)SCM_CHARS (scmptr
) - SCM_SLOTS(scmptr
)));
960 scm_must_free ((char *)SCM_SLOTS (scmptr
));
963 if SCM_GC8MARKP (scmptr
)
965 m
+= SCM_LENGTH (scmptr
) * sizeof (SCM_STACKITEM
) + sizeof (regs
);
967 case scm_tc7_ssymbol
:
968 if SCM_GC8MARKP(scmptr
)
974 if SCM_GC8MARKP (scmptr
)
976 if SCM_OPENP (scmptr
)
978 int k
= SCM_PTOBNUM (scmptr
);
979 if (!(k
< scm_numptob
))
981 /* Keep "revealed" ports alive. */
982 if (scm_revealed_count(scmptr
) > 0)
984 /* Yes, I really do mean scm_ptobs[k].free */
985 /* rather than ftobs[k].close. .close */
986 /* is for explicit CLOSE-PORT by user */
987 (scm_ptobs
[k
].free
) (SCM_STREAM (scmptr
));
988 SCM_SETSTREAM (scmptr
, 0);
989 scm_remove_from_port_table (scmptr
);
990 scm_gc_ports_collected
++;
991 SCM_CAR (scmptr
) &= ~SCM_OPN
;
995 switch SCM_GCTYP16 (scmptr
)
997 case scm_tc_free_cell
:
998 if SCM_GC8MARKP (scmptr
)
1002 case scm_tcs_bignums
:
1003 if SCM_GC8MARKP (scmptr
)
1005 m
+= (SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
/ SCM_CHAR_BIT
);
1007 #endif /* def SCM_BIGDIG */
1009 if SCM_GC8MARKP (scmptr
)
1011 switch ((int) (SCM_CAR (scmptr
) >> 16))
1013 case (SCM_IMAG_PART
| SCM_REAL_PART
) >> 16:
1014 m
+= sizeof (double);
1015 case SCM_REAL_PART
>> 16:
1016 case SCM_IMAG_PART
>> 16:
1017 m
+= sizeof (double);
1026 if SCM_GC8MARKP (scmptr
)
1031 k
= SCM_SMOBNUM (scmptr
);
1032 if (!(k
< scm_numsmob
))
1034 m
+= (scm_smobs
[k
].free
) ((SCM
) scmptr
);
1040 sweeperr
:scm_wta (scmptr
, "unknown type in ", "gc_sweep");
1044 if (SCM_CAR (scmptr
) == (SCM
) scm_tc_free_cell
)
1047 SCM_CAR (scmptr
) = (SCM
) scm_tc_free_cell
;
1048 SCM_CDR (scmptr
) = nfreelist
;
1051 if ((nfreelist
< scm_heap_table
[0].bounds
[0]) ||
1052 (nfreelist
>= scm_heap_table
[0].bounds
[1]))
1057 SCM_CLRGC8MARK (scmptr
);
1060 SCM_CLRGCMARK (scmptr
);
1062 #ifdef GC_FREE_SEGMENTS
1065 scm_heap_size
-= seg_size
;
1066 free ((char *) scm_heap_table
[i
- 1].bounds
[0]);
1067 scm_heap_table
[i
- 1].bounds
[0] = 0;
1068 for (j
= i
; j
< scm_n_heap_segs
; j
++)
1069 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1070 scm_n_heap_segs
-= 1;
1071 i
-= 1; /* need to scan segment just moved. */
1074 #endif /* ifdef GC_FREE_SEGMENTS */
1075 *hp_freelist
= nfreelist
;
1077 scm_gc_cells_collected
+= n
;
1080 /* Scan weak vectors. */
1083 for (i
= 0; i
< scm_n_weak
; ++i
)
1085 if (!SCM_IS_WHVEC_ANY (scm_weak_vectors
[i
]))
1087 ptr
= SCM_VELTS (scm_weak_vectors
[i
]);
1088 n
= SCM_LENGTH (scm_weak_vectors
[i
]);
1089 for (j
= 0; j
< n
; ++j
)
1090 if (SCM_NIMP (ptr
[j
]) && SCM_FREEP (ptr
[j
]))
1091 ptr
[j
] = SCM_BOOL_F
;
1093 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1096 obj
= scm_weak_vectors
[i
];
1097 ptr
= SCM_VELTS (scm_weak_vectors
[i
]);
1098 n
= SCM_LENGTH (scm_weak_vectors
[i
]);
1099 for (j
= 0; j
< n
; ++j
)
1106 weak_keys
= SCM_IS_WHVEC (obj
) || SCM_IS_WHVEC_B (obj
);
1107 weak_values
= SCM_IS_WHVEC_V (obj
) || SCM_IS_WHVEC_B (obj
);
1112 while (SCM_NIMP (alist
)
1113 && SCM_CONSP (alist
)
1114 && SCM_NIMP (SCM_CAR (alist
))
1115 && SCM_CONSP (SCM_CAR (alist
)))
1120 key
= SCM_CAAR (alist
);
1121 value
= SCM_CDAR (alist
);
1122 if ( (weak_keys
&& SCM_NIMP (key
) && SCM_FREEP (key
))
1123 || (weak_values
&& SCM_NIMP (value
) && SCM_FREEP (value
)))
1125 *fixup
= SCM_CDR (alist
);
1128 fixup
= &SCM_CDR (alist
);
1129 alist
= SCM_CDR (alist
);
1135 scm_cells_allocated
= (scm_heap_size
- scm_gc_cells_collected
);
1136 scm_mallocated
-= m
;
1137 scm_gc_malloc_collected
= m
;
1143 /* {Front end to malloc}
1145 * scm_must_malloc, scm_must_realloc, scm_must_free
1147 * These functions provide services comperable to malloc, realloc, and
1148 * free. They are for allocating malloced parts of scheme objects.
1149 * The primary purpose of the front end is to impose calls to gc.
1153 * Return newly malloced storage or throw an error.
1155 * The parameter WHAT is a string for error reporting.
1156 * If the threshold scm_mtrigger will be passed by this
1157 * allocation, or if the first call to malloc fails,
1158 * garbage collect -- on the presumption that some objects
1159 * using malloced storage may be collected.
1161 * The limit scm_mtrigger may be raised by this allocation.
1164 scm_must_malloc (len
, what
)
1169 scm_sizet size
= len
;
1170 long nm
= scm_mallocated
+ size
;
1173 scm_wta (SCM_MAKINUM (len
), (char *) SCM_NALLOC
, what
);
1174 if ((nm
<= scm_mtrigger
))
1176 SCM_SYSCALL (ptr
= (char *) malloc (size
));
1179 scm_mallocated
= nm
;
1184 nm
= scm_mallocated
+ size
;
1185 SCM_SYSCALL (ptr
= (char *) malloc (size
));
1188 scm_mallocated
= nm
;
1189 if (nm
> scm_mtrigger
)
1190 scm_mtrigger
= nm
+ nm
/ 2;
1198 * is similar to scm_must_malloc.
1201 scm_must_realloc (where
, olen
, len
, what
)
1208 scm_sizet size
= len
;
1209 long nm
= scm_mallocated
+ size
- olen
;
1212 scm_wta (SCM_MAKINUM (len
), (char *) SCM_NALLOC
, what
);
1213 if ((nm
<= scm_mtrigger
))
1215 SCM_SYSCALL (ptr
= (char *) realloc (where
, size
));
1218 scm_mallocated
= nm
;
1223 nm
= scm_mallocated
+ size
- olen
;
1224 SCM_SYSCALL (ptr
= (char *) realloc (where
, size
));
1227 scm_mallocated
= nm
;
1228 if (nm
> scm_mtrigger
)
1229 scm_mtrigger
= nm
+ nm
/ 2;
1242 scm_wta (SCM_INUM0
, "already free", "");
1250 * Each heap segment is an array of objects of a particular size.
1251 * Every segment has an associated (possibly shared) freelist.
1252 * A table of segment records is kept that records the upper and
1253 * lower extents of the segment; this is used during the conservative
1254 * phase of gc to identify probably gc roots (because they point
1255 * into valid segments at reasonable offsets).
1259 * is true if the first segment was smaller than INIT_HEAP_SEG.
1260 * If scm_expmem is set to one, subsequent segment allocations will
1261 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1266 * is the lowest base address of any heap segment.
1268 SCM_CELLPTR scm_heap_org
;
1270 struct scm_heap_seg_data
* scm_heap_table
= 0;
1271 int scm_n_heap_segs
= 0;
1274 * is the total number of cells in heap segments.
1276 long scm_heap_size
= 0;
1279 * initializes a new heap segment and return the number of objects it contains.
1281 * The segment origin, segment size in bytes, and the span of objects
1282 * in cells are input parameters. The freelist is both input and output.
1284 * This function presume that the scm_heap_table has already been expanded
1285 * to accomodate a new segment record.
1290 init_heap_seg (seg_org
, size
, ncells
, freelistp
)
1291 SCM_CELLPTR seg_org
;
1296 register SCM_CELLPTR ptr
;
1297 #ifdef SCM_POINTERS_MUNGED
1298 register SCM scmptr
;
1303 SCM_CELLPTR seg_end
;
1304 scm_sizet new_seg_index
;
1305 scm_sizet n_new_objects
;
1307 if (seg_org
== NULL
)
1312 /* Compute the ceiling on valid object pointers w/in this segment.
1314 seg_end
= CELL_DN ((char *) ptr
+ size
);
1316 /* Find the right place and insert the segment record.
1319 for (new_seg_index
= 0;
1320 ( (new_seg_index
< scm_n_heap_segs
)
1321 && SCM_PTR_LE (scm_heap_table
[new_seg_index
].bounds
[0], seg_org
));
1327 for (i
= scm_n_heap_segs
; i
> new_seg_index
; --i
)
1328 scm_heap_table
[i
] = scm_heap_table
[i
- 1];
1333 scm_heap_table
[new_seg_index
].valid
= 0;
1334 scm_heap_table
[new_seg_index
].ncells
= ncells
;
1335 scm_heap_table
[new_seg_index
].freelistp
= freelistp
;
1336 scm_heap_table
[new_seg_index
].bounds
[0] = (SCM_CELLPTR
)ptr
;
1337 scm_heap_table
[new_seg_index
].bounds
[1] = (SCM_CELLPTR
)seg_end
;
1340 /* Compute the least valid object pointer w/in this segment
1342 ptr
= CELL_UP (ptr
);
1345 n_new_objects
= seg_end
- ptr
;
1347 /* Prepend objects in this segment to the freelist.
1349 while (ptr
< seg_end
)
1351 #ifdef SCM_POINTERS_MUNGED
1352 scmptr
= PTR2SCM (ptr
);
1354 SCM_CAR (scmptr
) = (SCM
) scm_tc_free_cell
;
1355 SCM_CDR (scmptr
) = PTR2SCM (ptr
+ ncells
);
1361 /* Patch up the last freelist pointer in the segment
1362 * to join it to the input freelist.
1364 SCM_CDR (PTR2SCM (ptr
)) = *freelistp
;
1365 *freelistp
= PTR2SCM (CELL_UP (seg_org
));
1367 scm_heap_size
+= (ncells
* n_new_objects
);
1376 alloc_some_heap (ncells
, freelistp
)
1380 struct scm_heap_seg_data
* tmptable
;
1384 /* Critical code sections (such as the garbage collector)
1385 * aren't supposed to add heap segments.
1387 if (scm_gc_heap_lock
)
1388 scm_wta (SCM_UNDEFINED
, "need larger initial", "heap");
1390 /* Expand the heap tables to have room for the new segment.
1391 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
1392 * only if the allocation of the segment itself succeeds.
1394 len
= (1 + scm_n_heap_segs
) * sizeof (struct scm_heap_seg_data
);
1396 SCM_SYSCALL (tmptable
= ((struct scm_heap_seg_data
*)
1397 realloc ((char *)scm_heap_table
, len
)));
1399 scm_wta (SCM_UNDEFINED
, "could not grow", "hplims");
1401 scm_heap_table
= tmptable
;
1404 /* Pick a size for the new heap segment.
1405 * The rule for picking the size of a segment is explained in
1410 len
= (scm_sizet
) (SCM_EXPHEAP (scm_heap_size
) * sizeof (scm_cell
));
1411 if ((scm_sizet
) (SCM_EXPHEAP (scm_heap_size
) * sizeof (scm_cell
)) != len
)
1415 len
= SCM_HEAP_SEG_SIZE
;
1420 smallest
= (ncells
* sizeof (scm_cell
));
1422 len
= (ncells
* sizeof (scm_cell
));
1424 /* Allocate with decaying ambition. */
1425 while ((len
>= SCM_MIN_HEAP_SEG_SIZE
)
1426 && (len
>= smallest
))
1428 SCM_SYSCALL (ptr
= (SCM_CELLPTR
) malloc (len
));
1431 init_heap_seg (ptr
, len
, ncells
, freelistp
);
1438 scm_wta (SCM_UNDEFINED
, "could not grow", "heap");
1443 SCM_PROC (s_unhash_name
, "unhash-name", 1, 0, 0, scm_unhash_name
);
1445 scm_unhash_name (name
)
1450 SCM_ASSERT (SCM_NIMP (name
) && SCM_SYMBOLP (name
), name
, SCM_ARG1
, s_unhash_name
);
1452 bound
= scm_n_heap_segs
;
1453 for (x
= 0; x
< bound
; ++x
)
1457 p
= (SCM_CELLPTR
)scm_heap_table
[x
].bounds
[0];
1458 pbound
= (SCM_CELLPTR
)scm_heap_table
[x
].bounds
[1];
1463 if (1 == (7 & (int)incar
))
1466 if ( ((name
== SCM_BOOL_T
) || (SCM_CAR (incar
) == name
))
1467 && (SCM_CDR (incar
) != 0)
1468 && (SCM_CDR (incar
) != 1))
1482 /* {GC Protection Helper Functions}
1493 scm_return_first (SCM elt
, ...)
1496 scm_return_first (elt
, va_alist
)
1506 scm_permanent_object (obj
)
1510 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
1518 scm_init_storage (init_heap_size
)
1519 long init_heap_size
;
1523 j
= SCM_NUM_PROTECTS
;
1525 scm_sys_protects
[--j
] = SCM_BOOL_F
;
1527 scm_freelist
= SCM_EOL
;
1530 j
= SCM_HEAP_SEG_SIZE
;
1531 scm_mtrigger
= SCM_INIT_MALLOC_LIMIT
;
1532 scm_heap_table
= ((struct scm_heap_seg_data
*)
1533 scm_must_malloc (sizeof (struct scm_heap_seg_data
), "hplims"));
1534 if (0L == init_heap_size
)
1535 init_heap_size
= SCM_INIT_HEAP_SIZE
;
1537 if ((init_heap_size
!= j
)
1538 || !init_heap_seg ((SCM_CELLPTR
) malloc (j
), j
, 1, &scm_freelist
))
1540 j
= SCM_HEAP_SEG_SIZE
;
1541 if (!init_heap_seg ((SCM_CELLPTR
) malloc (j
), j
, 1, &scm_freelist
))
1546 scm_heap_org
= CELL_UP (scm_heap_table
[0].bounds
[0]);
1547 /* scm_hplims[0] can change. do not remove scm_heap_org */
1548 if (!(scm_weak_vectors
= (SCM
*) malloc ((scm_weak_size
= 32) * sizeof(SCM
*))))
1551 /* Initialise the list of ports. */
1552 scm_port_table
= (struct scm_port_table
**) malloc ((long) (sizeof (struct scm_port_table
)
1553 * scm_port_table_room
));
1554 if (!scm_port_table
)
1558 scm_undefineds
= scm_cons (SCM_UNDEFINED
, SCM_EOL
);
1559 SCM_CDR (scm_undefineds
) = scm_undefineds
;
1561 scm_listofnull
= scm_cons (SCM_EOL
, SCM_EOL
);
1562 scm_nullstr
= scm_makstr (0L, 0);
1563 scm_nullvect
= scm_make_vector (SCM_INUM0
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1564 scm_symhash
= scm_make_vector ((SCM
) SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
, SCM_UNDEFINED
);
1565 scm_weak_symhash
= scm_make_weak_key_hash_table ((SCM
) SCM_MAKINUM (scm_symhash_dim
));
1566 scm_symhash_vars
= scm_make_vector ((SCM
) SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
, SCM_UNDEFINED
);
1567 scm_permobjs
= SCM_EOL
;
1568 scm_asyncs
= SCM_EOL
;
1569 scm_sysintern ("most-positive-fixnum", (SCM
) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM
));
1570 scm_sysintern ("most-negative-fixnum", (SCM
) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM
));
1572 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD
));