/* Copyright (C) 2010,2012,2016 Matthew Fluet.
 * Copyright (C) 1999-2008 Henry Cejtin, Matthew Fluet, Suresh
 *    Jagannathan, and Stephen Weeks.
 * Copyright (C) 1997-2000 NEC Research Institute.
 *
 * MLton is released under a BSD-style license.
 * See the file MLton-LICENSE for details.
 */

/* ---------------------------------------------------------------- */
/*                  Jonkers Mark-compact Collection                 */
/* ---------------------------------------------------------------- */
14 void copyForThreadInternal (pointer dst
, pointer src
) {
17 "copyForThreadInternal dst = "FMTPTR
" src = "FMTPTR
"\n",
18 (uintptr_t)dst
, (uintptr_t)src
);
19 if (OBJPTR_SIZE
> GC_HEADER_SIZE
) {
22 assert (0 == (OBJPTR_SIZE
% GC_HEADER_SIZE
));
23 count
= (OBJPTR_SIZE
- GC_HEADER_SIZE
) / GC_HEADER_SIZE
;
24 src
= src
+ GC_HEADER_SIZE
* count
;
26 for (size_t i
= 0; i
<= count
; i
++) {
27 *((GC_header
*)dst
) = *((GC_header
*)src
);
28 dst
+= GC_HEADER_SIZE
;
29 src
-= GC_HEADER_SIZE
;
31 } else if (GC_HEADER_SIZE
> OBJPTR_SIZE
) {
34 assert (0 == (GC_HEADER_SIZE
% OBJPTR_SIZE
));
35 count
= (GC_HEADER_SIZE
- OBJPTR_SIZE
) / OBJPTR_SIZE
;
36 dst
= dst
+ OBJPTR_SIZE
* count
;
38 for (size_t i
= 0; i
<= count
; i
++) {
39 *((objptr
*)dst
) = *((objptr
*)src
);
43 } else /* (GC_HEADER_SIZE == OBJPTR_SIZE) */ {
44 *((GC_header
*)dst
) = *((GC_header
*)src
);
48 void threadInternalObjptr (GC_state s
, objptr
*opp
) {
53 opop
= pointerToObjptr ((pointer
)opp
, s
->heap
.start
);
54 p
= objptrToPointer (*opp
, s
->heap
.start
);
57 "threadInternal opp = "FMTPTR
" p = "FMTPTR
" header = "FMTHDR
"\n",
58 (uintptr_t)opp
, (uintptr_t)p
, getHeader (p
));
59 headerp
= getHeaderp (p
);
60 copyForThreadInternal ((pointer
)(opp
), (pointer
)(headerp
));
61 copyForThreadInternal ((pointer
)(headerp
), (pointer
)(&opop
));
64 /* If the object pointer is valid, and points to an unmarked object,
65 * then clear the object pointer.
67 void updateWeaksForMarkCompact (GC_state s
) {
71 for (w
= s
->weaks
; w
!= NULL
; w
= w
->link
) {
72 assert (BOGUS_OBJPTR
!= w
->objptr
);
75 fprintf (stderr
, "updateWeaksForMarkCompact w = "FMTPTR
" ", (uintptr_t)w
);
76 p
= objptrToPointer(w
->objptr
, s
->heap
.start
);
77 /* If it's unmarked, clear the weak pointer. */
78 if (isPointerMarked(p
)) {
80 fprintf (stderr
, "not cleared\n");
83 fprintf (stderr
, "cleared\n");
84 *(getHeaderp((pointer
)w
- offsetofWeak (s
))) = GC_WEAK_GONE_HEADER
| MARK_MASK
;
85 w
->objptr
= BOGUS_OBJPTR
;
91 void updateForwardPointersForMarkCompact (GC_state s
, GC_stack currentStack
) {
93 pointer endOfLastMarked
;
99 size_t size
, skipFront
, skipGap
;
101 if (DEBUG_MARK_COMPACT
)
102 fprintf (stderr
, "Update forward pointers.\n");
103 front
= alignFrontier (s
, s
->heap
.start
);
104 back
= s
->heap
.start
+ s
->heap
.oldGenSize
;
106 endOfLastMarked
= front
;
108 if (DEBUG_MARK_COMPACT
)
109 fprintf (stderr
, "updateObject front = "FMTPTR
" back = "FMTPTR
"\n",
110 (uintptr_t)front
, (uintptr_t)back
);
113 p
= advanceToObjectData (s
, front
);
114 headerp
= getHeaderp (p
);
116 if (GC_VALID_HEADER_MASK
& header
) {
118 if (MARK_MASK
& header
) {
119 /* It is marked, but has no forward pointers.
120 * Thread internal pointers.
123 assert (GC_VALID_HEADER_MASK
& header
);
124 assert (MARK_MASK
& header
);
126 size_t metaDataBytes
, objectBytes
;
127 GC_objectTypeTag tag
;
128 uint16_t bytesNonObjptrs
, numObjptrs
;
130 assert (header
== getHeader (p
));
131 splitHeader(s
, header
, &tag
, NULL
, &bytesNonObjptrs
, &numObjptrs
);
133 /* Compute the space taken by the header and object body. */
134 if ((NORMAL_TAG
== tag
) or (WEAK_TAG
== tag
)) { /* Fixed size object. */
135 metaDataBytes
= GC_NORMAL_METADATA_SIZE
;
136 objectBytes
= bytesNonObjptrs
+ (numObjptrs
* OBJPTR_SIZE
);
139 } else if (ARRAY_TAG
== tag
) {
140 metaDataBytes
= GC_ARRAY_METADATA_SIZE
;
141 objectBytes
= sizeofArrayNoMetaData (s
, getArrayLength (p
),
142 bytesNonObjptrs
, numObjptrs
);
145 } else { /* Stack. */
147 size_t reservedNew
, reservedOld
;
150 assert (STACK_TAG
== tag
);
151 metaDataBytes
= GC_STACK_METADATA_SIZE
;
153 current
= currentStack
== stack
;
155 reservedOld
= stack
->reserved
;
156 reservedNew
= sizeofStackShrinkReserved (s
, stack
, current
);
157 objectBytes
= sizeof (struct GC_stack
) + stack
->used
;
158 skipFront
= reservedOld
- stack
->used
;
159 skipGap
= reservedOld
- reservedNew
;
161 size
= metaDataBytes
+ objectBytes
;
162 if (DEBUG_MARK_COMPACT
)
163 fprintf (stderr
, "threading "FMTPTR
" of size %"PRIuMAX
"\n",
164 (uintptr_t)p
, (uintmax_t)size
);
165 if ((size_t)(front
- endOfLastMarked
) >= GC_ARRAY_METADATA_SIZE
) {
166 pointer newArray
= endOfLastMarked
;
167 /* Compress all of the unmarked into one vector. We require
168 * GC_ARRAY_METADATA_SIZE space to be available because that is
169 * the smallest possible array.
171 if (DEBUG_MARK_COMPACT
)
172 fprintf (stderr
, "compressing from "FMTPTR
" to "FMTPTR
" (length = %"PRIuMAX
")\n",
173 (uintptr_t)endOfLastMarked
, (uintptr_t)front
,
174 (uintmax_t)(front
- endOfLastMarked
));
175 *((GC_arrayCounter
*)(newArray
)) = 0;
176 newArray
+= GC_ARRAY_COUNTER_SIZE
;
177 *((GC_arrayLength
*)(newArray
)) =
178 ((size_t)(front
- endOfLastMarked
)) - GC_ARRAY_METADATA_SIZE
;
179 newArray
+= GC_ARRAY_LENGTH_SIZE
;
180 *((GC_header
*)(newArray
)) = GC_WORD8_VECTOR_HEADER
;
183 front
+= size
+ skipFront
;
184 endOfLastMarked
= front
;
185 foreachObjptrInObject (s
, p
, threadInternalObjptr
, FALSE
);
188 /* It's not marked. */
189 size
= sizeofObject (s
, p
);
198 assert (not (GC_VALID_HEADER_MASK
& header
));
199 /* It's a pointer. This object must be live. Fix all the forward
200 * pointers to it, store its header, then thread its internal
204 newObjptr
= pointerToObjptr (new, s
->heap
.start
);
209 copyForThreadInternal ((pointer
)(&curObjptr
), (pointer
)headerp
);
210 cur
= objptrToPointer (curObjptr
, s
->heap
.start
);
212 copyForThreadInternal ((pointer
)headerp
, cur
);
213 *((objptr
*)cur
) = newObjptr
;
216 } while (0 == (1 & header
));
224 void updateBackwardPointersAndSlideForMarkCompact (GC_state s
, GC_stack currentStack
) {
231 size_t size
, skipFront
, skipGap
;
233 if (DEBUG_MARK_COMPACT
)
234 fprintf (stderr
, "Update backward pointers and slide.\n");
235 front
= alignFrontier (s
, s
->heap
.start
);
236 back
= s
->heap
.start
+ s
->heap
.oldGenSize
;
239 if (DEBUG_MARK_COMPACT
)
240 fprintf (stderr
, "updateObject front = "FMTPTR
" back = "FMTPTR
"\n",
241 (uintptr_t)front
, (uintptr_t)back
);
244 p
= advanceToObjectData (s
, front
);
245 headerp
= getHeaderp (p
);
247 if (GC_VALID_HEADER_MASK
& header
) {
249 if (MARK_MASK
& header
) {
250 /* It is marked, but has no backward pointers to it.
254 assert (GC_VALID_HEADER_MASK
& header
);
255 assert (MARK_MASK
& header
);
257 size_t metaDataBytes
, objectBytes
;
258 GC_objectTypeTag tag
;
259 uint16_t bytesNonObjptrs
, numObjptrs
;
261 assert (header
== getHeader (p
));
262 splitHeader(s
, header
, &tag
, NULL
, &bytesNonObjptrs
, &numObjptrs
);
264 /* Compute the space taken by the header and object body. */
265 if ((NORMAL_TAG
== tag
) or (WEAK_TAG
== tag
)) { /* Fixed size object. */
266 metaDataBytes
= GC_NORMAL_METADATA_SIZE
;
267 objectBytes
= bytesNonObjptrs
+ (numObjptrs
* OBJPTR_SIZE
);
270 } else if (ARRAY_TAG
== tag
) {
271 metaDataBytes
= GC_ARRAY_METADATA_SIZE
;
272 objectBytes
= sizeofArrayNoMetaData (s
, getArrayLength (p
),
273 bytesNonObjptrs
, numObjptrs
);
276 } else { /* Stack. */
278 size_t reservedNew
, reservedOld
;
281 assert (STACK_TAG
== tag
);
282 metaDataBytes
= GC_STACK_METADATA_SIZE
;
284 current
= currentStack
== stack
;
286 reservedOld
= stack
->reserved
;
287 reservedNew
= sizeofStackShrinkReserved (s
, stack
, current
);
288 if (reservedNew
< stack
->reserved
) {
289 if (DEBUG_STACKS
or s
->controls
.messages
)
291 "[GC: Shrinking stack of size %s bytes to size %s bytes, using %s bytes.]\n",
292 uintmaxToCommaString(stack
->reserved
),
293 uintmaxToCommaString(reservedNew
),
294 uintmaxToCommaString(stack
->used
));
295 stack
->reserved
= reservedNew
;
297 objectBytes
= sizeof (struct GC_stack
) + stack
->used
;
298 skipFront
= reservedOld
- stack
->used
;
299 skipGap
= reservedOld
- reservedNew
;
301 size
= metaDataBytes
+ objectBytes
;
303 if (DEBUG_MARK_COMPACT
)
304 fprintf (stderr
, "unmarking "FMTPTR
" of size %"PRIuMAX
"\n",
305 (uintptr_t)p
, (uintmax_t)size
);
306 *headerp
= header
& ~MARK_MASK
;
308 if (DEBUG_MARK_COMPACT
)
309 fprintf (stderr
, "sliding "FMTPTR
" down %"PRIuMAX
" to "FMTPTR
"\n",
310 (uintptr_t)front
, (uintmax_t)gap
, (uintptr_t)(front
- gap
));
311 GC_memmove (front
, front
- gap
, size
);
313 front
+= size
+ skipFront
;
316 /* It's not marked. */
317 size
= sizeofObject (s
, p
);
318 if (DEBUG_MARK_COMPACT
)
319 fprintf (stderr
, "skipping "FMTPTR
" of size %"PRIuMAX
"\n",
320 (uintptr_t)p
, (uintmax_t)size
);
329 assert (not (GC_VALID_HEADER_MASK
& header
));
330 /* It's a pointer. This object must be live. Fix all the
331 * backward pointers to it. Then unmark it.
334 newObjptr
= pointerToObjptr (new, s
->heap
.start
);
339 copyForThreadInternal ((pointer
)(&curObjptr
), (pointer
)headerp
);
340 cur
= objptrToPointer (curObjptr
, s
->heap
.start
);
342 copyForThreadInternal ((pointer
)headerp
, cur
);
343 *((objptr
*)cur
) = newObjptr
;
346 } while (0 == (1 & header
));
347 /* The unmarked header will be stored by unmark. */
352 s
->heap
.oldGenSize
= (size_t)((front
- gap
) - s
->heap
.start
);
353 if (DEBUG_MARK_COMPACT
)
354 fprintf (stderr
, "oldGenSize = %"PRIuMAX
"\n",
355 (uintmax_t)s
->heap
.oldGenSize
);
359 void majorMarkCompactGC (GC_state s
) {
360 size_t bytesHashConsed
;
361 size_t bytesMarkCompacted
;
362 GC_stack currentStack
;
363 struct rusage ru_start
;
365 if (detailedGCTime (s
))
366 startTiming (&ru_start
);
367 s
->cumulativeStatistics
.numMarkCompactGCs
++;
368 if (DEBUG
or s
->controls
.messages
) {
370 "[GC: Starting major mark-compact;]\n");
372 "[GC:\theap at "FMTPTR
" of size %s bytes.]\n",
373 (uintptr_t)(s
->heap
.start
),
374 uintmaxToCommaString(s
->heap
.size
));
376 currentStack
= getStackCurrent (s
);
377 if (s
->hashConsDuringGC
) {
378 s
->lastMajorStatistics
.bytesHashConsed
= 0;
379 s
->cumulativeStatistics
.numHashConsGCs
++;
380 s
->objectHashTable
= allocHashTable (s
);
381 foreachGlobalObjptr (s
, dfsMarkWithHashConsWithLinkWeaks
);
382 freeHashTable (s
->objectHashTable
);
384 foreachGlobalObjptr (s
, dfsMarkWithoutHashConsWithLinkWeaks
);
386 updateWeaksForMarkCompact (s
);
387 foreachGlobalObjptr (s
, threadInternalObjptr
);
388 updateForwardPointersForMarkCompact (s
, currentStack
);
389 updateBackwardPointersAndSlideForMarkCompact (s
, currentStack
);
390 bytesHashConsed
= s
->lastMajorStatistics
.bytesHashConsed
;
391 s
->cumulativeStatistics
.bytesHashConsed
+= bytesHashConsed
;
392 bytesMarkCompacted
= s
->heap
.oldGenSize
;
393 s
->cumulativeStatistics
.bytesMarkCompacted
+= bytesMarkCompacted
;
394 s
->lastMajorStatistics
.kind
= GC_MARK_COMPACT
;
395 if (detailedGCTime (s
))
396 stopTiming (&ru_start
, &s
->cumulativeStatistics
.ru_gcMarkCompact
);
397 if (DEBUG
or s
->controls
.messages
) {
399 "[GC: Finished major mark-compact; mark compacted %s bytes.]\n",
400 uintmaxToCommaString(bytesMarkCompacted
));
401 if (s
->hashConsDuringGC
)
402 printBytesHashConsedMessage(bytesHashConsed
,
403 bytesHashConsed
+ bytesMarkCompacted
);