/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000, 2001, 2002, 2003, 2004,
     2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */
/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
#ifdef emacs

#include <config.h>
#include <setjmp.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy (const char *, char *, int);

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD           -2
extern int mallopt (int, int);
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

typedef size_t SIZE;
typedef void *POINTER;

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */
#include "getpagesize.h"

#define NIL ((POINTER) 0)
/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;
static void r_alloc_init (void);


/* Declarations for working with the malloc, ralloc, and system breaks.  */
89 /* Function to set the real break value. */
90 POINTER (*real_morecore
) ();
92 /* The break value, as seen by malloc. */
93 static POINTER virtual_break_value
;
95 /* The address of the end of the last data in use by ralloc,
96 including relocatable blocs as well as malloc data. */
97 static POINTER break_value
;
99 /* This is the size of a page. We round memory requests to this boundary. */
100 static int page_size
;
102 /* Whenever we get memory from the system, get this many extra bytes. This
103 must be a multiple of page_size. */
104 static int extra_bytes
;
/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
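
/* Worked example of the round-up idiom above (illustrative only; the
   values assume page_size == 4096 and MEM_ALIGN == sizeof (double) == 8).
   Adding (boundary - 1) and masking off the low bits is valid only
   because both boundaries are powers of two:

     ROUNDUP (1)       == 4096    一 one byte still costs a whole page
     ROUNDUP (4096)    == 4096    一 already aligned, unchanged
     ROUNDUP (4097)    == 8192
     MEM_ROUNDUP (5)   == 8
     ALIGNED (8192)    == 1       一 8192 is on a page boundary
     ALIGNED (8193)    == 0  */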
/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif
/***********************************************************************
			Implementation using sbrk
 ***********************************************************************/
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   or shrink.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))
/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;
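
/* Illustrative sketch (a hypothetical helper, not used by the
   allocator): walks the heap list using the layout described above,
   where each heap covers [start, end), malloc data occupies
   [start, bloc_start), and relocatable blocs occupy [bloc_start, free).  */
static SIZE
heap_bloc_bytes_example (void)
{
  heap_ptr h;
  SIZE total = 0;

  for (h = first_heap; h != NIL_HEAP; h = h->next)
    total += (char *) h->free - (char *) h->bloc_start;

  return total;
}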
/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable, while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))
/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;
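
/* Illustrative sketch (hypothetical helper): checks the bloc-list
   invariants documented above, namely that blocs appear in increasing
   address order and abut one another.  Returns 1 when consistent.  */
static int
bloc_list_consistent_example (void)
{
  bloc_ptr b;

  for (b = first_bloc; b != NIL_BLOC; b = b->next)
    if (b->next != NIL_BLOC
	&& (char *) b->data + b->size != (char *) b->next->data)
      return 0;

  return 1;
}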
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (POINTER address)
{
  register heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not
   in use.  It just returns the address of SIZE bytes that fall within a
   single heap.  If you call obtain twice in a row with the same
   arguments, you typically get the same value.  It's the caller's
   responsibility to keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (POINTER address, SIZE size)
{
  register heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  long excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *) last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}
/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use (void)
{
  return (char *) break_value - (char *) virtual_break_value;
}
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (POINTER *ptr)
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
	 Don't abort here, as callers might be expecting this, but
	 callers that always expect a bloc to be returned should abort
	 if one isn't to avoid a memory corruption bug that is
	 difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (SIZE size)
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc_ptr bloc, bloc_ptr before)
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, SIZE size)
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
POINTER
r_alloc_sbrk (long int size)
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
	- ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */
POINTER
r_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (register POINTER *ptr)
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort (); /* Double free?  PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort (); /* Already freed?  PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
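
/* Illustrative usage sketch (hypothetical caller; the variable name and
   sizes are invented).  The key point of this interface is that a bloc
   is tied to the address of the caller's pointer variable, so the
   allocator can update that variable whenever it relocates the data.  */
static void
r_alloc_usage_example (void)
{
  POINTER buf;

  if (r_alloc (&buf, 8192) == 0)
    return;			/* Allocation failed; buf is 0.  */

  /* Use buf here, rereading it after any call that might allocate,
     since a relocation may have moved the data and updated buf.  */

  if (r_re_alloc (&buf, 16384) == 0)
    ;				/* Failed; the old bloc and buf are unchanged.  */

  r_alloc_free (&buf);		/* Releases the bloc and stores 0 in buf.  */
}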
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (long int size)
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}
void
r_alloc_thaw (void)
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
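
/* Illustrative sketch (hypothetical caller): code that must hand bloc
   data to something intolerant of relocation brackets the unsafe
   region with freeze/thaw.  The freeze level nests, and blocs freed
   while frozen are only reclaimed at the final thaw.  */
static void
freeze_thaw_usage_example (void)
{
  r_alloc_freeze (0);		/* Pin all blocs; request no extra room.  */
  /* ... relocatable data is guaranteed not to move here ...  */
  r_alloc_thaw ();		/* Unpin; frees blocs released meanwhile.  */
}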
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */
/* Update the internal record of which variable points to some data to NEW.
   Used by buffer-swap-text in Emacs to restore consistency after it
   swaps the buffer text between two buffer objects.  The OLD pointer
   is checked to ensure that memory corruption does not occur due to
   misuse.  */
void
r_alloc_reset_variable (POINTER *old, POINTER *new)
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by pointer.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
	break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    abort (); /* Already freed?  OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}
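
/* Illustrative sketch (hypothetical caller, mirroring what
   buffer-swap-text does): after two owners exchange data pointers by
   hand, each bloc must be told which variable now refers to it.  */
static void
swap_owners_example (POINTER *a, POINTER *b)
{
  POINTER tem = *a;
  *a = *b;
  *b = tem;

  r_alloc_reset_variable (b, a);	/* The bloc now at *a was owned by b.  */
  r_alloc_reset_variable (a, b);	/* The bloc now at *b was owned by a.  */
}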
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */
static void
r_alloc_init (void)
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
	 (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}
/* arch-tag: 6a524a15-faff-44c8-95d4-a5da6f55110f
   (do not change this comment) */