/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000, 2001, 2002, 2003, 2004,
     2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include <setjmp.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy (const char *, char *, int);

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt (int, int);
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */


#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init (void);

\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int) (addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
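
/* Illustrative arithmetic (not part of the original source): assuming
   page_size is 4096 and MEM_ALIGN is 8, then

     ROUNDUP (5000)     => 8192
     ALIGNED (8192)     => 1 (true)
     MEM_ROUNDUP (13)   => 16

   i.e. ROUNDUP rounds request sizes up to a page boundary, while
   MEM_ROUNDUP rounds addresses up to the alignment of a double.  */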

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif


\f
/***********************************************************************
		     Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))
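
/* Sketch of one heap's layout (illustrative, not part of the original
   source):

     start          bloc_start              free             end
       |  malloc data  |  relocatable blocs  |  unused space  |

   so start <= bloc_start <= free <= end holds for every heap, and each
   heap header after the first lives at the start of the space it
   describes.  */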

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing `.data' members.
   The data blocks abut each other; if b->next is non-nil, then
	b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable, while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;
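
/* A minimal sketch (not part of the original source) of the abutment
   invariant stated above; the name check_bloc_abutment is hypothetical.
   The real consistency checks live in r_alloc_check, in the DEBUG
   section near the end of this file.  */
#ifdef DEBUG
static void
check_bloc_abutment (void)
{
  bloc_ptr b;

  /* Each bloc's data must end exactly where the next one's begins,
     unless the next bloc starts a fresh heap.  */
  for (b = first_bloc; b != NIL_BLOC && b->next != NIL_BLOC; b = b->next)
    if ((char *) b->data + b->size != (char *) b->next->data
	&& b->next->data != b->next->heap->bloc_start)
      abort ();
}
#endif /* DEBUG */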

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (POINTER address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (POINTER address, SIZE size)
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
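
/* Illustrative call pattern (not part of the original source): a caller
   reserving SIZE more bytes of relocatable space at the current break
   would write something like

     POINTER p = obtain (break_value, size);
     if (p == 0)
       return 0;
     break_value = (char *) p + size;

   recording the use of those bytes itself, since obtain keeps no such
   record; get_bloc below follows exactly this pattern.  */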

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  long excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}

/* Return the total size in use by the relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use (void)
{
  return (char *) break_value - (char *) virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (POINTER *ptr)
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
	 Don't abort here, as callers might be expecting this, but
	 callers that always expect a bloc to be returned should abort
	 if one isn't to avoid a memory corruption bug that is
	 difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (SIZE size)
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc_ptr bloc, bloc_ptr before)
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, SIZE size)
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
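
/* Illustrative example (not part of the original source): with three
   abutting blocs A, B, C, the call

     resize_bloc (B, B->size + page_size)

   first computes new positions for B and C, slides C upward (updating
   *C->variable via safe_bcopy), then widens B and zero-fills B's new
   tail bytes.  Shrinking works symmetrically, sliding later blocs
   downward instead.  */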
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (long int size)
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		    - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
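
/* Illustrative use of the hook (hypothetical values, not part of the
   original source).  Once installed as __morecore:

     r_alloc_sbrk (0);        returns the virtual break value
     r_alloc_sbrk (8192);     grows malloc space by 8192 bytes, first
                              relocating any blocs that are in the way;
                              returns the old break, zero-filled
     r_alloc_sbrk (-8192);    shrinks malloc space again, possibly
                              sliding blocs back down and returning
                              pages to the system via relinquish

   A zero result for a positive request signals that we are out of
   memory, matching __default_morecore.  */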


/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (register POINTER *ptr)
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort (); /* Double free?  PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort (); /* Already freed?  PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
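
/* Typical use of the interface routines (illustrative, not part of the
   original source; the variable name buf is hypothetical).  The address
   of the variable holding the data pointer is registered with the bloc,
   so relocation can update it behind the caller's back:

     static POINTER buf;

     r_alloc (&buf, 1024);      buf now points to 1024 usable bytes
     r_re_alloc (&buf, 4096);   the data may move; buf is updated
     r_alloc_free (&buf);       the bloc is released and buf is set to 0

   Passing the address of any other variable than the one used at
   allocation time makes find_bloc fail, which aborts.  */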

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (long int size)
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw (void)
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
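
/* Illustrative pairing (not part of the original source): code that
   needs bloc addresses to stay put across intervening mallocs can
   bracket the critical region like this:

     r_alloc_freeze (16 * 1024);   reserve some slack, then pin blocs
     ... work with fixed addresses ...
     r_alloc_thaw ();              unpin; blocs freed while frozen are
                                   reclaimed once the level reaches 0

   Freezes nest, so every r_alloc_freeze must be matched by exactly one
   r_alloc_thaw; an extra thaw aborts.  */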


#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */

#ifdef DEBUG

#include <assert.h>

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */

/* Update the internal record of which variable points to some data to NEW.
   Used by buffer-swap-text in Emacs to restore consistency after it
   swaps the buffer text between two buffer objects.  The OLD pointer
   is checked to ensure that memory corruption does not occur due to
   misuse.  */
void
r_alloc_reset_variable (POINTER *old, POINTER *new)
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by pointer.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
	break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    abort (); /* Already freed?  OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}
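
/* Illustrative use (not part of the original source; the text fields
   are hypothetical stand-ins for the variables originally passed to
   r_alloc).  After buffer-swap-text exchanges two data pointers, the
   back-links are repaired like this:

     POINTER tmp = a->text;
     a->text = b->text;
     b->text = tmp;
     r_alloc_reset_variable (&a->text, &b->text);
     r_alloc_reset_variable (&b->text, &a->text);

   Each call finds the bloc whose data *NEW now points to, verifies it
   was registered under OLD, and re-registers it under NEW.  */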

\f
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init (void)
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
	 (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}

/* arch-tag: 6a524a15-faff-44c8-95d4-a5da6f55110f
   (do not change this comment) */