/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING. If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them. This means allowing for a possible
   hole between the first bloc and the end of malloc storage. */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS. */

#undef NULL

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the size of the object pointed
   to has a size of 1. */
#if 0 /* Arithmetic on void* is a GCC extension. */
#ifdef __STDC__
typedef void *POINTER;
#else

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

typedef char *POINTER;

#endif
#endif /* 0 */

/* Unconditionally use char * for this. */
typedef char *POINTER;

typedef unsigned long SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap. */
extern void safe_bcopy ();

extern int __malloc_extra_blocks;

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */

#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet. For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only. On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up. */
static int r_alloc_initialized = 0;

static void r_alloc_init ();
\f
/* Declarations for working with the malloc, ralloc, and system breaks. */

/* Function to set the real break value. */
static POINTER (*real_morecore) ();

/* The break value, as seen by malloc. */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data. */
static POINTER break_value;

/* This is the size of a page. We round memory requests to this boundary. */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes. This
   must be a multiple of page_size. */
static int extra_bytes;

/* Macros for rounding. Note that rounding to any value is possible
   by changing the definition of PAGE. */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) ((addr) & ~(page_size - 1))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int) (addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
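
/* A quick illustration of the rounding macros above; not part of the
   allocator. The concrete numbers assume a 4096-byte page (page_size
   as set up in r_alloc_init) and an 8-byte double for MEM_ALIGN. */
#if 0
#include <assert.h>

static void
rounding_example ()
{
  /* ROUNDUP rounds a size up to the next page boundary;
     values already on a boundary are left alone. */
  assert (ROUNDUP (1) == 4096);
  assert (ROUNDUP (4096) == 4096);
  assert (ROUNDUP (4097) == 8192);
  assert (ALIGNED (8192));

  /* MEM_ROUNDUP rounds up to the next multiple of sizeof (double),
     the alignment used for bloc data. */
  assert (MEM_ROUNDUP (13) == 16);
}
#endif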
\f
/* Data structures of heaps and blocs. */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap. When that happens, we start a new heap. */

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap. */
  POINTER start;
  /* End of memory range of this heap. */
  POINTER end;
  /* Start of relocatable data in this heap. */
  POINTER bloc_start;
  /* Start of unused space in this heap. */
  POINTER free;
  /* First bloc in this heap. */
  struct bp *first_bloc;
  /* Last bloc in this heap. */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers. */
static struct heap heap_base;

/* Head and tail of the list of heaps. */
static heap_ptr first_heap, last_heap;

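/* A sketch (not called anywhere) of the ordering each heap is expected
   to satisfy, per the layout comment above: malloc data in
   [start, bloc_start), blocs in [bloc_start, free), and unused space
   in [free, end). The function name is illustrative only. */
#if 0
static void
check_heap_layout ()
{
  heap_ptr h;

  for (h = first_heap; h != NIL_HEAP; h = h->next)
    if (! (h->start <= h->bloc_start
	   && h->bloc_start <= h->free
	   && h->free <= h->end))
      abort ();
}
#endif
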
/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected. Such elements may appear only while r_alloc_freeze_level > 0,
   and they are freed when the arena is thawed. Currently these blocs are
   not reusable while the arena is frozen, which is very inefficient. */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in. */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs. */
static bloc_ptr first_bloc, last_bloc;
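
/* Illustrative check (not called anywhere) of the abutment invariant
   stated above: within one heap, each bloc's data ends exactly where
   the next bloc's data begins. Restricting the test to blocs in the
   same heap is an assumption here, since a bloc list may span heaps. */
#if 0
static void
check_bloc_abutment ()
{
  bloc_ptr b;

  for (b = first_bloc; b != NIL_BLOC && b->next != NIL_BLOC; b = b->next)
    if (b->heap == b->next->heap
	&& b->data + b->size != b->next->data)
      abort ();
}
#endif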

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place. */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system. */

/* Find the heap that ADDRESS falls within. */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system. If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use. It just returns the address of SIZE bytes that
   fall within a single heap. If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory. */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within. */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps. */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
        break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space. */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
        {
          /* Someone else called sbrk. Make a new heap. */

          heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
          POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

          if ((*real_morecore) (bloc_start - new) != new)
            return 0;

          new_heap->start = new;
          new_heap->end = bloc_start;
          new_heap->bloc_start = bloc_start;
          new_heap->free = bloc_start;
          new_heap->next = NIL_HEAP;
          new_heap->prev = last_heap;
          new_heap->first_bloc = NIL_BLOC;
          new_heap->last_bloc = NIL_BLOC;
          last_heap->next = new_heap;
          last_heap = new_heap;

          address = bloc_start;
          already_available = 0;
        }

      /* Add space to the last heap (which we may have just created).
         Get some extra, so we can come here less often. */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
        - (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
        return 0;

      last_heap->end += get;
    }

  return address;
}
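
/* Illustrative use of obtain, following the contract above (a sketch,
   not code used by this file): obtain records nothing about the space
   it returns, so a caller that wants two distinct regions must advance
   its own bookkeeping pointer between calls. */
#if 0
static void
obtain_example ()
{
  POINTER p = obtain (break_value, 512);

  if (p == 0)
    return;			/* Couldn't get space from the system. */

  /* Without this, a second obtain (break_value, 512) would typically
     return the same address again. */
  break_value = p + 512;
}
#endif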

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely. */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all. */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
                                            ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
         And don't free anything unless we can free at least extra_bytes. */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
        {
          /* This heap should have no blocs in it. */
          if (last_heap->first_bloc != NIL_BLOC
              || last_heap->last_bloc != NIL_BLOC)
            abort ();

          /* Return the last heap, with its header, to the system. */
          excess = (char *) last_heap->end - (char *) last_heap->start;
          last_heap = last_heap->prev;
          last_heap->next = NIL_HEAP;
        }
      else
        {
          excess = (char *) last_heap->end
            - (char *) ROUNDUP ((char *) last_heap->end - excess);
          last_heap->end -= excess;
        }

      if ((*real_morecore) (- excess) == 0)
        abort ();
    }
}

/* Return the total size in use by the relocating allocator,
   above where malloc gets space. */

long
r_alloc_size_in_use ()
{
  return break_value - virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs. */

/* Find the bloc referenced by the address in PTR. Returns a pointer
   to that bloc. */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
        return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new bloc. */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
        free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use. */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs. */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs. */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP. If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value. */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere! */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
         move to the next heap and try again. */
      while (heap && address + b->size > heap->end)
        {
          heap = heap->next;
          if (heap == NIL_HEAP)
            break;
          address = heap->bloc_start;
        }

      /* If BLOC won't fit in any heap,
         get enough new space to hold BLOC and all following blocs. */
      if (heap == NIL_HEAP)
        {
          register bloc_ptr tb = b;
          register SIZE s = 0;

          /* Add up the size of all the following blocs. */
          while (tb != NIL_BLOC)
            {
              if (tb->variable)
                s += tb->size;

              tb = tb->next;
            }

          /* Get that space. */
          address = obtain (address, s);
          if (address == 0)
            return 0;

          heap = last_heap;
        }

      /* Record the new address of this bloc
         and update where the next bloc can start. */
      b->new_data = address;
      if (b->variable)
        address += b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory of BLOC
   before that of BEFORE. */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is. */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE. */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC. */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC. */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP. */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC. */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one. */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
         till we get to the one that B is in. */
      while (heap)
        {
          if (heap->bloc_start <= b->data && b->data <= heap->end)
            break;
          heap = heap->next;
          /* We know HEAP is not null now,
             because there has to be space for bloc B. */
          heap->first_bloc = NIL_BLOC;
          heap->last_bloc = NIL_BLOC;
          heap->free = heap->bloc_start;
        }

      /* Update HEAP's status for bloc B. */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
        heap->first_bloc = b;

      /* Record that B is in HEAP. */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty. */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes. This relocates the blocs
   that come after BLOC in memory. */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere! */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
        break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap. */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
             : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
        break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
      if (!bloc->variable)
        {
          bloc->size = 0;
          bloc->data = bloc->new_data;
        }
      else
        {
          safe_bcopy (bloc->data, bloc->new_data, old_size);
          bzero (bloc->new_data + old_size, size - old_size);
          *bloc->variable = bloc->data = bloc->new_data;
        }
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
                 : first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system. */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP. */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
        heap->first_bloc = bloc->next;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
        heap->last_bloc = bloc->prev;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines. */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary. If relocatable blocs are in use, this means relocating
   them. This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package. */

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space. GNU malloc would reclaim extra
         space if we passed an unaligned one. But we cannot always find
         a space which is contiguous to the previous one. */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough. */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
        {
          h = h->next;
          if (h == NIL_HEAP)
            break;
          address = (POINTER) ROUNDUP (h->start);
        }

      /* If not found, obtain more space. */
      if (h == NIL_HEAP)
        {
          get += extra_bytes + page_size;

          if (! obtain (address, get))
            return 0;

          if (first_heap == last_heap)
            address = (POINTER) ROUNDUP (virtual_break_value);
          else
            address = (POINTER) ROUNDUP (last_heap->start);
          h = last_heap;
        }

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
        {
          /* This is no clean solution - no idea how to do it better. */
          if (r_alloc_freeze_level)
            return NIL;

          /* There is a bug here: if the above obtain call succeeded, but the
             relocate_blocs call below does not succeed, we need to free
             the memory that we got with obtain. */

          /* Move all blocs upward. */
          if (! relocate_blocs (first_bloc, h, new_bloc_start))
            return 0;

          /* Note that (POINTER)(h+1) <= new_bloc_start since
             get >= page_size, so the following does not destroy the heap
             header. */
          for (b = last_bloc; b != NIL_BLOC; b = b->prev)
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }

          h->bloc_start = new_bloc_start;

          update_heap_bloc_correspondence (first_bloc, h);
        }
      if (h != first_heap)
        {
          /* Give up managing heaps below the one the new
             virtual_break_value points to. */
          first_heap->prev = NIL_HEAP;
          first_heap->next = h->next;
          first_heap->start = h->start;
          first_heap->end = h->end;
          first_heap->free = h->free;
          first_heap->first_bloc = h->first_bloc;
          first_heap->last_bloc = h->last_bloc;
          first_heap->bloc_start = h->bloc_start;

          if (first_heap->next)
            first_heap->next->prev = first_heap;
          else
            last_heap = first_heap;
        }

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
        - ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
        {
          excess -= extra_bytes;
          first_heap->bloc_start
            = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

          relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

          for (b = first_bloc; b != NIL_BLOC; b = b->next)
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
        {
          /* We found additional space below the first heap. */
          first_heap->start = (POINTER) ((char *) virtual_break_value + size);
        }
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
                 ? last_bloc->data + last_bloc->size
                 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}

/* Allocate a relocatable bloc of storage of size SIZE. A pointer to
   the data is returned in *PTR. PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze is set, a best fit of unused blocs could be done
   before allocating a new area. Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero. */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
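
/* Example of the calling convention (a sketch; the function name is
   illustrative): the caller passes the address of its own pointer
   variable, and the allocator remembers that address so it can rewrite
   the pointer whenever the bloc is relocated. */
#if 0
static void
r_alloc_example ()
{
  POINTER buf;

  if (r_alloc (&buf, 1024) == 0)
    return;			/* Allocation failed; buf is now zero. */

  /* Use buf here, but re-read it after any other r_alloc, r_re_alloc,
     or r_alloc_free call: those may relocate this bloc and update buf. */

  r_alloc_free (&buf);		/* Frees the bloc and stores 0 in buf. */
}
#endif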

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated. */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocs above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze is set, a new bloc is allocated, and the
   memory copied to it. Not very efficient. We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero. */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here? */
      /* I think so too, but not if it's too expensive... */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
          && r_alloc_freeze_level == 0)
        {
          resize_bloc (bloc, MEM_ROUNDUP (size));
          /* Never mind if this fails, just do nothing... */
          /* It *should* be infallible! */
        }
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
        {
          bloc_ptr new_bloc;
          new_bloc = get_bloc (MEM_ROUNDUP (size));
          if (new_bloc)
            {
              new_bloc->variable = ptr;
              *ptr = new_bloc->data;
              bloc->variable = (POINTER *) NIL;
            }
          else
            return NIL;
        }
      else
        {
          if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
            return NIL;
        }
    }
  return *ptr;
}
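
/* Resizing follows the same convention (an illustrative sketch): pass
   the address of the pointer; on success *ptr may point somewhere new,
   on failure it is left alone. */
#if 0
static void
r_re_alloc_example (ptr)
     POINTER *ptr;
{
  if (r_re_alloc (ptr, 8192) == 0)
    return;			/* Failure: *ptr and its bloc unchanged. */

  /* Success: *ptr has been updated and the old contents copied over. */
}
#endif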

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible. The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer. */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try. */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing. */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs. It is not too inefficient, as the resize
     and bcopy are done only once. Afterwards, all unreferenced blocs are
     already shrunk to zero size. */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
        if (!(*b)->variable)
          free_bloc (*b);
        else
          b = &(*b)->next;
    }
}
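
/* Typical freeze/thaw pairing, per the comments above (a sketch; the
   function name is illustrative): between the two calls no bloc moves,
   so a raw pointer into relocatable data stays valid. */
#if 0
static void
freeze_example (ptr)
     POINTER *ptr;
{
  r_alloc_freeze (0);		/* 0 = don't reserve extra heap first. */

  /* *ptr cannot be relocated here, even by other allocations. */

  r_alloc_thaw ();		/* Relocation re-enabled; deferred blocs freed. */
}
#endif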

\f
/* The hook `malloc' uses for the function which gets more space
   from the system. */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation. */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often. */
  __malloc_extra_blocks = 64;

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored. This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size. */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value. */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space. */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}

#ifdef DEBUG
#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
        {
          assert (ph->end < h->start);
          assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
        }

      if (h->bloc_start <= break_value && break_value <= h->end)
        found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
        {
          if (h->bloc_start <= b->data && b->data + b->size <= h->end)
            break;
          ph = h;
        }

      assert (h);

      if (pb && pb->data + pb->size != b->data)
        {
          assert (ph && b->data == h->bloc_start);
          while (ph)
            {
              if (ph->bloc_start <= pb->data
                  && pb->data + pb->size <= ph->end)
                {
                  assert (pb->data + pb->size + b->size > ph->end);
                  break;
                }
              else
                {
                  assert (ph->bloc_start + b->size > ph->end);
                }
              ph = ph->prev;
            }
        }
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */