/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */

#undef NULL

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the object pointed to had a
   size of 1.  */
#if 0 /* Arithmetic on void* is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

typedef char *POINTER;

#endif
#endif /* 0 */

/* Unconditionally use char * for this.  */
typedef char *POINTER;

typedef unsigned long SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */
extern void safe_bcopy ();

#include "getpagesize.h"

#else /* Not emacs.  */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)

#endif /* emacs.  */

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;

static void r_alloc_init ();
\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
static POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) ((unsigned long int) (addr) & ~(page_size - 1))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int) (addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
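
/* To illustrate the rounding (example values, not guarantees; assume
   page_size == 4096 and MEM_ALIGN == 8, both of which are really
   machine-dependent):

     ROUNDUP (5000)    => 8192   (next page boundary)
     ROUNDUP (4096)    => 4096   (already page-aligned)
     MEM_ROUNDUP (13)  => 16     (next multiple of MEM_ALIGN)
     ALIGNED (8192)    => 1,  ALIGNED (8193) => 0  */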
\f
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
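
/* A sketch of one heap's address range (an added illustration, not
   part of the structure definitions; addresses increase rightward):

   start            bloc_start                 free            end
     |  malloc data     |   relocatable blocs    |   unused     |
*/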

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing `.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data (provided both blocs are in the
   same heap; a bloc that begins a new heap starts at that heap's
   bloc_start).  */
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  /* Heap this bloc is in.  */
  struct heap *heap;
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;
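
/* An illustrative picture (added here, not from the original
   comments): with three blocs A, B, and C in a single heap, the
   chain looks like

     first_bloc -> A <-> B <-> C <- last_bloc

   where A->data + A->size == B->data and B->data + B->size == C->data,
   and *A->variable is the pointer the rest of Emacs uses to reach A's
   data, so the allocator can update it whenever A is relocated.  */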

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is
   in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

	  if ((*real_morecore) (bloc_start - new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

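      /* For instance (illustrative numbers, assuming 4k pages): if
	 SIZE is 10000, extra_bytes is 53248, and 2000 bytes at the
	 end of the last heap are already available, we ask the system
	 for 10000 + 53248 - 2000 = 61248 bytes, rounded so that the
	 heap still ends on a page boundary.  */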
      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end += get;
    }

  return address;
}

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add up the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *) last_heap->end - excess);
	  last_heap->end -= excess;
	}

      if ((*real_morecore) (- excess) == 0)
	abort ();
    }
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that bloc.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new bloc.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      s += tb->size;
	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      address += b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
	     : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  safe_bcopy (b->data, b->new_data, b->size);
	  *b->variable = b->data = b->new_data;
	}
      safe_bcopy (bloc->data, bloc->new_data, old_size);
      bzero (bloc->new_data + old_size, size - old_size);
      *bloc->variable = bloc->data = bloc->new_data;
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  safe_bcopy (b->data, b->new_data, b->size);
	  *b->variable = b->data = b->new_data;
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.
     Check bloc->next and bloc->prev for null before dereferencing,
     since BLOC may be the last or first bloc in the whole chain.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != NIL_BLOC && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

static int use_relocatable_buffers;
static int r_alloc_freeze_level;

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous one.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (r_alloc_freeze_level > 0 || ! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* Move all blocs upward.  */
	  if (r_alloc_freeze_level > 0
	      || ! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}

      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
	- ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
		 ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
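
/* A minimal usage sketch (added illustration; `buf' is a hypothetical
   caller variable, not part of this file):

     POINTER buf;
     r_alloc (&buf, 1024);	-- buf now points to 1024 relocatable bytes
     r_re_alloc (&buf, 4096);	-- grow; buf may now point elsewhere
     r_alloc_free (&buf);	-- release; buf is set to 0

   Because r_alloc records &buf in the bloc's `variable' field, the
   allocator can update buf in place whenever the bloc is relocated.  */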

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no bloc allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocs above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size <= bloc->size)
    /* Wouldn't it be useful to actually resize the bloc here?  */
    return *ptr;

  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
    return 0;

  return *ptr;
}

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (--r_alloc_freeze_level < 0)
    abort ();
}
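
/* Freeze and thaw calls nest; an illustrative pairing (added here,
   not from the original source):

     r_alloc_freeze (0);	-- blocs may not move from here on
     ... use raw pointers into relocatable data safely ...
     r_alloc_thaw ();		-- relocation is allowed again  */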
\f
/* The hook `malloc' uses for the function which gets more space
   from the system.  */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}
#ifdef DEBUG
#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */