/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */

#undef NULL

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the object pointed to has a
   size of 1.  */
#if 0 /* Arithmetic on void* is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

typedef char *POINTER;

#endif
#endif /* 0 */

/* Unconditionally use char * for this.  */
typedef char *POINTER;

typedef unsigned long SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */
extern void safe_bcopy ();

extern int __malloc_extra_blocks;

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */

#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;

static void r_alloc_init ();
\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
static POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) ((addr) & (~(page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int) (addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
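
/* For example, if MEM_ALIGN is 8 (sizeof (double) on many machines),
   MEM_ROUNDUP (13) yields 16 and MEM_ROUNDUP (16) stays 16; ROUNDUP
   likewise rounds a request up to the next page_size boundary.  Both
   assume the alignment value is a power of two, so that the mask
   arithmetic works.  */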
\f
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
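
/* Schematically, each heap looks like this, with addresses increasing
   to the right:

     start           bloc_start                 free             end
       | malloc data     | relocatable blocs      | unused space   |
*/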

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing `.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  Such blocs may appear only while r_alloc_freeze_level > 0,
   and they are freed when the arena is thawed.  Currently these blocs are
   not reusable while the arena is frozen, which is quite inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is
   in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

	  if ((*real_morecore) (bloc_start - new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end += get;
    }

  return address;
}


/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *) last_heap->end - excess);
	  last_heap->end -= excess;
	}

      if ((*real_morecore) (- excess) == 0)
	abort ();
    }
}

/* Return the total size in use by the relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return break_value - virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that bloc.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new bloc.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address += b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
	     : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero (bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could not
	 always find a space which is contiguous to the previous one.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
	- ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
		 ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze is set, a best fit of unused blocs could be done
   before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
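
/* A sketch of typical use, with `buf' standing for whatever variable
   the caller keeps its data pointer in:

     POINTER buf;
     if (r_alloc (&buf, 1024) == 0)
       ... handle the allocation failure ...
     ... use buf; its value may change whenever blocs relocate ...
     r_alloc_free (&buf);

   The allocator records &buf as the bloc's `variable' and stores the
   bloc's new address through it after each relocation.  */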

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no bloc allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocs above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
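
/* Continuing the sketch above, growing the same example bloc:

     if (r_re_alloc (&buf, 4096) == 0)
       ... the resize failed; buf is unchanged and still valid ...

   On success, *PTR (here buf) is updated to the bloc's possibly new
   address, which is also the return value.  */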

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the
     resize and bcopy are done only once.  Afterwards, all unreferenced
     blocs are already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
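
/* A sketch of how freeze and thaw pair up.  While frozen, bloc data
   holds still, so raw pointers into it remain valid; blocs freed in
   the meantime are only marked (variable == NIL) and are collected at
   thaw time:

     r_alloc_freeze (0);
     ... code that keeps raw pointers into bloc data ...
     r_alloc_thaw ();
*/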

\f
/* The hook `malloc' uses for the function which gets more space
   from the system.  */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}

#ifdef DEBUG
#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER) h && (POINTER) (h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */