/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */

#undef NULL
/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the size of the object pointed
   to is 1.  */
#if 0 /* Arithmetic on void* is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

typedef char *POINTER;

#endif
#endif /* 0 */

/* Unconditionally use char * for this.  */
typedef char *POINTER;

typedef unsigned long SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */
extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt ();
#else
extern int __malloc_extra_blocks;
#endif

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */

#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;

static void r_alloc_init ();
\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
static POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
                       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
                           & ~(MEM_ALIGN - 1))
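
/* Worked examples for the macros above (illustrative; assumes the
   typical values page_size == 4096 and MEM_ALIGN == 8):

     ALIGNED (0x2000)        is true, since 0x2000 & 0xfff == 0
     ROUNDUP (5000)          yields 8192:  (5000 + 4095) & ~4095
     ROUND_TO_PAGE (0x2fff)  yields 0x2000
     MEM_ROUNDUP (13)        yields 16:    (13 + 7) & ~7  */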
\f
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
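
/* A sketch of one heap's address range, drawn from the description
   above (illustrative only):

     start          bloc_start              free               end
       | malloc data |  relocatable blocs    |  unused reserve  |

   `free' may equal `end'; the space between them has been obtained
   from the system but is not yet occupied by any bloc.  */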

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs
   are not reusable while the arena is frozen.  Very inefficient.  */
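
/* For example (illustrative), with three live blocs A, B and C on the
   chain, the addresses satisfy

     A->data + A->size == B->data
     B->data + B->size == C->data
     C->data + C->size == break_value

   i.e. the blocs are packed with no gaps, and break_value tracks the
   end of the last bloc.  */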

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
        break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
        {
          /* Someone else called sbrk.  Make a new heap.  */

          heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
          POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

          if ((*real_morecore) (bloc_start - new) != new)
            return 0;

          new_heap->start = new;
          new_heap->end = bloc_start;
          new_heap->bloc_start = bloc_start;
          new_heap->free = bloc_start;
          new_heap->next = NIL_HEAP;
          new_heap->prev = last_heap;
          new_heap->first_bloc = NIL_BLOC;
          new_heap->last_bloc = NIL_BLOC;
          last_heap->next = new_heap;
          last_heap = new_heap;

          address = bloc_start;
          already_available = 0;
        }

      /* Add space to the last heap (which we may have just created).
         Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
        - (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
        return 0;

      last_heap->end += get;
    }

  return address;
}

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
                                            ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
         And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
        {
          /* This heap should have no blocs in it.  */
          if (last_heap->first_bloc != NIL_BLOC
              || last_heap->last_bloc != NIL_BLOC)
            abort ();

          /* Return the last heap, with its header, to the system.  */
          excess = (char *)last_heap->end - (char *)last_heap->start;
          last_heap = last_heap->prev;
          last_heap->next = NIL_HEAP;
        }
      else
        {
          excess = (char *) last_heap->end
            - (char *) ROUNDUP ((char *)last_heap->end - excess);
          last_heap->end -= excess;
        }

      if ((*real_morecore) (- excess) == 0)
        {
          /* If the system didn't want that much memory back, adjust
             the end of the last heap to reflect that.  This can occur
             if break_value is still within the original data segment.  */
          last_heap->end += excess;
          /* Make sure that the result of the adjustment is accurate.
             It should be, for the else clause above; the other case,
             which returns the entire last heap to the system, seems
             unlikely to trigger this mode of failure.  */
          if (last_heap->end != (*real_morecore) (0))
            abort ();
        }
    }
}
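
/* A worked example of the hysteresis above (illustrative; assumes
   page_size == 4096, so extra_bytes == ROUNDUP (50000) == 53248 as set
   in r_alloc_init below): relinquish returns memory to the system only
   when more than 2 * 53248 unused bytes lie beyond break_value, and
   even then it keeps 53248 bytes in reserve.  */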

/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return break_value - virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that bloc.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
        return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new bloc.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
        free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
         move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
        {
          heap = heap->next;
          if (heap == NIL_HEAP)
            break;
          address = heap->bloc_start;
        }

      /* If BLOC won't fit in any heap,
         get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
        {
          register bloc_ptr tb = b;
          register SIZE s = 0;

          /* Add up the size of all the following blocs.  */
          while (tb != NIL_BLOC)
            {
              if (tb->variable)
                s += tb->size;

              tb = tb->next;
            }

          /* Get that space.  */
          address = obtain (address, s);
          if (address == 0)
            return 0;

          heap = last_heap;
        }

      /* Record the new address of this bloc
         and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
        address += b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
         till we get to the one that B is in.  */
      while (heap)
        {
          if (heap->bloc_start <= b->data && b->data <= heap->end)
            break;
          heap = heap->next;
          /* We know HEAP is not null now,
             because there has to be space for bloc B.  */
          heap->first_bloc = NIL_BLOC;
          heap->last_bloc = NIL_BLOC;
          heap->free = heap->bloc_start;
        }

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
        heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
        break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
             : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
        break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
      if (!bloc->variable)
        {
          bloc->size = 0;
          bloc->data = bloc->new_data;
        }
      else
        {
          safe_bcopy (bloc->data, bloc->new_data, old_size);
          bzero (bloc->new_data + old_size, size - old_size);
          *bloc->variable = bloc->data = bloc->new_data;
        }
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
                 : first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
        heap->first_bloc = bloc->next;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
        heap->last_bloc = bloc->prev;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim extra
         space if we passed an unaligned one.  But we could not always
         find a space which is contiguous to the previous heap.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
        {
          h = h->next;
          if (h == NIL_HEAP)
            break;
          address = (POINTER) ROUNDUP (h->start);
        }

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
        {
          get += extra_bytes + page_size;

          if (! obtain (address, get))
            return 0;

          if (first_heap == last_heap)
            address = (POINTER) ROUNDUP (virtual_break_value);
          else
            address = (POINTER) ROUNDUP (last_heap->start);
          h = last_heap;
        }

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
        {
          /* This is no clean solution - no idea how to do it better.  */
          if (r_alloc_freeze_level)
            return NIL;

          /* There is a bug here: if the above obtain call succeeded, but the
             relocate_blocs call below does not succeed, we need to free
             the memory that we got with obtain.  */

          /* Move all blocs upward.  */
          if (! relocate_blocs (first_bloc, h, new_bloc_start))
            return 0;

          /* Note that (POINTER)(h+1) <= new_bloc_start since
             get >= page_size, so the following does not destroy the heap
             header.  */
          for (b = last_bloc; b != NIL_BLOC; b = b->prev)
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }

          h->bloc_start = new_bloc_start;

          update_heap_bloc_correspondence (first_bloc, h);
        }
      if (h != first_heap)
        {
          /* Give up managing heaps below the one the new
             virtual_break_value points to.  */
          first_heap->prev = NIL_HEAP;
          first_heap->next = h->next;
          first_heap->start = h->start;
          first_heap->end = h->end;
          first_heap->free = h->free;
          first_heap->first_bloc = h->first_bloc;
          first_heap->last_bloc = h->last_bloc;
          first_heap->bloc_start = h->bloc_start;

          if (first_heap->next)
            first_heap->next->prev = first_heap;
          else
            last_heap = first_heap;
        }

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
        - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
        {
          excess -= extra_bytes;
          first_heap->bloc_start
            = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

          relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

          for (b = first_bloc; b != NIL_BLOC; b = b->next)
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
        {
          /* We found additional space below the first heap.  */
          first_heap->start = (POINTER) ((char *)virtual_break_value + size);
        }
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
                 ? last_bloc->data + last_bloc->size
                 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze is set, a best fit of unused blocs could be done
   before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
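
#if 0  /* Illustrative sketch of the calling convention above; the
          function and variable names here are hypothetical, not part
          of this file.  */
static void
example_r_alloc_use ()
{
  POINTER buf;

  if (r_alloc (&buf, 1024) == 0)
    return;			/* Allocation failed; buf is zero.  */

  /* Use the 1024 bytes through buf.  Because &buf was registered as
     the bloc's variable, any later relocation rewrites buf for us.  */

  r_alloc_free (&buf);		/* Frees the bloc and stores 0 in buf.  */
}
#endif /* 0 */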

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no bloc allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocs above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
          && r_alloc_freeze_level == 0)
        {
          resize_bloc (bloc, MEM_ROUNDUP (size));
          /* Never mind if this fails, just do nothing...  */
          /* It *should* be infallible!  */
        }
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
        {
          bloc_ptr new_bloc;
          new_bloc = get_bloc (MEM_ROUNDUP (size));
          if (new_bloc)
            {
              new_bloc->variable = ptr;
              *ptr = new_bloc->data;
              bloc->variable = (POINTER *) NIL;
            }
          else
            return NIL;
        }
      else
        {
          if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
            return NIL;
        }
    }
  return *ptr;
}
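
#if 0  /* Illustrative sketch of resizing; hypothetical names, not part
          of this file.  */
static void
example_r_re_alloc_use ()
{
  POINTER buf;

  if (r_alloc (&buf, 512) == 0)
    return;

  /* Grow the bloc to 2048 bytes.  On failure this returns zero and
     leaves buf pointing at the old, still-valid 512-byte bloc.  */
  if (r_re_alloc (&buf, 2048) == 0)
    {
      r_alloc_free (&buf);
      return;
    }

  r_alloc_free (&buf);
}
#endif /* 0 */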

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the
     resize and bcopy are done only once.  Afterwards, all unreferenced
     blocs are already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
        if (!(*b)->variable)
          free_bloc (*b);
        else
          b = &(*b)->next;
    }
}
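
#if 0  /* Illustrative sketch of freeze/thaw pairing; hypothetical
          names, not part of this file.  */
static void
example_freeze_thaw ()
{
  /* Reserve about 64K of non-relocatable space, then pin all blocs.
     While frozen, blocs are guaranteed not to move, but malloc may
     have to return a null pointer if the reserve runs out.  */
  r_alloc_freeze (65536);

  /* ... code that holds raw pointers into relocatable blocs ...  */

  r_alloc_thaw ();	/* Unpin; unused blocs are collected here.  */
}
#endif /* 0 */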

\f
/* The hook `malloc' uses for the function which gets more space
   from the system.  */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

#ifdef DOUG_LEA_MALLOC
  mallopt (M_TOP_PAD, 64 * 4096);
#else
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}

#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}
#endif

#ifdef DEBUG
#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
         the heap start has any sort of alignment.
         Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
        {
          assert (ph->end < h->start);
          assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
        }

      if (h->bloc_start <= break_value && break_value <= h->end)
        found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
        {
          if (h->bloc_start <= b->data && b->data + b->size <= h->end)
            break;
          ph = h;
        }

      assert (h);

      if (pb && pb->data + pb->size != b->data)
        {
          assert (ph && b->data == h->bloc_start);
          while (ph)
            {
              if (ph->bloc_start <= pb->data
                  && pb->data + pb->size <= ph->end)
                {
                  assert (pb->data + pb->size + b->size > ph->end);
                  break;
                }
              else
                {
                  assert (ph->bloc_start + b->size > ph->end);
                }
              ph = ph->prev;
            }
        }
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */