1 /* Block-relocating memory allocator.
2 Copyright (C) 1993, 1995 Free Software Foundation, Inc.
3
4 This file is part of GNU Emacs.
5
6 GNU Emacs is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU Emacs is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU Emacs; see the file COPYING. If not, write to
18 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
20
21 /* NOTES:
22
23 Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
24 rather than all of them. This means allowing for a possible
25 hole between the first bloc and the end of malloc storage. */
26
27 #ifdef emacs
28
29 #include <config.h>
30 #include "lisp.h" /* Needed for VALBITS. */
31
32 #undef NULL
33
34 /* The important properties of this type are that 1) it's a pointer, and
35 2) arithmetic on it should work as if the size of the object pointed
36 to has a size of 1. */
37 #if 0 /* Arithmetic on void* is a GCC extension. */
38 #ifdef __STDC__
39 typedef void *POINTER;
40 #else
41
42 #ifdef HAVE_CONFIG_H
43 #include "config.h"
44 #endif
45
46 typedef char *POINTER;
47
48 #endif
49 #endif /* 0 */
50
51 /* Unconditionally use char * for this. */
52 typedef char *POINTER;
53
54 typedef unsigned long SIZE;
55
56 /* Declared in dispnew.c, this version doesn't screw up if regions
57 overlap. */
58 extern void safe_bcopy ();
59
60 #ifdef DOUG_LEA_MALLOC
61 #define M_TOP_PAD -2
62 extern int mallopt ();
63 #else
64 extern int __malloc_extra_blocks;
65 #endif
66
67 #else /* not emacs */
68
69 #include <stddef.h>
70
71 typedef size_t SIZE;
72 typedef void *POINTER;
73
74 #include <unistd.h>
75 #include <malloc.h>
76 #include <string.h>
77
78 #define safe_bcopy(x, y, z) memmove (y, x, z)
79 #define bzero(x, len) memset (x, 0, len)
80
81 #endif /* not emacs */
82
83 #include "getpagesize.h"
84
85 #define NIL ((POINTER) 0)
86
87 /* A flag to indicate whether we have initialized ralloc yet. For
88 Emacs's sake, please do not make this local to malloc_init; on some
89 machines, the dumping procedure makes all static variables
90 read-only. On these machines, the word static is #defined to be
91 the empty string, meaning that r_alloc_initialized becomes an
92 automatic variable, and loses its value each time Emacs is started up. */
93 static int r_alloc_initialized = 0;
94
95 static void r_alloc_init ();
96 \f
97 /* Declarations for working with the malloc, ralloc, and system breaks. */
98
99 /* Function to set the real break value. */
100 static POINTER (*real_morecore) ();
101
102 /* The break value, as seen by malloc. */
103 static POINTER virtual_break_value;
104
105 /* The address of the end of the last data in use by ralloc,
106 including relocatable blocs as well as malloc data. */
107 static POINTER break_value;
108
109 /* This is the size of a page. We round memory requests to this boundary. */
110 static int page_size;
111
112 /* Whenever we get memory from the system, get this many extra bytes. This
113 must be a multiple of page_size. */
114 static int extra_bytes;
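
/* r_alloc_init below sets this to ROUNDUP (50000); obtain and
relinquish use it as hysteresis, so the break moves in large steps
rather than on every request. */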
115
116 /* Macros for rounding. Note that rounding to any value is possible
117 by changing the definition of PAGE. */
118 #define PAGE (getpagesize ())
119 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
120 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
121 & ~(page_size - 1))
122 #define ROUND_TO_PAGE(addr) ((addr) & (~(page_size - 1)))
123
124 #define MEM_ALIGN sizeof(double)
125 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
126 & ~(MEM_ALIGN - 1))
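
/* Worked example with illustrative values (assuming a 4096-byte page
and 8-byte doubles): ALIGNED (0x2000) is true, ALIGNED (0x2010) is
false; ROUNDUP (5000) yields 8192; ROUND_TO_PAGE (0x2010) yields
0x2000; MEM_ROUNDUP (13) yields 16. */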
127 \f
128 /* Data structures of heaps and blocs. */
129
130 /* The relocatable objects, or blocs, and the malloc data
131 both reside within one or more heaps.
132 Each heap contains malloc data, running from `start' to `bloc_start',
133 and relocatable objects, running from `bloc_start' to `free'.
134
135 Relocatable objects may relocate within the same heap
136 or may move into another heap; the heaps themselves may grow
137 but they never move.
138
139 We try to make just one heap and make it larger as necessary.
140 But sometimes we can't do that, because we can't get contiguous
141 space to add onto the heap. When that happens, we start a new heap. */
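
/* An illustrative picture of one heap's range, restating the comment
above (start, bloc_start, free and end are the struct heap fields
defined just below):

start            bloc_start                   free           end
|  malloc data   |  relocatable blocs in use  |    unused     |
*/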
142
143 typedef struct heap
144 {
145 struct heap *next;
146 struct heap *prev;
147 /* Start of memory range of this heap. */
148 POINTER start;
149 /* End of memory range of this heap. */
150 POINTER end;
151 /* Start of relocatable data in this heap. */
152 POINTER bloc_start;
153 /* Start of unused space in this heap. */
154 POINTER free;
155 /* First bloc in this heap. */
156 struct bp *first_bloc;
157 /* Last bloc in this heap. */
158 struct bp *last_bloc;
159 } *heap_ptr;
160
161 #define NIL_HEAP ((heap_ptr) 0)
162 #define HEAP_PTR_SIZE (sizeof (struct heap))
163
164 /* This is the first heap object.
165 If we need additional heap objects, each one resides at the beginning of
166 the space it covers. */
167 static struct heap heap_base;
168
169 /* Head and tail of the list of heaps. */
170 static heap_ptr first_heap, last_heap;
171
172 /* These structures are allocated in the malloc arena.
173 The linked list is kept in order of increasing '.data' members.
174 The data blocks abut each other; if b->next is non-nil, then
175 b->data + b->size == b->next->data.
176
177 An element with variable==NIL denotes a freed block, which has not yet
178 been collected. They may only appear while r_alloc_freeze_level > 0, and
179 will be freed when the arena is thawed. Currently, these blocs are not
180 reusable while the arena is frozen. Very inefficient. */
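
/* For example, if blocs A, B and C are adjacent on the list and lie in
the same heap, the rule above means A->data + A->size == B->data and
B->data + B->size == C->data; a bloc that does not fit is placed at the
next heap's bloc_start instead (see relocate_blocs). */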
181
182 typedef struct bp
183 {
184 struct bp *next;
185 struct bp *prev;
186 POINTER *variable;
187 POINTER data;
188 SIZE size;
189 POINTER new_data; /* temporarily used for relocation */
190 struct heap *heap; /* Heap this bloc is in. */
191 } *bloc_ptr;
192
193 #define NIL_BLOC ((bloc_ptr) 0)
194 #define BLOC_PTR_SIZE (sizeof (struct bp))
195
196 /* Head and tail of the list of relocatable blocs. */
197 static bloc_ptr first_bloc, last_bloc;
198
199 static int use_relocatable_buffers;
200
201 /* If >0, no relocation whatsoever takes place. */
202 static int r_alloc_freeze_level;
203
204 \f
205 /* Functions to get and return memory from the system. */
206
207 /* Find the heap that ADDRESS falls within. */
208
209 static heap_ptr
210 find_heap (address)
211 POINTER address;
212 {
213 heap_ptr heap;
214
215 for (heap = last_heap; heap; heap = heap->prev)
216 {
217 if (heap->start <= address && address <= heap->end)
218 return heap;
219 }
220
221 return NIL_HEAP;
222 }
223
224 /* Find SIZE bytes of space in a heap.
225 Try to get them at ADDRESS (which must fall within some heap's range)
226 if we can get that many within one heap.
227
228 If enough space is not presently available in our reserve, this means
229 getting more page-aligned space from the system. If the returned space
230 is not contiguous to the last heap, allocate a new heap, and append it
231 to the heap list.
232
233 obtain does not try to keep track of whether space is in use
234 or not in use. It just returns the address of SIZE bytes that
235 fall within a single heap. If you call obtain twice in a row
236 with the same arguments, you typically get the same value.
237 It's the caller's responsibility to keep track of what space is in use.
238
239 Return the address of the space if all went well, or zero if we couldn't
240 allocate the memory. */
241
242 static POINTER
243 obtain (address, size)
244 POINTER address;
245 SIZE size;
246 {
247 heap_ptr heap;
248 SIZE already_available;
249
250 /* Find the heap that ADDRESS falls within. */
251 for (heap = last_heap; heap; heap = heap->prev)
252 {
253 if (heap->start <= address && address <= heap->end)
254 break;
255 }
256
257 if (! heap)
258 abort ();
259
260 /* If we can't fit SIZE bytes in that heap,
261 try successive later heaps. */
262 while (heap && address + size > heap->end)
263 {
264 heap = heap->next;
265 if (heap == NIL_HEAP)
266 break;
267 address = heap->bloc_start;
268 }
269
270 /* If we can't fit them within any existing heap,
271 get more space. */
272 if (heap == NIL_HEAP)
273 {
274 POINTER new = (*real_morecore)(0);
275 SIZE get;
276
277 already_available = (char *)last_heap->end - (char *)address;
278
279 if (new != last_heap->end)
280 {
281 /* Someone else called sbrk. Make a new heap. */
282
283 heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
284 POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));
285
286 if ((*real_morecore) (bloc_start - new) != new)
287 return 0;
288
289 new_heap->start = new;
290 new_heap->end = bloc_start;
291 new_heap->bloc_start = bloc_start;
292 new_heap->free = bloc_start;
293 new_heap->next = NIL_HEAP;
294 new_heap->prev = last_heap;
295 new_heap->first_bloc = NIL_BLOC;
296 new_heap->last_bloc = NIL_BLOC;
297 last_heap->next = new_heap;
298 last_heap = new_heap;
299
300 address = bloc_start;
301 already_available = 0;
302 }
303
304 /* Add space to the last heap (which we may have just created).
305 Get some extra, so we can come here less often. */
306
307 get = size + extra_bytes - already_available;
308 get = (char *) ROUNDUP ((char *)last_heap->end + get)
309 - (char *) last_heap->end;
310
311 if ((*real_morecore) (get) != last_heap->end)
312 return 0;
313
314 last_heap->end += get;
315 }
316
317 return address;
318 }
319
320 /* Return unused heap space to the system
321 if there is a lot of unused space now.
322 This can make the last heap smaller;
323 it can also eliminate the last heap entirely. */
324
325 static void
326 relinquish ()
327 {
328 register heap_ptr h;
329 int excess = 0;
330
331 /* Add the amount of space beyond break_value
332 in all heaps which extend beyond break_value at all. */
333
334 for (h = last_heap; h && break_value < h->end; h = h->prev)
335 {
336 excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
337 ? h->bloc_start : break_value);
338 }
339
340 if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
341 {
342 /* Keep extra_bytes worth of empty space.
343 And don't free anything unless we can free at least extra_bytes. */
344 excess -= extra_bytes;
345
346 if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
347 {
348 /* This heap should have no blocs in it. */
349 if (last_heap->first_bloc != NIL_BLOC
350 || last_heap->last_bloc != NIL_BLOC)
351 abort ();
352
353 /* Return the last heap, with its header, to the system. */
354 excess = (char *)last_heap->end - (char *)last_heap->start;
355 last_heap = last_heap->prev;
356 last_heap->next = NIL_HEAP;
357 }
358 else
359 {
360 excess = (char *) last_heap->end
361 - (char *) ROUNDUP ((char *)last_heap->end - excess);
362 last_heap->end -= excess;
363 }
364
365 if ((*real_morecore) (- excess) == 0)
366 abort ();
367 }
368 }
369
370 /* Return the total size in use by the relocating allocator,
371 above where malloc gets space. */
372
373 long
374 r_alloc_size_in_use ()
375 {
376 return break_value - virtual_break_value;
377 }
378 \f
379 /* The meat - allocating, freeing, and relocating blocs. */
380
381 /* Find the bloc referenced by the address in PTR. Returns a pointer
382 to that bloc, or NIL_BLOC if there is none. */
383
384 static bloc_ptr
385 find_bloc (ptr)
386 POINTER *ptr;
387 {
388 register bloc_ptr p = first_bloc;
389
390 while (p != NIL_BLOC)
391 {
392 if (p->variable == ptr && p->data == *ptr)
393 return p;
394
395 p = p->next;
396 }
397
398 return p;
399 }
400
401 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
402 Returns a pointer to the new bloc, or zero if we couldn't allocate
403 memory for the new bloc. */
404
405 static bloc_ptr
406 get_bloc (size)
407 SIZE size;
408 {
409 register bloc_ptr new_bloc;
410 register heap_ptr heap;
411
412 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
413 || ! (new_bloc->data = obtain (break_value, size)))
414 {
415 if (new_bloc)
416 free (new_bloc);
417
418 return 0;
419 }
420
421 break_value = new_bloc->data + size;
422
423 new_bloc->size = size;
424 new_bloc->next = NIL_BLOC;
425 new_bloc->variable = (POINTER *) NIL;
426 new_bloc->new_data = 0;
427
428 /* Record in the heap that this space is in use. */
429 heap = find_heap (new_bloc->data);
430 heap->free = break_value;
431
432 /* Maintain the correspondence between heaps and blocs. */
433 new_bloc->heap = heap;
434 heap->last_bloc = new_bloc;
435 if (heap->first_bloc == NIL_BLOC)
436 heap->first_bloc = new_bloc;
437
438 /* Put this bloc on the doubly-linked list of blocs. */
439 if (first_bloc)
440 {
441 new_bloc->prev = last_bloc;
442 last_bloc->next = new_bloc;
443 last_bloc = new_bloc;
444 }
445 else
446 {
447 first_bloc = last_bloc = new_bloc;
448 new_bloc->prev = NIL_BLOC;
449 }
450
451 return new_bloc;
452 }
453 \f
454 /* Calculate new locations of blocs in the list beginning with BLOC,
455 relocating it to start at ADDRESS, in heap HEAP. If enough space is
456 not presently available in our reserve, call obtain for
457 more space.
458
459 Store the new location of each bloc in its new_data field.
460 Do not touch the contents of blocs or break_value. */
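
/* Callers complete the move afterwards: for each bloc they safe_bcopy
b->data to b->new_data and then set *b->variable and b->data to
b->new_data (see resize_bloc and r_alloc_sbrk below). */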
461
462 static int
463 relocate_blocs (bloc, heap, address)
464 bloc_ptr bloc;
465 heap_ptr heap;
466 POINTER address;
467 {
468 register bloc_ptr b = bloc;
469
470 /* No need to ever call this if arena is frozen, bug somewhere! */
471 if (r_alloc_freeze_level)
472 abort();
473
474 while (b)
475 {
476 /* If bloc B won't fit within HEAP,
477 move to the next heap and try again. */
478 while (heap && address + b->size > heap->end)
479 {
480 heap = heap->next;
481 if (heap == NIL_HEAP)
482 break;
483 address = heap->bloc_start;
484 }
485
486 /* If BLOC won't fit in any heap,
487 get enough new space to hold BLOC and all following blocs. */
488 if (heap == NIL_HEAP)
489 {
490 register bloc_ptr tb = b;
491 register SIZE s = 0;
492
493 /* Add up the size of all the following blocs. */
494 while (tb != NIL_BLOC)
495 {
496 if (tb->variable)
497 s += tb->size;
498
499 tb = tb->next;
500 }
501
502 /* Get that space. */
503 address = obtain (address, s);
504 if (address == 0)
505 return 0;
506
507 heap = last_heap;
508 }
509
510 /* Record the new address of this bloc
511 and update where the next bloc can start. */
512 b->new_data = address;
513 if (b->variable)
514 address += b->size;
515 b = b->next;
516 }
517
518 return 1;
519 }
520
521 /* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
522 This is necessary if we put the memory of BLOC
523 before that of BEFORE. */
524
525 static void
526 reorder_bloc (bloc, before)
527 bloc_ptr bloc, before;
528 {
529 bloc_ptr prev, next;
530
531 /* Splice BLOC out from where it is. */
532 prev = bloc->prev;
533 next = bloc->next;
534
535 if (prev)
536 prev->next = next;
537 if (next)
538 next->prev = prev;
539
540 /* Splice it in before BEFORE. */
541 prev = before->prev;
542
543 if (prev)
544 prev->next = bloc;
545 bloc->prev = prev;
546
547 before->prev = bloc;
548 bloc->next = before;
549 }
550 \f
551 /* Update the records of which heaps contain which blocs, starting
552 with heap HEAP and bloc BLOC. */
553
554 static void
555 update_heap_bloc_correspondence (bloc, heap)
556 bloc_ptr bloc;
557 heap_ptr heap;
558 {
559 register bloc_ptr b;
560
561 /* Initialize HEAP's status to reflect blocs before BLOC. */
562 if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
563 {
564 /* The previous bloc is in HEAP. */
565 heap->last_bloc = bloc->prev;
566 heap->free = bloc->prev->data + bloc->prev->size;
567 }
568 else
569 {
570 /* HEAP contains no blocs before BLOC. */
571 heap->first_bloc = NIL_BLOC;
572 heap->last_bloc = NIL_BLOC;
573 heap->free = heap->bloc_start;
574 }
575
576 /* Advance through blocs one by one. */
577 for (b = bloc; b != NIL_BLOC; b = b->next)
578 {
579 /* Advance through heaps, marking them empty,
580 till we get to the one that B is in. */
581 while (heap)
582 {
583 if (heap->bloc_start <= b->data && b->data <= heap->end)
584 break;
585 heap = heap->next;
586 /* We know HEAP is not null now,
587 because there has to be space for bloc B. */
588 heap->first_bloc = NIL_BLOC;
589 heap->last_bloc = NIL_BLOC;
590 heap->free = heap->bloc_start;
591 }
592
593 /* Update HEAP's status for bloc B. */
594 heap->free = b->data + b->size;
595 heap->last_bloc = b;
596 if (heap->first_bloc == NIL_BLOC)
597 heap->first_bloc = b;
598
599 /* Record that B is in HEAP. */
600 b->heap = heap;
601 }
602
603 /* If there are any remaining heaps and no blocs left,
604 mark those heaps as empty. */
605 heap = heap->next;
606 while (heap)
607 {
608 heap->first_bloc = NIL_BLOC;
609 heap->last_bloc = NIL_BLOC;
610 heap->free = heap->bloc_start;
611 heap = heap->next;
612 }
613 }
614 \f
615 /* Resize BLOC to SIZE bytes. This relocates the blocs
616 that come after BLOC in memory. */
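
/* When growing, the blocs after BLOC are shifted upward and the new
bytes at the end of BLOC are cleared with bzero; when shrinking, the
following blocs are shifted downward instead. */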
617
618 static int
619 resize_bloc (bloc, size)
620 bloc_ptr bloc;
621 SIZE size;
622 {
623 register bloc_ptr b;
624 heap_ptr heap;
625 POINTER address;
626 SIZE old_size;
627
628 /* No need to ever call this if arena is frozen, bug somewhere! */
629 if (r_alloc_freeze_level)
630 abort();
631
632 if (bloc == NIL_BLOC || size == bloc->size)
633 return 1;
634
635 for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
636 {
637 if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
638 break;
639 }
640
641 if (heap == NIL_HEAP)
642 abort ();
643
644 old_size = bloc->size;
645 bloc->size = size;
646
647 /* Note that bloc could be moved into the previous heap. */
648 address = (bloc->prev ? bloc->prev->data + bloc->prev->size
649 : first_heap->bloc_start);
650 while (heap)
651 {
652 if (heap->bloc_start <= address && address <= heap->end)
653 break;
654 heap = heap->prev;
655 }
656
657 if (! relocate_blocs (bloc, heap, address))
658 {
659 bloc->size = old_size;
660 return 0;
661 }
662
663 if (size > old_size)
664 {
665 for (b = last_bloc; b != bloc; b = b->prev)
666 {
667 if (!b->variable)
668 {
669 b->size = 0;
670 b->data = b->new_data;
671 }
672 else
673 {
674 safe_bcopy (b->data, b->new_data, b->size);
675 *b->variable = b->data = b->new_data;
676 }
677 }
678 if (!bloc->variable)
679 {
680 bloc->size = 0;
681 bloc->data = bloc->new_data;
682 }
683 else
684 {
685 safe_bcopy (bloc->data, bloc->new_data, old_size);
686 bzero (bloc->new_data + old_size, size - old_size);
687 *bloc->variable = bloc->data = bloc->new_data;
688 }
689 }
690 else
691 {
692 for (b = bloc; b != NIL_BLOC; b = b->next)
693 {
694 if (!b->variable)
695 {
696 b->size = 0;
697 b->data = b->new_data;
698 }
699 else
700 {
701 safe_bcopy (b->data, b->new_data, b->size);
702 *b->variable = b->data = b->new_data;
703 }
704 }
705 }
706
707 update_heap_bloc_correspondence (bloc, heap);
708
709 break_value = (last_bloc ? last_bloc->data + last_bloc->size
710 : first_heap->bloc_start);
711 return 1;
712 }
713 \f
714 /* Free BLOC from the chain of blocs, relocating any blocs above it.
715 This may return space to the system. */
716
717 static void
718 free_bloc (bloc)
719 bloc_ptr bloc;
720 {
721 heap_ptr heap = bloc->heap;
722
723 if (r_alloc_freeze_level)
724 {
725 bloc->variable = (POINTER *) NIL;
726 return;
727 }
728
729 resize_bloc (bloc, 0);
730
731 if (bloc == first_bloc && bloc == last_bloc)
732 {
733 first_bloc = last_bloc = NIL_BLOC;
734 }
735 else if (bloc == last_bloc)
736 {
737 last_bloc = bloc->prev;
738 last_bloc->next = NIL_BLOC;
739 }
740 else if (bloc == first_bloc)
741 {
742 first_bloc = bloc->next;
743 first_bloc->prev = NIL_BLOC;
744 }
745 else
746 {
747 bloc->next->prev = bloc->prev;
748 bloc->prev->next = bloc->next;
749 }
750
751 /* Update the records of which blocs are in HEAP. */
752 if (heap->first_bloc == bloc)
753 {
754 if (bloc->next != 0 && bloc->next->heap == heap)
755 heap->first_bloc = bloc->next;
756 else
757 heap->first_bloc = heap->last_bloc = NIL_BLOC;
758 }
759 if (heap->last_bloc == bloc)
760 {
761 if (bloc->prev != 0 && bloc->prev->heap == heap)
762 heap->last_bloc = bloc->prev;
763 else
764 heap->first_bloc = heap->last_bloc = NIL_BLOC;
765 }
766
767 relinquish ();
768 free (bloc);
769 }
770 \f
771 /* Interface routines. */
772
773 /* Obtain SIZE bytes of storage from the free pool, or the system, as
774 necessary. If relocatable blocs are in use, this means relocating
775 them. This function gets plugged into the GNU malloc's __morecore
776 hook.
777
778 We provide hysteresis, never relocating by less than extra_bytes.
779
780 If we're out of memory, we should return zero, to imitate the other
781 __morecore hook values - in particular, __default_morecore in the
782 GNU malloc package. */
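
/* A rough sketch of the caller's side (an assumption about how GNU
malloc uses the __morecore hook, not code from this file): when its
arena is exhausted, malloc does roughly

POINTER more = (*__morecore) (request);    <- here, r_alloc_sbrk
if (more == 0)
report an out-of-memory failure;

so returning zero makes malloc fail cleanly, just as __default_morecore
does when sbrk itself fails. */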
783
784 POINTER
785 r_alloc_sbrk (size)
786 long size;
787 {
788 register bloc_ptr b;
789 POINTER address;
790
791 if (! r_alloc_initialized)
792 r_alloc_init ();
793
794 if (! use_relocatable_buffers)
795 return (*real_morecore) (size);
796
797 if (size == 0)
798 return virtual_break_value;
799
800 if (size > 0)
801 {
802 /* Allocate a page-aligned space. GNU malloc would reclaim an
803 extra space if we passed an unaligned one. But we cannot
804 always find a space which is contiguous to the previous one. */
805 POINTER new_bloc_start;
806 heap_ptr h = first_heap;
807 SIZE get = ROUNDUP (size);
808
809 address = (POINTER) ROUNDUP (virtual_break_value);
810
811 /* Search the list upward for a heap which is large enough. */
812 while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
813 {
814 h = h->next;
815 if (h == NIL_HEAP)
816 break;
817 address = (POINTER) ROUNDUP (h->start);
818 }
819
820 /* If not found, obtain more space. */
821 if (h == NIL_HEAP)
822 {
823 get += extra_bytes + page_size;
824
825 if (! obtain (address, get))
826 return 0;
827
828 if (first_heap == last_heap)
829 address = (POINTER) ROUNDUP (virtual_break_value);
830 else
831 address = (POINTER) ROUNDUP (last_heap->start);
832 h = last_heap;
833 }
834
835 new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);
836
837 if (first_heap->bloc_start < new_bloc_start)
838 {
839 /* This is no clean solution - no idea how to do it better. */
840 if (r_alloc_freeze_level)
841 return NIL;
842
843 /* There is a bug here: if the above obtain call succeeded, but the
844 relocate_blocs call below does not succeed, we need to free
845 the memory that we got with obtain. */
846
847 /* Move all blocs upward. */
848 if (! relocate_blocs (first_bloc, h, new_bloc_start))
849 return 0;
850
851 /* Note that (POINTER)(h+1) <= new_bloc_start since
852 get >= page_size, so the following does not destroy the heap
853 header. */
854 for (b = last_bloc; b != NIL_BLOC; b = b->prev)
855 {
856 safe_bcopy (b->data, b->new_data, b->size);
857 *b->variable = b->data = b->new_data;
858 }
859
860 h->bloc_start = new_bloc_start;
861
862 update_heap_bloc_correspondence (first_bloc, h);
863 }
864 if (h != first_heap)
865 {
866 /* Give up managing heaps below the one the new
867 virtual_break_value points to. */
868 first_heap->prev = NIL_HEAP;
869 first_heap->next = h->next;
870 first_heap->start = h->start;
871 first_heap->end = h->end;
872 first_heap->free = h->free;
873 first_heap->first_bloc = h->first_bloc;
874 first_heap->last_bloc = h->last_bloc;
875 first_heap->bloc_start = h->bloc_start;
876
877 if (first_heap->next)
878 first_heap->next->prev = first_heap;
879 else
880 last_heap = first_heap;
881 }
882
883 bzero (address, size);
884 }
885 else /* size < 0 */
886 {
887 SIZE excess = (char *)first_heap->bloc_start
888 - ((char *)virtual_break_value + size);
889
890 address = virtual_break_value;
891
892 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
893 {
894 excess -= extra_bytes;
895 first_heap->bloc_start
896 = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);
897
898 relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);
899
900 for (b = first_bloc; b != NIL_BLOC; b = b->next)
901 {
902 safe_bcopy (b->data, b->new_data, b->size);
903 *b->variable = b->data = b->new_data;
904 }
905 }
906
907 if ((char *)virtual_break_value + size < (char *)first_heap->start)
908 {
909 /* We found an additional space below the first heap */
910 first_heap->start = (POINTER) ((char *)virtual_break_value + size);
911 }
912 }
913
914 virtual_break_value = (POINTER) ((char *)address + size);
915 break_value = (last_bloc
916 ? last_bloc->data + last_bloc->size
917 : first_heap->bloc_start);
918 if (size < 0)
919 relinquish ();
920
921 return address;
922 }
923
924 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to
925 the data is returned in *PTR. PTR is thus the address of some variable
926 which will use the data area.
927
928 The allocation of 0 bytes is valid.
929 In case r_alloc_freeze is set, a best fit of unused blocs could be done
930 before allocating a new area. Not yet done.
931
932 If we can't allocate the necessary memory, set *PTR to zero, and
933 return zero. */
934
935 POINTER
936 r_alloc (ptr, size)
937 POINTER *ptr;
938 SIZE size;
939 {
940 register bloc_ptr new_bloc;
941
942 if (! r_alloc_initialized)
943 r_alloc_init ();
944
945 new_bloc = get_bloc (MEM_ROUNDUP (size));
946 if (new_bloc)
947 {
948 new_bloc->variable = ptr;
949 *ptr = new_bloc->data;
950 }
951 else
952 *ptr = 0;
953
954 return *ptr;
955 }
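
#if 0
/* Illustrative usage sketch, never compiled and not part of the
original file: how a caller might drive r_alloc, r_re_alloc and
r_alloc_free. The function name and the sizes are hypothetical. */
static void
example_relocatable_buffer ()
{
  POINTER text;

  if (r_alloc (&text, 1000) == 0)       /* text now points at 1000 bytes */
    abort ();
  if (r_re_alloc (&text, 4000) == 0)    /* may move the data; text is updated */
    abort ();
  r_alloc_free (&text);                 /* stores 0 in text */
}
#endif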
956
957 /* Free a bloc of relocatable storage whose data is pointed to by PTR.
958 Store 0 in *PTR to show there's no block allocated. */
959
960 void
961 r_alloc_free (ptr)
962 register POINTER *ptr;
963 {
964 register bloc_ptr dead_bloc;
965
966 if (! r_alloc_initialized)
967 r_alloc_init ();
968
969 dead_bloc = find_bloc (ptr);
970 if (dead_bloc == NIL_BLOC)
971 abort ();
972
973 free_bloc (dead_bloc);
974 *ptr = 0;
975
976 #ifdef emacs
977 refill_memory_reserve ();
978 #endif
979 }
980
981 /* Given a pointer at address PTR to relocatable data, resize it to SIZE.
982 Do this by shifting all blocks above this one up in memory, unless
983 SIZE is less than or equal to the current bloc size, in which case
984 do nothing.
985
986 In case r_alloc_freeze is set, a new bloc is allocated, and the
987 memory copied to it. Not very efficient. We could traverse the
988 bloc_list for a best fit of free blocs first.
989
990 Change *PTR to reflect the new bloc, and return this value.
991
992 If more memory cannot be allocated, then leave *PTR unchanged, and
993 return zero. */
994
995 POINTER
996 r_re_alloc (ptr, size)
997 POINTER *ptr;
998 SIZE size;
999 {
1000 register bloc_ptr bloc;
1001
1002 if (! r_alloc_initialized)
1003 r_alloc_init ();
1004
1005 if (!*ptr)
1006 return r_alloc (ptr, size);
1007 if (!size)
1008 {
1009 r_alloc_free (ptr);
1010 return r_alloc (ptr, 0);
1011 }
1012
1013 bloc = find_bloc (ptr);
1014 if (bloc == NIL_BLOC)
1015 abort ();
1016
1017 if (size < bloc->size)
1018 {
1019 /* Wouldn't it be useful to actually resize the bloc here? */
1020 /* I think so too, but not if it's too expensive... */
1021 if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
1022 && r_alloc_freeze_level == 0)
1023 {
1024 resize_bloc (bloc, MEM_ROUNDUP (size));
1025 /* Never mind if this fails, just do nothing... */
1026 /* It *should* be infallible! */
1027 }
1028 }
1029 else if (size > bloc->size)
1030 {
1031 if (r_alloc_freeze_level)
1032 {
1033 bloc_ptr new_bloc;
1034 new_bloc = get_bloc (MEM_ROUNDUP (size));
1035 if (new_bloc)
1036 {
1037 new_bloc->variable = ptr;
1038 *ptr = new_bloc->data;
1039 bloc->variable = (POINTER *) NIL;
1040 }
1041 else
1042 return NIL;
1043 }
1044 else
1045 {
1046 if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
1047 return NIL;
1048 }
1049 }
1050 return *ptr;
1051 }
1052
1053 /* Disable relocations, after making room for at least SIZE bytes
1054 of non-relocatable heap if possible. The relocatable blocs are
1055 guaranteed to hold still until thawed, even if this means that
1056 malloc must return a null pointer. */
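
/* Freezes nest: each r_alloc_freeze must be balanced by an
r_alloc_thaw. While frozen, free_bloc merely marks a bloc unused
(variable == NIL) and r_re_alloc grows a bloc by allocating a fresh
one; the marked blocs are actually reclaimed by r_alloc_thaw. */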
1057
1058 void
1059 r_alloc_freeze (size)
1060 long size;
1061 {
1062 if (! r_alloc_initialized)
1063 r_alloc_init ();
1064
1065 /* If already frozen, we can't make any more room, so don't try. */
1066 if (r_alloc_freeze_level > 0)
1067 size = 0;
1068 /* If we can't get the amount requested, half is better than nothing. */
1069 while (size > 0 && r_alloc_sbrk (size) == 0)
1070 size /= 2;
1071 ++r_alloc_freeze_level;
1072 if (size > 0)
1073 r_alloc_sbrk (-size);
1074 }
1075
1076 void
1077 r_alloc_thaw ()
1078 {
1079
1080 if (! r_alloc_initialized)
1081 r_alloc_init ();
1082
1083 if (--r_alloc_freeze_level < 0)
1084 abort ();
1085
1086 /* This frees all unused blocs. It is not too inefficient, as the resize
1087 and bcopy are done only once. Afterwards, all unreferenced blocs are
1088 already shrunk to zero size. */
1089 if (!r_alloc_freeze_level)
1090 {
1091 bloc_ptr *b = &first_bloc;
1092 while (*b)
1093 if (!(*b)->variable)
1094 free_bloc (*b);
1095 else
1096 b = &(*b)->next;
1097 }
1098 }
1099
1100 \f
1101 /* The hook `malloc' uses for the function which gets more space
1102 from the system. */
1103 extern POINTER (*__morecore) ();
1104
1105 /* Initialize various things for memory allocation. */
1106
1107 static void
1108 r_alloc_init ()
1109 {
1110 if (r_alloc_initialized)
1111 return;
1112
1113 r_alloc_initialized = 1;
1114 real_morecore = __morecore;
1115 __morecore = r_alloc_sbrk;
1116
1117 first_heap = last_heap = &heap_base;
1118 first_heap->next = first_heap->prev = NIL_HEAP;
1119 first_heap->start = first_heap->bloc_start
1120 = virtual_break_value = break_value = (*real_morecore) (0);
1121 if (break_value == NIL)
1122 abort ();
1123
1124 page_size = PAGE;
1125 extra_bytes = ROUNDUP (50000);
1126
1127 #ifdef DOUG_LEA_MALLOC
1128 mallopt (M_TOP_PAD, 64 * 4096);
1129 #else
1130 /* Give GNU malloc's morecore some hysteresis
1131 so that we move all the relocatable blocks much less often. */
1132 __malloc_extra_blocks = 64;
1133 #endif
1134
1135 first_heap->end = (POINTER) ROUNDUP (first_heap->start);
1136
1137 /* The extra call to real_morecore guarantees that the end of the
1138 address space is a multiple of page_size, even if page_size is
1139 not really the page size of the system running the binary in
1140 which page_size is stored. This allows a binary to be built on a
1141 system with one page size and run on a system with a smaller page
1142 size. */
1143 (*real_morecore) (first_heap->end - first_heap->start);
1144
1145 /* Clear the rest of the last page; this memory is in our address space
1146 even though it is after the sbrk value. */
1147 /* Doubly true, with the additional call that explicitly adds the
1148 rest of that page to the address space. */
1149 bzero (first_heap->start, first_heap->end - first_heap->start);
1150 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
1151 use_relocatable_buffers = 1;
1152 }
1153 #ifdef DEBUG
1154 #include <assert.h>
1155
1156 void
1157 r_alloc_check ()
1158 {
1159 int found = 0;
1160 heap_ptr h, ph = 0;
1161 bloc_ptr b, pb = 0;
1162
1163 if (!r_alloc_initialized)
1164 return;
1165
1166 assert (first_heap);
1167 assert (last_heap->end <= (POINTER) sbrk (0));
1168 assert ((POINTER) first_heap < first_heap->start);
1169 assert (first_heap->start <= virtual_break_value);
1170 assert (virtual_break_value <= first_heap->end);
1171
1172 for (h = first_heap; h; h = h->next)
1173 {
1174 assert (h->prev == ph);
1175 assert ((POINTER) ROUNDUP (h->end) == h->end);
1176 #if 0 /* ??? The code in ralloc.c does not really try to ensure
1177 the heap start has any sort of alignment.
1178 Perhaps it should. */
1179 assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
1180 #endif
1181 assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
1182 assert (h->start <= h->bloc_start && h->bloc_start <= h->end);
1183
1184 if (ph)
1185 {
1186 assert (ph->end < h->start);
1187 assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
1188 }
1189
1190 if (h->bloc_start <= break_value && break_value <= h->end)
1191 found = 1;
1192
1193 ph = h;
1194 }
1195
1196 assert (found);
1197 assert (last_heap == ph);
1198
1199 for (b = first_bloc; b; b = b->next)
1200 {
1201 assert (b->prev == pb);
1202 assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
1203 assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);
1204
1205 ph = 0;
1206 for (h = first_heap; h; h = h->next)
1207 {
1208 if (h->bloc_start <= b->data && b->data + b->size <= h->end)
1209 break;
1210 ph = h;
1211 }
1212
1213 assert (h);
1214
1215 if (pb && pb->data + pb->size != b->data)
1216 {
1217 assert (ph && b->data == h->bloc_start);
1218 while (ph)
1219 {
1220 if (ph->bloc_start <= pb->data
1221 && pb->data + pb->size <= ph->end)
1222 {
1223 assert (pb->data + pb->size + b->size > ph->end);
1224 break;
1225 }
1226 else
1227 {
1228 assert (ph->bloc_start + b->size > ph->end);
1229 }
1230 ph = ph->prev;
1231 }
1232 }
1233 pb = b;
1234 }
1235
1236 assert (last_bloc == pb);
1237
1238 if (last_bloc)
1239 assert (last_bloc->data + last_bloc->size == break_value);
1240 else
1241 assert (first_heap->bloc_start == break_value);
1242 }
1243 #endif /* DEBUG */