*** empty log message ***
[bpt/emacs.git] / src / ralloc.c
CommitLineData
dcfdbac7 1/* Block-relocating memory allocator.
c6c5df7f 2 Copyright (C) 1993 Free Software Foundation, Inc.
dcfdbac7
JB
3
4This file is part of GNU Emacs.
5
6GNU Emacs is free software; you can redistribute it and/or modify
7it under the terms of the GNU General Public License as published by
8the Free Software Foundation; either version 1, or (at your option)
9any later version.
10
11GNU Emacs is distributed in the hope that it will be useful,
12but WITHOUT ANY WARRANTY; without even the implied warranty of
13MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14GNU General Public License for more details.
15
16You should have received a copy of the GNU General Public License
17along with GNU Emacs; see the file COPYING. If not, write to
18the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
19
20/* NOTES:
21
eb8c3be9 22 Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
dcfdbac7 23 rather than all of them. This means allowing for a possible
abe9ff32 24 hole between the first bloc and the end of malloc storage. */
dcfdbac7 25
2c46d29f 26#ifdef emacs
aef4d570 27
18160b98 28#include <config.h>
956ace37 29#include "lisp.h" /* Needed for VALBITS. */
2c46d29f 30
aef4d570
RM
31#undef NULL
32
f275fd9a
RS
33/* The important properties of this type are that 1) it's a pointer, and
34 2) arithmetic on it should work as if the size of the object pointed
35 to has a size of 1. */
a8c0e5ea 36#if 0 /* Arithmetic on void* is a GCC extension. */
f275fd9a
RS
37#ifdef __STDC__
38typedef void *POINTER;
39#else
1df181b6
RM
40
41#ifdef HAVE_CONFIG_H
42#include "config.h"
43#endif
44
f275fd9a 45typedef char *POINTER;
1df181b6 46
f275fd9a 47#endif
a8c0e5ea
RS
48#endif /* 0 */
49
50/* Unconditionally use char * for this. */
51typedef char *POINTER;
f275fd9a
RS
52
53typedef unsigned long SIZE;
54
2c46d29f
RS
55/* Declared in dispnew.c, this version doesn't screw up if regions
56 overlap. */
57extern void safe_bcopy ();
2c46d29f 58
aef4d570
RM
59#include "getpagesize.h"
60
61#else /* Not emacs. */
62
2c46d29f 63#include <stddef.h>
aef4d570 64
2c46d29f
RS
65typedef size_t SIZE;
66typedef void *POINTER;
aef4d570 67
aef4d570
RM
68#include <unistd.h>
69#include <malloc.h>
70#include <string.h>
71
2c46d29f 72#define safe_bcopy(x, y, z) memmove (y, x, z)
2c46d29f 73
aef4d570 74#endif /* emacs. */
dcfdbac7
JB
75
76#define NIL ((POINTER) 0)
77
2c46d29f
RS
78/* A flag to indicate whether we have initialized ralloc yet. For
79 Emacs's sake, please do not make this local to malloc_init; on some
80 machines, the dumping procedure makes all static variables
81 read-only. On these machines, the word static is #defined to be
82 the empty string, meaning that r_alloc_initialized becomes an
83 automatic variable, and loses its value each time Emacs is started up. */
84static int r_alloc_initialized = 0;
85
86static void r_alloc_init ();
dcfdbac7 87\f
956ace37
JB
88/* Declarations for working with the malloc, ralloc, and system breaks. */
89
abe9ff32 90/* Function to set the real break value. */
bbc60227 91static POINTER (*real_morecore) ();
dcfdbac7 92
abe9ff32 93/* The break value, as seen by malloc. */
dcfdbac7
JB
94static POINTER virtual_break_value;
95
abe9ff32
RS
96/* The address of the end of the last data in use by ralloc,
97 including relocatable blocs as well as malloc data. */
dcfdbac7
JB
98static POINTER break_value;
99
7516b7d5
RS
100/* This is the size of a page. We round memory requests to this boundary. */
101static int page_size;
102
ad3bb3d2
JB
103/* Whenever we get memory from the system, get this many extra bytes. This
104 must be a multiple of page_size. */
7516b7d5
RS
105static int extra_bytes;
106
dcfdbac7 107/* Macros for rounding. Note that rounding to any value is possible
abe9ff32 108 by changing the definition of PAGE. */
dcfdbac7 109#define PAGE (getpagesize ())
f7a009a5
RM
110#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
111#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
112 & ~(page_size - 1))
7516b7d5 113#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))
e429caa2
KH
114
115#define MEM_ALIGN sizeof(double)
116#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
117 & ~(MEM_ALIGN - 1))
118\f
abe9ff32
RS
119/* Data structures of heaps and blocs. */
120
121/* The relocatable objects, or blocs, and the malloc data
122 both reside within one or more heaps.
123 Each heap contains malloc data, running from `start' to `bloc_start',
124 and relocatable objects, running from `bloc_start' to `free'.
125
126 Relocatable objects may relocate within the same heap
127 or may move into another heap; the heaps themselves may grow
128 but they never move.
129
130 We try to make just one heap and make it larger as necessary.
131 But sometimes we can't do that, because we can't get continguous
132 space to add onto the heap. When that happens, we start a new heap. */
133
e429caa2
KH
134typedef struct heap
135{
136 struct heap *next;
137 struct heap *prev;
abe9ff32 138 /* Start of memory range of this heap. */
e429caa2 139 POINTER start;
abe9ff32 140 /* End of memory range of this heap. */
e429caa2 141 POINTER end;
abe9ff32
RS
142 /* Start of relocatable data in this heap. */
143 POINTER bloc_start;
144 /* Start of unused space in this heap. */
145 POINTER free;
47f13333
RS
146 /* First bloc in this heap. */
147 struct bp *first_bloc;
148 /* Last bloc in this heap. */
149 struct bp *last_bloc;
e429caa2
KH
150} *heap_ptr;
151
152#define NIL_HEAP ((heap_ptr) 0)
153#define HEAP_PTR_SIZE (sizeof (struct heap))
154
abe9ff32
RS
155/* This is the first heap object.
156 If we need additional heap objects, each one resides at the beginning of
157 the space it covers. */
158static struct heap heap_base;
159
160/* Head and tail of the list of heaps. */
e429caa2
KH
161static heap_ptr first_heap, last_heap;
162
163/* These structures are allocated in the malloc arena.
164 The linked list is kept in order of increasing '.data' members.
165 The data blocks abut each other; if b->next is non-nil, then
166 b->data + b->size == b->next->data. */
167typedef struct bp
168{
169 struct bp *next;
170 struct bp *prev;
171 POINTER *variable;
172 POINTER data;
173 SIZE size;
174 POINTER new_data; /* tmporarily used for relocation */
47f13333
RS
175 /* Heap this bloc is in. */
176 struct heap *heap;
e429caa2
KH
177} *bloc_ptr;
178
179#define NIL_BLOC ((bloc_ptr) 0)
180#define BLOC_PTR_SIZE (sizeof (struct bp))
181
abe9ff32 182/* Head and tail of the list of relocatable blocs. */
e429caa2
KH
183static bloc_ptr first_bloc, last_bloc;
184
dcfdbac7 185\f
956ace37
JB
186/* Functions to get and return memory from the system. */
187
abe9ff32
RS
188/* Find the heap that ADDRESS falls within. */
189
190static heap_ptr
191find_heap (address)
192 POINTER address;
193{
194 heap_ptr heap;
195
196 for (heap = last_heap; heap; heap = heap->prev)
197 {
198 if (heap->start <= address && address <= heap->end)
199 return heap;
200 }
201
202 return NIL_HEAP;
203}
204
205/* Find SIZE bytes of space in a heap.
206 Try to get them at ADDRESS (which must fall within some heap's range)
207 if we can get that many within one heap.
208
e429caa2
KH
209 If enough space is not presently available in our reserve, this means
210 getting more page-aligned space from the system. If the retuned space
211 is not contiguos to the last heap, allocate a new heap, and append it
abe9ff32
RS
212
213 obtain does not try to keep track of whether space is in use
214 or not in use. It just returns the address of SIZE bytes that
215 fall within a single heap. If you call obtain twice in a row
216 with the same arguments, you typically get the same value.
217 to the heap list. It's the caller's responsibility to keep
218 track of what space is in use.
dcfdbac7 219
e429caa2
KH
220 Return the address of the space if all went well, or zero if we couldn't
221 allocate the memory. */
abe9ff32 222
e429caa2
KH
223static POINTER
224obtain (address, size)
225 POINTER address;
226 SIZE size;
dcfdbac7 227{
e429caa2
KH
228 heap_ptr heap;
229 SIZE already_available;
dcfdbac7 230
abe9ff32 231 /* Find the heap that ADDRESS falls within. */
e429caa2 232 for (heap = last_heap; heap; heap = heap->prev)
dcfdbac7 233 {
e429caa2
KH
234 if (heap->start <= address && address <= heap->end)
235 break;
236 }
dcfdbac7 237
e429caa2 238 if (! heap)
abe9ff32 239 abort ();
dcfdbac7 240
abe9ff32
RS
241 /* If we can't fit SIZE bytes in that heap,
242 try successive later heaps. */
e429caa2
KH
243 while (heap && address + size > heap->end)
244 {
245 heap = heap->next;
246 if (heap == NIL_HEAP)
247 break;
248 address = heap->bloc_start;
dcfdbac7
JB
249 }
250
abe9ff32
RS
251 /* If we can't fit them within any existing heap,
252 get more space. */
e429caa2
KH
253 if (heap == NIL_HEAP)
254 {
255 POINTER new = (*real_morecore)(0);
256 SIZE get;
98b7fe02 257
e429caa2 258 already_available = (char *)last_heap->end - (char *)address;
dcfdbac7 259
e429caa2
KH
260 if (new != last_heap->end)
261 {
abe9ff32
RS
262 /* Someone else called sbrk. Make a new heap. */
263
264 heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
265 POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));
e429caa2
KH
266
267 if ((*real_morecore) (bloc_start - new) != new)
268 return 0;
269
270 new_heap->start = new;
271 new_heap->end = bloc_start;
272 new_heap->bloc_start = bloc_start;
abe9ff32 273 new_heap->free = bloc_start;
e429caa2
KH
274 new_heap->next = NIL_HEAP;
275 new_heap->prev = last_heap;
47f13333
RS
276 new_heap->first_bloc = NIL_BLOC;
277 new_heap->last_bloc = NIL_BLOC;
e429caa2
KH
278 last_heap->next = new_heap;
279 last_heap = new_heap;
280
281 address = bloc_start;
282 already_available = 0;
283 }
dcfdbac7 284
abe9ff32
RS
285 /* Add space to the last heap (which we may have just created).
286 Get some extra, so we can come here less often. */
287
e429caa2 288 get = size + extra_bytes - already_available;
abe9ff32 289 get = (char *) ROUNDUP ((char *)last_heap->end + get)
e429caa2 290 - (char *) last_heap->end;
dcfdbac7 291
e429caa2
KH
292 if ((*real_morecore) (get) != last_heap->end)
293 return 0;
294
295 last_heap->end += get;
296 }
297
298 return address;
299}
dcfdbac7 300
abe9ff32
RS
301/* Return unused heap space to the system
302 if there is a lot of unused space now.
303 This can make the last heap smaller;
304 it can also eliminate the last heap entirely. */
305
dcfdbac7 306static void
e429caa2 307relinquish ()
dcfdbac7 308{
e429caa2
KH
309 register heap_ptr h;
310 int excess = 0;
311
abe9ff32
RS
312 /* Add the amount of space beyond break_value
313 in all heaps which have extend beyond break_value at all. */
314
e429caa2
KH
315 for (h = last_heap; h && break_value < h->end; h = h->prev)
316 {
317 excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
318 ? h->bloc_start : break_value);
319 }
320
321 if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
dcfdbac7 322 {
7516b7d5
RS
323 /* Keep extra_bytes worth of empty space.
324 And don't free anything unless we can free at least extra_bytes. */
e429caa2 325 excess -= extra_bytes;
dcfdbac7 326
e429caa2
KH
327 if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
328 {
47f13333
RS
329 /* This heap should have no blocs in it. */
330 if (last_heap->first_bloc != NIL_BLOC
331 || last_heap->last_bloc != NIL_BLOC)
332 abort ();
333
abe9ff32 334 /* Return the last heap, with its header, to the system. */
e429caa2
KH
335 excess = (char *)last_heap->end - (char *)last_heap->start;
336 last_heap = last_heap->prev;
337 last_heap->next = NIL_HEAP;
338 }
339 else
340 {
341 excess = (char *) last_heap->end
abe9ff32 342 - (char *) ROUNDUP ((char *)last_heap->end - excess);
e429caa2
KH
343 last_heap->end -= excess;
344 }
dcfdbac7 345
e429caa2
KH
346 if ((*real_morecore) (- excess) == 0)
347 abort ();
348 }
dcfdbac7
JB
349}
350\f
956ace37
JB
351/* The meat - allocating, freeing, and relocating blocs. */
352
956ace37 353/* Find the bloc referenced by the address in PTR. Returns a pointer
abe9ff32 354 to that block. */
dcfdbac7
JB
355
356static bloc_ptr
357find_bloc (ptr)
358 POINTER *ptr;
359{
360 register bloc_ptr p = first_bloc;
361
362 while (p != NIL_BLOC)
363 {
364 if (p->variable == ptr && p->data == *ptr)
365 return p;
366
367 p = p->next;
368 }
369
370 return p;
371}
372
373/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
98b7fe02
JB
374 Returns a pointer to the new bloc, or zero if we couldn't allocate
375 memory for the new block. */
dcfdbac7
JB
376
377static bloc_ptr
378get_bloc (size)
379 SIZE size;
380{
98b7fe02 381 register bloc_ptr new_bloc;
abe9ff32 382 register heap_ptr heap;
98b7fe02
JB
383
384 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
e429caa2 385 || ! (new_bloc->data = obtain (break_value, size)))
98b7fe02
JB
386 {
387 if (new_bloc)
388 free (new_bloc);
389
390 return 0;
391 }
dcfdbac7 392
e429caa2
KH
393 break_value = new_bloc->data + size;
394
dcfdbac7
JB
395 new_bloc->size = size;
396 new_bloc->next = NIL_BLOC;
8c7f1e35 397 new_bloc->variable = (POINTER *) NIL;
e429caa2 398 new_bloc->new_data = 0;
dcfdbac7 399
abe9ff32
RS
400 /* Record in the heap that this space is in use. */
401 heap = find_heap (new_bloc->data);
402 heap->free = break_value;
403
47f13333
RS
404 /* Maintain the correspondence between heaps and blocs. */
405 new_bloc->heap = heap;
406 heap->last_bloc = new_bloc;
407 if (heap->first_bloc == NIL_BLOC)
408 heap->first_bloc = new_bloc;
409
abe9ff32 410 /* Put this bloc on the doubly-linked list of blocs. */
dcfdbac7
JB
411 if (first_bloc)
412 {
413 new_bloc->prev = last_bloc;
414 last_bloc->next = new_bloc;
415 last_bloc = new_bloc;
416 }
417 else
418 {
419 first_bloc = last_bloc = new_bloc;
420 new_bloc->prev = NIL_BLOC;
421 }
422
423 return new_bloc;
424}
47f13333 425\f
abe9ff32
RS
426/* Calculate new locations of blocs in the list beginning with BLOC,
427 relocating it to start at ADDRESS, in heap HEAP. If enough space is
428 not presently available in our reserve, call obtain for
e429caa2
KH
429 more space.
430
abe9ff32
RS
431 Store the new location of each bloc in its new_data field.
432 Do not touch the contents of blocs or break_value. */
dcfdbac7 433
e429caa2
KH
434static int
435relocate_blocs (bloc, heap, address)
436 bloc_ptr bloc;
437 heap_ptr heap;
438 POINTER address;
439{
440 register bloc_ptr b = bloc;
ad3bb3d2 441
e429caa2
KH
442 while (b)
443 {
abe9ff32
RS
444 /* If bloc B won't fit within HEAP,
445 move to the next heap and try again. */
e429caa2
KH
446 while (heap && address + b->size > heap->end)
447 {
448 heap = heap->next;
449 if (heap == NIL_HEAP)
450 break;
451 address = heap->bloc_start;
452 }
dcfdbac7 453
abe9ff32
RS
454 /* If BLOC won't fit in any heap,
455 get enough new space to hold BLOC and all following blocs. */
e429caa2
KH
456 if (heap == NIL_HEAP)
457 {
458 register bloc_ptr tb = b;
459 register SIZE s = 0;
460
abe9ff32 461 /* Add up the size of all the following blocs. */
e429caa2
KH
462 while (tb != NIL_BLOC)
463 {
464 s += tb->size;
465 tb = tb->next;
466 }
467
abe9ff32
RS
468 /* Get that space. */
469 address = obtain (address, s);
470 if (address == 0)
e429caa2
KH
471 return 0;
472
473 heap = last_heap;
474 }
475
abe9ff32
RS
476 /* Record the new address of this bloc
477 and update where the next bloc can start. */
e429caa2
KH
478 b->new_data = address;
479 address += b->size;
480 b = b->next;
481 }
482
483 return 1;
484}
485
47f13333
RS
486/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
487 This is necessary if we put the memory of space of BLOC
488 before that of BEFORE. */
489
490static void
491reorder_bloc (bloc, before)
492 bloc_ptr bloc, before;
493{
494 bloc_ptr prev, next;
495
496 /* Splice BLOC out from where it is. */
497 prev = bloc->prev;
498 next = bloc->next;
499
500 if (prev)
501 prev->next = next;
502 if (next)
503 next->prev = prev;
504
505 /* Splice it in before BEFORE. */
506 prev = before->prev;
abe9ff32 507
47f13333
RS
508 if (prev)
509 prev->next = bloc;
510 bloc->prev = prev;
511
512 before->prev = bloc;
513 bloc->next = before;
514}
515\f
516/* Update the records of which heaps contain which blocs, starting
517 with heap HEAP and bloc BLOC. */
518
519static void
520update_heap_bloc_correspondence (bloc, heap)
abe9ff32
RS
521 bloc_ptr bloc;
522 heap_ptr heap;
523{
524 register bloc_ptr b;
525
47f13333
RS
526 /* Initialize HEAP's status to reflect blocs before BLOC. */
527 if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
528 {
529 /* The previous bloc is in HEAP. */
530 heap->last_bloc = bloc->prev;
531 heap->free = bloc->prev->data + bloc->prev->size;
532 }
533 else
534 {
535 /* HEAP contains no blocs before BLOC. */
536 heap->first_bloc = NIL_BLOC;
537 heap->last_bloc = NIL_BLOC;
538 heap->free = heap->bloc_start;
539 }
540
abe9ff32
RS
541 /* Advance through blocs one by one. */
542 for (b = bloc; b != NIL_BLOC; b = b->next)
543 {
47f13333
RS
544 /* Advance through heaps, marking them empty,
545 till we get to the one that B is in. */
abe9ff32
RS
546 while (heap)
547 {
548 if (heap->bloc_start <= b->data && b->data <= heap->end)
549 break;
550 heap = heap->next;
47f13333
RS
551 /* We know HEAP is not null now,
552 because there has to be space for bloc B. */
553 heap->first_bloc = NIL_BLOC;
554 heap->last_bloc = NIL_BLOC;
abe9ff32
RS
555 heap->free = heap->bloc_start;
556 }
47f13333
RS
557
558 /* Update HEAP's status for bloc B. */
abe9ff32 559 heap->free = b->data + b->size;
47f13333
RS
560 heap->last_bloc = b;
561 if (heap->first_bloc == NIL_BLOC)
562 heap->first_bloc = b;
563
564 /* Record that B is in HEAP. */
565 b->heap = heap;
abe9ff32
RS
566 }
567
568 /* If there are any remaining heaps and no blocs left,
47f13333 569 mark those heaps as empty. */
abe9ff32
RS
570 heap = heap->next;
571 while (heap)
572 {
47f13333
RS
573 heap->first_bloc = NIL_BLOC;
574 heap->last_bloc = NIL_BLOC;
abe9ff32
RS
575 heap->free = heap->bloc_start;
576 heap = heap->next;
577 }
578}
47f13333 579\f
abe9ff32
RS
580/* Resize BLOC to SIZE bytes. This relocates the blocs
581 that come after BLOC in memory. */
582
e429caa2
KH
583static int
584resize_bloc (bloc, size)
585 bloc_ptr bloc;
586 SIZE size;
dcfdbac7 587{
e429caa2
KH
588 register bloc_ptr b;
589 heap_ptr heap;
590 POINTER address;
591 SIZE old_size;
592
593 if (bloc == NIL_BLOC || size == bloc->size)
594 return 1;
595
596 for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
597 {
598 if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
599 break;
600 }
601
602 if (heap == NIL_HEAP)
abe9ff32 603 abort ();
e429caa2
KH
604
605 old_size = bloc->size;
606 bloc->size = size;
607
abe9ff32
RS
608 /* Note that bloc could be moved into the previous heap. */
609 address = (bloc->prev ? bloc->prev->data + bloc->prev->size
610 : first_heap->bloc_start);
e429caa2
KH
611 while (heap)
612 {
613 if (heap->bloc_start <= address && address <= heap->end)
614 break;
615 heap = heap->prev;
616 }
617
618 if (! relocate_blocs (bloc, heap, address))
619 {
620 bloc->size = old_size;
621 return 0;
622 }
623
624 if (size > old_size)
625 {
626 for (b = last_bloc; b != bloc; b = b->prev)
627 {
628 safe_bcopy (b->data, b->new_data, b->size);
629 *b->variable = b->data = b->new_data;
630 }
631 safe_bcopy (bloc->data, bloc->new_data, old_size);
632 bzero (bloc->new_data + old_size, size - old_size);
633 *bloc->variable = bloc->data = bloc->new_data;
634 }
635 else
dcfdbac7 636 {
ad3bb3d2
JB
637 for (b = bloc; b != NIL_BLOC; b = b->next)
638 {
e429caa2
KH
639 safe_bcopy (b->data, b->new_data, b->size);
640 *b->variable = b->data = b->new_data;
ad3bb3d2 641 }
ad3bb3d2 642 }
dcfdbac7 643
47f13333 644 update_heap_bloc_correspondence (bloc, heap);
abe9ff32
RS
645
646 break_value = (last_bloc ? last_bloc->data + last_bloc->size
647 : first_heap->bloc_start);
e429caa2
KH
648 return 1;
649}
47f13333 650\f
abe9ff32
RS
651/* Free BLOC from the chain of blocs, relocating any blocs above it.
652 This may return space to the system. */
dcfdbac7
JB
653
654static void
655free_bloc (bloc)
656 bloc_ptr bloc;
657{
47f13333
RS
658 heap_ptr heap = bloc->heap;
659
e429caa2
KH
660 resize_bloc (bloc, 0);
661
dcfdbac7
JB
662 if (bloc == first_bloc && bloc == last_bloc)
663 {
664 first_bloc = last_bloc = NIL_BLOC;
665 }
666 else if (bloc == last_bloc)
667 {
668 last_bloc = bloc->prev;
669 last_bloc->next = NIL_BLOC;
670 }
671 else if (bloc == first_bloc)
672 {
673 first_bloc = bloc->next;
674 first_bloc->prev = NIL_BLOC;
dcfdbac7
JB
675 }
676 else
677 {
678 bloc->next->prev = bloc->prev;
679 bloc->prev->next = bloc->next;
dcfdbac7
JB
680 }
681
47f13333
RS
682 /* Update the records of which blocs are in HEAP. */
683 if (heap->first_bloc == bloc)
684 {
685 if (bloc->next->heap == heap)
686 heap->first_bloc = bloc->next;
687 else
688 heap->first_bloc = heap->last_bloc = NIL_BLOC;
689 }
690 if (heap->last_bloc == bloc)
691 {
692 if (bloc->prev->heap == heap)
693 heap->last_bloc = bloc->prev;
694 else
695 heap->first_bloc = heap->last_bloc = NIL_BLOC;
696 }
697
e429caa2 698 relinquish ();
dcfdbac7
JB
699 free (bloc);
700}
701\f
956ace37
JB
702/* Interface routines. */
703
dcfdbac7 704static int use_relocatable_buffers;
81bd58e8 705static int r_alloc_freeze_level;
dcfdbac7 706
98b7fe02 707/* Obtain SIZE bytes of storage from the free pool, or the system, as
2c46d29f 708 necessary. If relocatable blocs are in use, this means relocating
98b7fe02
JB
709 them. This function gets plugged into the GNU malloc's __morecore
710 hook.
711
7516b7d5
RS
712 We provide hysteresis, never relocating by less than extra_bytes.
713
98b7fe02
JB
714 If we're out of memory, we should return zero, to imitate the other
715 __morecore hook values - in particular, __default_morecore in the
716 GNU malloc package. */
dcfdbac7
JB
717
718POINTER
719r_alloc_sbrk (size)
720 long size;
721{
e429caa2
KH
722 register bloc_ptr b;
723 POINTER address;
dcfdbac7
JB
724
725 if (! use_relocatable_buffers)
bbc60227 726 return (*real_morecore) (size);
dcfdbac7 727
e429caa2
KH
728 if (size == 0)
729 return virtual_break_value;
7516b7d5 730
e429caa2 731 if (size > 0)
dcfdbac7 732 {
abe9ff32
RS
733 /* Allocate a page-aligned space. GNU malloc would reclaim an
734 extra space if we passed an unaligned one. But we could
735 not always find a space which is contiguos to the previous. */
e429caa2
KH
736 POINTER new_bloc_start;
737 heap_ptr h = first_heap;
abe9ff32 738 SIZE get = ROUNDUP (size);
7516b7d5 739
abe9ff32 740 address = (POINTER) ROUNDUP (virtual_break_value);
e429caa2 741
abe9ff32
RS
742 /* Search the list upward for a heap which is large enough. */
743 while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
e429caa2
KH
744 {
745 h = h->next;
746 if (h == NIL_HEAP)
747 break;
abe9ff32 748 address = (POINTER) ROUNDUP (h->start);
e429caa2
KH
749 }
750
abe9ff32 751 /* If not found, obtain more space. */
e429caa2
KH
752 if (h == NIL_HEAP)
753 {
754 get += extra_bytes + page_size;
755
abe9ff32 756 if (r_alloc_freeze_level > 0 || ! obtain (address, get))
e429caa2 757 return 0;
98b7fe02 758
e429caa2 759 if (first_heap == last_heap)
abe9ff32 760 address = (POINTER) ROUNDUP (virtual_break_value);
e429caa2 761 else
abe9ff32 762 address = (POINTER) ROUNDUP (last_heap->start);
e429caa2
KH
763 h = last_heap;
764 }
765
abe9ff32 766 new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);
e429caa2
KH
767
768 if (first_heap->bloc_start < new_bloc_start)
769 {
abe9ff32 770 /* Move all blocs upward. */
e429caa2
KH
771 if (r_alloc_freeze_level > 0
772 || ! relocate_blocs (first_bloc, h, new_bloc_start))
773 return 0;
774
775 /* Note that (POINTER)(h+1) <= new_bloc_start since
776 get >= page_size, so the following does not destroy the heap
abe9ff32 777 header. */
e429caa2
KH
778 for (b = last_bloc; b != NIL_BLOC; b = b->prev)
779 {
780 safe_bcopy (b->data, b->new_data, b->size);
781 *b->variable = b->data = b->new_data;
782 }
783
784 h->bloc_start = new_bloc_start;
abe9ff32 785
47f13333 786 update_heap_bloc_correspondence (first_bloc, h);
e429caa2 787 }
956ace37 788
e429caa2
KH
789 if (h != first_heap)
790 {
791 /* Give up managing heaps below the one the new
abe9ff32 792 virtual_break_value points to. */
e429caa2
KH
793 first_heap->prev = NIL_HEAP;
794 first_heap->next = h->next;
795 first_heap->start = h->start;
796 first_heap->end = h->end;
abe9ff32 797 first_heap->free = h->free;
47f13333
RS
798 first_heap->first_bloc = h->first_bloc;
799 first_heap->last_bloc = h->last_bloc;
e429caa2
KH
800 first_heap->bloc_start = h->bloc_start;
801
802 if (first_heap->next)
803 first_heap->next->prev = first_heap;
804 else
805 last_heap = first_heap;
806 }
807
808 bzero (address, size);
dcfdbac7 809 }
e429caa2 810 else /* size < 0 */
dcfdbac7 811 {
e429caa2
KH
812 SIZE excess = (char *)first_heap->bloc_start
813 - ((char *)virtual_break_value + size);
814
815 address = virtual_break_value;
816
817 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
818 {
819 excess -= extra_bytes;
820 first_heap->bloc_start
47f13333 821 = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);
e429caa2 822
abe9ff32 823 relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);
7516b7d5 824
e429caa2
KH
825 for (b = first_bloc; b != NIL_BLOC; b = b->next)
826 {
827 safe_bcopy (b->data, b->new_data, b->size);
828 *b->variable = b->data = b->new_data;
829 }
830 }
831
832 if ((char *)virtual_break_value + size < (char *)first_heap->start)
833 {
834 /* We found an additional space below the first heap */
835 first_heap->start = (POINTER) ((char *)virtual_break_value + size);
836 }
dcfdbac7
JB
837 }
838
e429caa2 839 virtual_break_value = (POINTER) ((char *)address + size);
47f13333
RS
840 break_value = (last_bloc
841 ? last_bloc->data + last_bloc->size
842 : first_heap->bloc_start);
e429caa2 843 if (size < 0)
abe9ff32 844 relinquish ();
7516b7d5 845
e429caa2 846 return address;
dcfdbac7
JB
847}
848
849/* Allocate a relocatable bloc of storage of size SIZE. A pointer to
850 the data is returned in *PTR. PTR is thus the address of some variable
98b7fe02
JB
851 which will use the data area.
852
853 If we can't allocate the necessary memory, set *PTR to zero, and
854 return zero. */
dcfdbac7
JB
855
856POINTER
857r_alloc (ptr, size)
858 POINTER *ptr;
859 SIZE size;
860{
861 register bloc_ptr new_bloc;
862
2c46d29f
RS
863 if (! r_alloc_initialized)
864 r_alloc_init ();
865
abe9ff32 866 new_bloc = get_bloc (MEM_ROUNDUP (size));
98b7fe02
JB
867 if (new_bloc)
868 {
869 new_bloc->variable = ptr;
870 *ptr = new_bloc->data;
871 }
872 else
873 *ptr = 0;
dcfdbac7
JB
874
875 return *ptr;
876}
877
2c46d29f
RS
878/* Free a bloc of relocatable storage whose data is pointed to by PTR.
879 Store 0 in *PTR to show there's no block allocated. */
dcfdbac7
JB
880
881void
882r_alloc_free (ptr)
883 register POINTER *ptr;
884{
885 register bloc_ptr dead_bloc;
886
dcfdbac7
JB
887 dead_bloc = find_bloc (ptr);
888 if (dead_bloc == NIL_BLOC)
889 abort ();
890
891 free_bloc (dead_bloc);
2c46d29f 892 *ptr = 0;
dcfdbac7
JB
893}
894
16a5c729 895/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
98b7fe02
JB
896 Do this by shifting all blocks above this one up in memory, unless
897 SIZE is less than or equal to the current bloc size, in which case
898 do nothing.
dcfdbac7 899
98b7fe02
JB
900 Change *PTR to reflect the new bloc, and return this value.
901
902 If more memory cannot be allocated, then leave *PTR unchanged, and
903 return zero. */
dcfdbac7
JB
904
905POINTER
906r_re_alloc (ptr, size)
907 POINTER *ptr;
908 SIZE size;
909{
16a5c729 910 register bloc_ptr bloc;
dcfdbac7 911
16a5c729
JB
912 bloc = find_bloc (ptr);
913 if (bloc == NIL_BLOC)
dcfdbac7
JB
914 abort ();
915
16a5c729 916 if (size <= bloc->size)
956ace37 917 /* Wouldn't it be useful to actually resize the bloc here? */
dcfdbac7
JB
918 return *ptr;
919
abe9ff32 920 if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
98b7fe02
JB
921 return 0;
922
dcfdbac7
JB
923 return *ptr;
924}
81bd58e8
KH
925
926/* Disable relocations, after making room for at least SIZE bytes
927 of non-relocatable heap if possible. The relocatable blocs are
928 guaranteed to hold still until thawed, even if this means that
929 malloc must return a null pointer. */
abe9ff32 930
81bd58e8
KH
931void
932r_alloc_freeze (size)
933 long size;
934{
935 /* If already frozen, we can't make any more room, so don't try. */
936 if (r_alloc_freeze_level > 0)
937 size = 0;
938 /* If we can't get the amount requested, half is better than nothing. */
939 while (size > 0 && r_alloc_sbrk (size) == 0)
940 size /= 2;
941 ++r_alloc_freeze_level;
942 if (size > 0)
943 r_alloc_sbrk (-size);
944}
945
946void
947r_alloc_thaw ()
948{
949 if (--r_alloc_freeze_level < 0)
950 abort ();
951}
dcfdbac7
JB
952\f
953/* The hook `malloc' uses for the function which gets more space
954 from the system. */
955extern POINTER (*__morecore) ();
956
abe9ff32 957/* Initialize various things for memory allocation. */
dcfdbac7 958
2c46d29f
RS
959static void
960r_alloc_init ()
dcfdbac7 961{
e429caa2
KH
962 POINTER end;
963
2c46d29f 964 if (r_alloc_initialized)
dcfdbac7
JB
965 return;
966
2c46d29f 967 r_alloc_initialized = 1;
bbc60227 968 real_morecore = __morecore;
dcfdbac7 969 __morecore = r_alloc_sbrk;
8c7f1e35 970
e429caa2
KH
971 first_heap = last_heap = &heap_base;
972 first_heap->next = first_heap->prev = NIL_HEAP;
973 first_heap->start = first_heap->bloc_start
974 = virtual_break_value = break_value = (*real_morecore) (0);
aef4d570 975 if (break_value == NIL)
2c46d29f 976 abort ();
8c7f1e35 977
7516b7d5
RS
978 page_size = PAGE;
979 extra_bytes = ROUNDUP (50000);
980
e429caa2 981 first_heap->end = (POINTER) ROUNDUP (first_heap->start);
0e93a7cf
RS
982
983 /* The extra call to real_morecore guarantees that the end of the
984 address space is a multiple of page_size, even if page_size is
985 not really the page size of the system running the binary in
986 which page_size is stored. This allows a binary to be built on a
987 system with one page size and run on a system with a smaller page
abe9ff32 988 size. */
e429caa2 989 (*real_morecore) (first_heap->end - first_heap->start);
0e93a7cf 990
2c46d29f
RS
991 /* Clear the rest of the last page; this memory is in our address space
992 even though it is after the sbrk value. */
0e93a7cf
RS
993 /* Doubly true, with the additional call that explicitly adds the
994 rest of that page to the address space. */
e429caa2
KH
995 bzero (first_heap->start, first_heap->end - first_heap->start);
996 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
dcfdbac7 997 use_relocatable_buffers = 1;
2c46d29f 998}
e429caa2
KH
999#ifdef DEBUG
1000#include <assert.h>
1001
1002int
1003r_alloc_check ()
1004{
1005 int found = 0;
1006 heap_ptr h, ph = 0;
1007 bloc_ptr b, pb = 0;
1008
1009 if (!r_alloc_initialized)
1010 return;
1011
abe9ff32
RS
1012 assert (first_heap);
1013 assert (last_heap->end <= (POINTER) sbrk (0));
1014 assert ((POINTER) first_heap < first_heap->start);
1015 assert (first_heap->start <= virtual_break_value);
1016 assert (virtual_break_value <= first_heap->end);
e429caa2
KH
1017
1018 for (h = first_heap; h; h = h->next)
1019 {
abe9ff32
RS
1020 assert (h->prev == ph);
1021 assert ((POINTER) ROUNDUP (h->end) == h->end);
1022 assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
1023 assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
1024 assert (h->start <= h->bloc_start && h->bloc_start <= h->end);
e429caa2
KH
1025
1026 if (ph)
1027 {
1028 assert (ph->end < h->start);
1029 assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
1030 }
1031
1032 if (h->bloc_start <= break_value && break_value <= h->end)
1033 found = 1;
1034
1035 ph = h;
1036 }
1037
abe9ff32
RS
1038 assert (found);
1039 assert (last_heap == ph);
e429caa2
KH
1040
1041 for (b = first_bloc; b; b = b->next)
1042 {
abe9ff32
RS
1043 assert (b->prev == pb);
1044 assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
1045 assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);
e429caa2
KH
1046
1047 ph = 0;
1048 for (h = first_heap; h; h = h->next)
1049 {
1050 if (h->bloc_start <= b->data && b->data + b->size <= h->end)
1051 break;
1052 ph = h;
1053 }
1054
abe9ff32 1055 assert (h);
e429caa2
KH
1056
1057 if (pb && pb->data + pb->size != b->data)
1058 {
abe9ff32 1059 assert (ph && b->data == h->bloc_start);
e429caa2
KH
1060 while (ph)
1061 {
1062 if (ph->bloc_start <= pb->data
1063 && pb->data + pb->size <= ph->end)
1064 {
abe9ff32 1065 assert (pb->data + pb->size + b->size > ph->end);
e429caa2
KH
1066 break;
1067 }
1068 else
1069 {
abe9ff32 1070 assert (ph->bloc_start + b->size > ph->end);
e429caa2
KH
1071 }
1072 ph = ph->prev;
1073 }
1074 }
1075 pb = b;
1076 }
1077
abe9ff32 1078 assert (last_bloc == pb);
e429caa2
KH
1079
1080 if (last_bloc)
abe9ff32 1081 assert (last_bloc->data + last_bloc->size == break_value);
e429caa2 1082 else
abe9ff32 1083 assert (first_heap->bloc_start == break_value);
e429caa2
KH
1084}
1085#endif /* DEBUG */