/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */

#undef NULL

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the object pointed to
   has a size of 1.  */
#if 0 /* Arithmetic on void* is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

typedef char *POINTER;

#endif
#endif /* 0 */

/* Unconditionally use char * for this.  */
typedef char *POINTER;

typedef unsigned long SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */
extern void safe_bcopy ();

#include "getpagesize.h"

#else /* Not emacs.  */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)

#endif /* emacs.  */

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;

static void r_alloc_init ();

/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
static POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
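/* Worked example of the rounding arithmetic (an illustration only, assuming
   page_size == 4096 and MEM_ALIGN == 8): ROUNDUP (4097) yields 8192,
   ROUNDUP (4096) yields 4096, ALIGNED (8192) is true, and MEM_ROUNDUP (13)
   yields 16.  The bit masking relies on page_size and MEM_ALIGN being
   powers of two.  */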

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.  */
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  /* Heap this bloc is in.  */
  struct heap *heap;
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

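/* Illustrative sketch of the invariants above (the addresses are made up):

     heap->start       0x1000   malloc data begins here
     heap->bloc_start  0x3000   first bloc's data begins here
     bloc A            data = 0x3000, size = 0x800
     bloc B            data = 0x3800, size = 0x400   (== A->data + A->size)
     heap->free        0x3c00   just past the last bloc's data
     heap->end         0x4000

   Each bloc's data abuts the next bloc's data, and `free' always points
   just past the data of the heap's last bloc; for the last heap this is
   also break_value.  */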

/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
        break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
        {
          /* Someone else called sbrk.  Make a new heap.  */

          heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
          POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

          if ((*real_morecore) (bloc_start - new) != new)
            return 0;

          new_heap->start = new;
          new_heap->end = bloc_start;
          new_heap->bloc_start = bloc_start;
          new_heap->free = bloc_start;
          new_heap->next = NIL_HEAP;
          new_heap->prev = last_heap;
          new_heap->first_bloc = NIL_BLOC;
          new_heap->last_bloc = NIL_BLOC;
          last_heap->next = new_heap;
          last_heap = new_heap;

          address = bloc_start;
          already_available = 0;
        }

      /* Add space to the last heap (which we may have just created).
         Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
        - (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
        return 0;

      last_heap->end += get;
    }

  return address;
}
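/* Illustrative note on the calling convention (a sketch, not a second
   definition): obtain hands out addresses but does no bookkeeping of its
   own, so callers pair it with an explicit record of what is in use.
   get_bloc below, for example, does essentially

     data = obtain (break_value, size);
     break_value = data + size;

   advancing break_value itself to mark the returned bytes as occupied.  */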

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
                                            ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
         And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
        {
          /* This heap should have no blocs in it.  */
          if (last_heap->first_bloc != NIL_BLOC
              || last_heap->last_bloc != NIL_BLOC)
            abort ();

          /* Return the last heap, with its header, to the system.  */
          excess = (char *) last_heap->end - (char *) last_heap->start;
          last_heap = last_heap->prev;
          last_heap->next = NIL_HEAP;
        }
      else
        {
          excess = (char *) last_heap->end
            - (char *) ROUNDUP ((char *) last_heap->end - excess);
          last_heap->end -= excess;
        }

      if ((*real_morecore) (- excess) == 0)
        abort ();
    }
}
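/* Worked example of the hysteresis above (illustrative numbers, assuming a
   4096-byte page, so extra_bytes == ROUNDUP (50000) == 53248): relinquish
   returns nothing to the system until the unused tail of the heaps exceeds
   106496 bytes, and when it does shrink them it leaves 53248 bytes of
   slack, so small oscillations of break_value do not cause repeated sbrk
   traffic.  */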

/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return break_value - virtual_break_value;
}

/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
        return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
        free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}

/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  while (b)
    {
      /* If bloc B won't fit within HEAP,
         move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
        {
          heap = heap->next;
          if (heap == NIL_HEAP)
            break;
          address = heap->bloc_start;
        }

      /* If BLOC won't fit in any heap,
         get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
        {
          register bloc_ptr tb = b;
          register SIZE s = 0;

          /* Add up the size of all the following blocs.  */
          while (tb != NIL_BLOC)
            {
              s += tb->size;
              tb = tb->next;
            }

          /* Get that space.  */
          address = obtain (address, s);
          if (address == 0)
            return 0;

          heap = last_heap;
        }

      /* Record the new address of this bloc
         and update where the next bloc can start.  */
      b->new_data = address;
      address += b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}

/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
         till we get to the one that B is in.  */
      while (heap)
        {
          if (heap->bloc_start <= b->data && b->data <= heap->end)
            break;
          heap = heap->next;
          /* We know HEAP is not null now,
             because there has to be space for bloc B.  */
          heap->first_bloc = NIL_BLOC;
          heap->last_bloc = NIL_BLOC;
          heap->free = heap->bloc_start;
        }

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
        heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}

/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
        break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
             : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
        break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
        {
          safe_bcopy (b->data, b->new_data, b->size);
          *b->variable = b->data = b->new_data;
        }
      safe_bcopy (bloc->data, bloc->new_data, old_size);
      bzero (bloc->new_data + old_size, size - old_size);
      *bloc->variable = bloc->data = bloc->new_data;
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
        {
          safe_bcopy (b->data, b->new_data, b->size);
          *b->variable = b->data = b->new_data;
        }
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
                 : first_heap->bloc_start);
  return 1;
}
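/* Note on the copy order above: when the bloc grows, the blocs after it
   move to higher addresses, so they are copied starting from last_bloc and
   working downward; each bloc is moved before its old location can be
   overwritten.  When the bloc shrinks, everything moves to lower addresses,
   so the loop runs forward from BLOC instead.  safe_bcopy handles the
   overlap within each individual copy.  */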

/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next->heap == heap)
        heap->first_bloc = bloc->next;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev->heap == heap)
        heap->last_bloc = bloc->prev;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}

/* Interface routines.  */

static int use_relocatable_buffers;
static int r_alloc_freeze_level;

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim the
         extra space if we passed an unaligned one.  But we could not
         always find a space which is contiguous to the previous one.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
        {
          h = h->next;
          if (h == NIL_HEAP)
            break;
          address = (POINTER) ROUNDUP (h->start);
        }

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
        {
          get += extra_bytes + page_size;

          if (r_alloc_freeze_level > 0 || ! obtain (address, get))
            return 0;

          if (first_heap == last_heap)
            address = (POINTER) ROUNDUP (virtual_break_value);
          else
            address = (POINTER) ROUNDUP (last_heap->start);
          h = last_heap;
        }

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
        {
          /* Move all blocs upward.  */
          if (r_alloc_freeze_level > 0
              || ! relocate_blocs (first_bloc, h, new_bloc_start))
            return 0;

          /* Note that (POINTER)(h+1) <= new_bloc_start since
             get >= page_size, so the following does not destroy the heap
             header.  */
          for (b = last_bloc; b != NIL_BLOC; b = b->prev)
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }

          h->bloc_start = new_bloc_start;

          update_heap_bloc_correspondence (first_bloc, h);
        }

      if (h != first_heap)
        {
          /* Give up managing heaps below the one the new
             virtual_break_value points to.  */
          first_heap->prev = NIL_HEAP;
          first_heap->next = h->next;
          first_heap->start = h->start;
          first_heap->end = h->end;
          first_heap->free = h->free;
          first_heap->first_bloc = h->first_bloc;
          first_heap->last_bloc = h->last_bloc;
          first_heap->bloc_start = h->bloc_start;

          if (first_heap->next)
            first_heap->next->prev = first_heap;
          else
            last_heap = first_heap;
        }

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
        - ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
        {
          excess -= extra_bytes;
          first_heap->bloc_start
            = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

          relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

          for (b = first_bloc; b != NIL_BLOC; b = b->next)
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
        {
          /* We found an additional space below the first heap.  */
          first_heap->start = (POINTER) ((char *) virtual_break_value + size);
        }
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
                 ? last_bloc->data + last_bloc->size
                 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
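/* Example of the sbrk-like contract above (an illustration, not code from
   this file): suppose virtual_break_value is page-aligned at 0x20000 and
   GNU malloc asks its __morecore hook for 8192 more bytes.  Then
   r_alloc_sbrk (8192) shifts the relocatable blocs upward as needed,
   zeroes the 8192 bytes starting at 0x20000, and returns 0x20000, just as
   sbrk would return the old break.  A later r_alloc_sbrk (-8192) lowers
   the virtual break again, moves the blocs back down when the freed space
   exceeds the hysteresis threshold, and may relinquish whole pages to the
   system.  */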

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
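/* Usage sketch (a hypothetical caller, not code from this file).  The
   caller passes the address of the variable that will hold the data
   pointer, so ralloc can update that variable whenever the bloc moves:

     POINTER text;
     if (r_alloc (&text, 1024) == 0)
       fatal_out_of_memory ();
     ...
     if (r_re_alloc (&text, 4096) == 0)
       fatal_out_of_memory ();
     r_alloc_free (&text);

   After r_re_alloc the data may have been relocated, but `text' already
   holds the new address; r_alloc_free stores 0 in `text'.  The variable
   must stay valid for the bloc's lifetime, since relocation writes the
   new address through it.  fatal_out_of_memory stands in for whatever
   error handling the caller uses.  */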

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

  refill_memory_reserve ();
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size <= bloc->size)
    /* Wouldn't it be useful to actually resize the bloc here?  */
    return *ptr;

  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
    return 0;

  return *ptr;
}

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (--r_alloc_freeze_level < 0)
    abort ();
}
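/* Typical pairing (an illustrative sketch, not taken from the callers in
   Emacs): bracket a stretch of code that holds raw pointers into bloc
   data, so intervening malloc calls cannot move the blocs out from
   under it:

     r_alloc_freeze (0);
     ... use pointers into relocatable data; malloc may return NULL ...
     r_alloc_thaw ();

   Passing a positive SIZE asks freeze to open up that much non-relocatable
   room first, so ordinary malloc can be satisfied without relocation while
   the freeze is in effect.  */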

/* The hook `malloc' uses for the function which gets more space
   from the system.  */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}
#ifdef DEBUG
#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
        {
          assert (ph->end < h->start);
          assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
        }

      if (h->bloc_start <= break_value && break_value <= h->end)
        found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
        {
          if (h->bloc_start <= b->data && b->data + b->size <= h->end)
            break;
          ph = h;
        }

      assert (h);

      if (pb && pb->data + pb->size != b->data)
        {
          assert (ph && b->data == h->bloc_start);
          while (ph)
            {
              if (ph->bloc_start <= pb->data
                  && pb->data + pb->size <= ph->end)
                {
                  assert (pb->data + pb->size + b->size > ph->end);
                  break;
                }
              else
                {
                  assert (ph->bloc_start + b->size > ph->end);
                }
              ph = ph->prev;
            }
        }
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */
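/* A note on using the check above (an assumption about typical practice,
   not something this file enforces): r_alloc_check is only compiled when
   DEBUG is defined, e.g. by adding -DDEBUG to CFLAGS, and is normally
   invoked by hand from a debugger or inserted temporarily into suspect
   code paths to verify the heap/bloc invariants.  */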