(displaying-byte-compile-warnings): Show
[bpt/emacs.git] / src / ralloc.c
CommitLineData
dcfdbac7 1/* Block-relocating memory allocator.
187996a8 2 Copyright (C) 1993, 1995 Free Software Foundation, Inc.
dcfdbac7
JB
3
4This file is part of GNU Emacs.
5
6GNU Emacs is free software; you can redistribute it and/or modify
7it under the terms of the GNU General Public License as published by
187996a8 8the Free Software Foundation; either version 2, or (at your option)
dcfdbac7
JB
9any later version.
10
11GNU Emacs is distributed in the hope that it will be useful,
12but WITHOUT ANY WARRANTY; without even the implied warranty of
13MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14GNU General Public License for more details.
15
16You should have received a copy of the GNU General Public License
17along with GNU Emacs; see the file COPYING. If not, write to
3b7ad313
EN
18the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19Boston, MA 02111-1307, USA. */
dcfdbac7
JB
20
21/* NOTES:
22
eb8c3be9 23 Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
dcfdbac7 24 rather than all of them. This means allowing for a possible
abe9ff32 25 hole between the first bloc and the end of malloc storage. */
dcfdbac7 26
2c46d29f 27#ifdef emacs
aef4d570 28
18160b98 29#include <config.h>
956ace37 30#include "lisp.h" /* Needed for VALBITS. */
2c46d29f 31
aef4d570
RM
32#undef NULL
33
f275fd9a
RS
34/* The important properties of this type are that 1) it's a pointer, and
35 2) arithmetic on it should work as if the size of the object pointed
36 to has a size of 1. */
a8c0e5ea 37#if 0 /* Arithmetic on void* is a GCC extension. */
f275fd9a
RS
38#ifdef __STDC__
39typedef void *POINTER;
40#else
1df181b6
RM
41
42#ifdef HAVE_CONFIG_H
43#include "config.h"
44#endif
45
f275fd9a 46typedef char *POINTER;
1df181b6 47
f275fd9a 48#endif
a8c0e5ea
RS
49#endif /* 0 */
50
51/* Unconditionally use char * for this. */
52typedef char *POINTER;
f275fd9a
RS
53
54typedef unsigned long SIZE;
55
2c46d29f
RS
56/* Declared in dispnew.c, this version doesn't screw up if regions
57 overlap. */
58extern void safe_bcopy ();
2c46d29f 59
b0119c68
RS
60#ifdef DOUG_LEA_MALLOC
61#define M_TOP_PAD -2
62extern int mallopt ();
63#else
49081834 64extern int __malloc_extra_blocks;
b0119c68 65#endif
49081834 66
d5179acc 67#else /* not emacs */
aef4d570 68
2c46d29f 69#include <stddef.h>
aef4d570 70
2c46d29f
RS
71typedef size_t SIZE;
72typedef void *POINTER;
aef4d570 73
aef4d570
RM
74#include <unistd.h>
75#include <malloc.h>
76#include <string.h>
77
2c46d29f 78#define safe_bcopy(x, y, z) memmove (y, x, z)
d5179acc
RS
79#define bzero(x, len) memset (x, 0, len)
80
81#endif /* not emacs */
2c46d29f 82
d5179acc 83#include "getpagesize.h"
dcfdbac7
JB
84
85#define NIL ((POINTER) 0)
86
2c46d29f
RS
87/* A flag to indicate whether we have initialized ralloc yet. For
88 Emacs's sake, please do not make this local to malloc_init; on some
89 machines, the dumping procedure makes all static variables
90 read-only. On these machines, the word static is #defined to be
91 the empty string, meaning that r_alloc_initialized becomes an
92 automatic variable, and loses its value each time Emacs is started up. */
93static int r_alloc_initialized = 0;
94
95static void r_alloc_init ();
dcfdbac7 96\f
956ace37
JB
97/* Declarations for working with the malloc, ralloc, and system breaks. */
98
abe9ff32 99/* Function to set the real break value. */
bbc60227 100static POINTER (*real_morecore) ();
dcfdbac7 101
abe9ff32 102/* The break value, as seen by malloc. */
dcfdbac7
JB
103static POINTER virtual_break_value;
104
abe9ff32
RS
105/* The address of the end of the last data in use by ralloc,
106 including relocatable blocs as well as malloc data. */
dcfdbac7
JB
107static POINTER break_value;
108
7516b7d5
RS
109/* This is the size of a page. We round memory requests to this boundary. */
110static int page_size;
111
ad3bb3d2
JB
112/* Whenever we get memory from the system, get this many extra bytes. This
113 must be a multiple of page_size. */
7516b7d5
RS
114static int extra_bytes;
115
dcfdbac7 116/* Macros for rounding. Note that rounding to any value is possible
abe9ff32 117 by changing the definition of PAGE. */
dcfdbac7 118#define PAGE (getpagesize ())
f7a009a5
RM
119#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
120#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
121 & ~(page_size - 1))
7516b7d5 122#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))
e429caa2
KH
123
124#define MEM_ALIGN sizeof(double)
125#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
126 & ~(MEM_ALIGN - 1))
127\f
abe9ff32
RS
128/* Data structures of heaps and blocs. */
129
130/* The relocatable objects, or blocs, and the malloc data
131 both reside within one or more heaps.
132 Each heap contains malloc data, running from `start' to `bloc_start',
133 and relocatable objects, running from `bloc_start' to `free'.
134
135 Relocatable objects may relocate within the same heap
136 or may move into another heap; the heaps themselves may grow
137 but they never move.
138
139 We try to make just one heap and make it larger as necessary.
8e6208c5 140 But sometimes we can't do that, because we can't get contiguous
abe9ff32
RS
141 space to add onto the heap. When that happens, we start a new heap. */
142
e429caa2
KH
143typedef struct heap
144{
145 struct heap *next;
146 struct heap *prev;
abe9ff32 147 /* Start of memory range of this heap. */
e429caa2 148 POINTER start;
abe9ff32 149 /* End of memory range of this heap. */
e429caa2 150 POINTER end;
abe9ff32
RS
151 /* Start of relocatable data in this heap. */
152 POINTER bloc_start;
153 /* Start of unused space in this heap. */
154 POINTER free;
47f13333
RS
155 /* First bloc in this heap. */
156 struct bp *first_bloc;
157 /* Last bloc in this heap. */
158 struct bp *last_bloc;
e429caa2
KH
159} *heap_ptr;
160
161#define NIL_HEAP ((heap_ptr) 0)
162#define HEAP_PTR_SIZE (sizeof (struct heap))
163
abe9ff32
RS
164/* This is the first heap object.
165 If we need additional heap objects, each one resides at the beginning of
166 the space it covers. */
167static struct heap heap_base;
168
169/* Head and tail of the list of heaps. */
e429caa2
KH
170static heap_ptr first_heap, last_heap;
171
172/* These structures are allocated in the malloc arena.
173 The linked list is kept in order of increasing '.data' members.
174 The data blocks abut each other; if b->next is non-nil, then
49f82b3d
RS
175 b->data + b->size == b->next->data.
176
177 An element with variable==NIL denotes a freed block, which has not yet
178 been collected. They may only appear while r_alloc_freeze > 0, and will be
179 freed when the arena is thawed. Currently, these blocs are not reusable,
8e6208c5 180 while the arena is frozen. Very inefficient. */
49f82b3d 181
e429caa2
KH
182typedef struct bp
183{
184 struct bp *next;
185 struct bp *prev;
186 POINTER *variable;
187 POINTER data;
188 SIZE size;
8e6208c5 189 POINTER new_data; /* temporarily used for relocation */
49f82b3d 190 struct heap *heap; /* Heap this bloc is in. */
e429caa2
KH
191} *bloc_ptr;
192
193#define NIL_BLOC ((bloc_ptr) 0)
194#define BLOC_PTR_SIZE (sizeof (struct bp))
195
abe9ff32 196/* Head and tail of the list of relocatable blocs. */
e429caa2
KH
197static bloc_ptr first_bloc, last_bloc;
198
49f82b3d
RS
199static int use_relocatable_buffers;
200
201/* If >0, no relocation whatsoever takes place. */
202static int r_alloc_freeze_level;
203
dcfdbac7 204\f
956ace37
JB
205/* Functions to get and return memory from the system. */
206
abe9ff32
RS
207/* Find the heap that ADDRESS falls within. */
208
209static heap_ptr
210find_heap (address)
211 POINTER address;
212{
213 heap_ptr heap;
214
215 for (heap = last_heap; heap; heap = heap->prev)
216 {
217 if (heap->start <= address && address <= heap->end)
218 return heap;
219 }
220
221 return NIL_HEAP;
222}
223
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
    POINTER address;
    SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  /* ADDRESS is required to lie within some heap.  */
  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space from the system.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk, so the system break is no longer
	     contiguous with the last heap.  Make a new heap whose header
	     lives at the start of the newly obtained region.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  /* Grow the break just enough to cover the new heap header.  */
	  if ((*real_morecore) (bloc_start - new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      /* Round so the break stays page-aligned.  */
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end += get;
    }

  return address;
}
dcfdbac7 319
abe9ff32
RS
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  /* Only shrink if the savings are substantial, and only if no one
     else has moved the system break since we last did.  */
  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  /* Shrink the last heap, keeping the new end page-aligned.  */
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end -= excess;
	}

      /* Actually lower the system break by EXCESS bytes.  */
      if ((*real_morecore) (- excess) == 0)
	abort ();
    }
}
719b242f
RS
369
370/* Return the total size in use by relocating allocator,
371 above where malloc gets space. */
372
373long
374r_alloc_size_in_use ()
375{
376 return break_value - virtual_break_value;
377}
dcfdbac7 378\f
956ace37
JB
379/* The meat - allocating, freeing, and relocating blocs. */
380
956ace37 381/* Find the bloc referenced by the address in PTR. Returns a pointer
abe9ff32 382 to that block. */
dcfdbac7
JB
383
384static bloc_ptr
385find_bloc (ptr)
386 POINTER *ptr;
387{
388 register bloc_ptr p = first_bloc;
389
390 while (p != NIL_BLOC)
391 {
392 if (p->variable == ptr && p->data == *ptr)
393 return p;
394
395 p = p->next;
396 }
397
398 return p;
399}
400
401/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
98b7fe02
JB
402 Returns a pointer to the new bloc, or zero if we couldn't allocate
403 memory for the new block. */
dcfdbac7
JB
404
405static bloc_ptr
406get_bloc (size)
407 SIZE size;
408{
98b7fe02 409 register bloc_ptr new_bloc;
abe9ff32 410 register heap_ptr heap;
98b7fe02
JB
411
412 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
e429caa2 413 || ! (new_bloc->data = obtain (break_value, size)))
98b7fe02
JB
414 {
415 if (new_bloc)
416 free (new_bloc);
417
418 return 0;
419 }
dcfdbac7 420
e429caa2
KH
421 break_value = new_bloc->data + size;
422
dcfdbac7
JB
423 new_bloc->size = size;
424 new_bloc->next = NIL_BLOC;
8c7f1e35 425 new_bloc->variable = (POINTER *) NIL;
e429caa2 426 new_bloc->new_data = 0;
dcfdbac7 427
abe9ff32
RS
428 /* Record in the heap that this space is in use. */
429 heap = find_heap (new_bloc->data);
430 heap->free = break_value;
431
47f13333
RS
432 /* Maintain the correspondence between heaps and blocs. */
433 new_bloc->heap = heap;
434 heap->last_bloc = new_bloc;
435 if (heap->first_bloc == NIL_BLOC)
436 heap->first_bloc = new_bloc;
437
abe9ff32 438 /* Put this bloc on the doubly-linked list of blocs. */
dcfdbac7
JB
439 if (first_bloc)
440 {
441 new_bloc->prev = last_bloc;
442 last_bloc->next = new_bloc;
443 last_bloc = new_bloc;
444 }
445 else
446 {
447 first_bloc = last_bloc = new_bloc;
448 new_bloc->prev = NIL_BLOC;
449 }
450
451 return new_bloc;
452}
47f13333 453\f
abe9ff32
RS
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.
   Return 1 on success, 0 if more space could not be obtained.  */

static int
relocate_blocs (bloc, heap, address)
    bloc_ptr bloc;
    heap_ptr heap;
    POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.
	     Freed blocs (variable == NIL) take no space in the new layout.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address += b->size;
      b = b->next;
    }

  return 1;
}
520
47f13333
RS
521/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
522 This is necessary if we put the memory of space of BLOC
523 before that of BEFORE. */
524
525static void
526reorder_bloc (bloc, before)
527 bloc_ptr bloc, before;
528{
529 bloc_ptr prev, next;
530
531 /* Splice BLOC out from where it is. */
532 prev = bloc->prev;
533 next = bloc->next;
534
535 if (prev)
536 prev->next = next;
537 if (next)
538 next->prev = prev;
539
540 /* Splice it in before BEFORE. */
541 prev = before->prev;
abe9ff32 542
47f13333
RS
543 if (prev)
544 prev->next = bloc;
545 bloc->prev = prev;
546
547 before->prev = bloc;
548 bloc->next = before;
549}
550\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
    bloc_ptr bloc;
    heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.
	     Mark the skipped-to heap empty; its fields are refilled
	     below once we confirm B belongs to it.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
47f13333 614\f
abe9ff32
RS
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.
   Return 1 on success, 0 if the needed space could not be obtained.  */

static int
resize_bloc (bloc, size)
    bloc_ptr bloc;
    SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  /* Find the heap BLOC's data currently lives in.  */
  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
	     : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      /* Couldn't get space; undo the size change.  */
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      /* Growing: blocs after BLOC move up, so copy from the last bloc
	 downward to avoid overwriting data not yet moved.  */
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  /* Move BLOC's old contents and zero the newly added tail.  */
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero (bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      /* Shrinking: blocs move down, so copy from BLOC forward.  */
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  return 1;
}
47f13333 713\f
abe9ff32
RS
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
    bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      /* While frozen nothing may move; just mark the bloc dead.
	 It is actually freed when r_alloc_thaw reaches level 0.  */
      bloc->variable = (POINTER *) NIL;
      return;
    }

  /* Shrink to zero size, sliding all later blocs down.  */
  resize_bloc (bloc, 0);

  /* Unlink BLOC from the global doubly-linked list.  */
  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  /* Possibly give unused space back to the system.  */
  relinquish ();
  free (bloc);
}
770\f
956ace37
JB
771/* Interface routines. */
772
/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   SIZE may be negative, in which case storage is released back.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
    long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  Copy from the last bloc downward so nothing is
	     overwritten before it is moved.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      /* Amount of space between the shrunken break and where
	 the relocatable blocs begin.  */
      SIZE excess = (char *)first_heap->bloc_start
	- ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  /* Keep extra_bytes of slack; slide the blocs down.  */
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  /* Moving down: copy from the first bloc forward.  */
	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
923
924/* Allocate a relocatable bloc of storage of size SIZE. A pointer to
925 the data is returned in *PTR. PTR is thus the address of some variable
98b7fe02
JB
926 which will use the data area.
927
49f82b3d
RS
928 The allocation of 0 bytes is valid.
929 In case r_alloc_freeze is set, a best fit of unused blocs could be done
930 before allocating a new area. Not yet done.
931
98b7fe02
JB
932 If we can't allocate the necessary memory, set *PTR to zero, and
933 return zero. */
dcfdbac7
JB
934
935POINTER
936r_alloc (ptr, size)
937 POINTER *ptr;
938 SIZE size;
939{
940 register bloc_ptr new_bloc;
941
2c46d29f
RS
942 if (! r_alloc_initialized)
943 r_alloc_init ();
944
abe9ff32 945 new_bloc = get_bloc (MEM_ROUNDUP (size));
98b7fe02
JB
946 if (new_bloc)
947 {
948 new_bloc->variable = ptr;
949 *ptr = new_bloc->data;
950 }
951 else
952 *ptr = 0;
dcfdbac7
JB
953
954 return *ptr;
955}
956
2c46d29f
RS
957/* Free a bloc of relocatable storage whose data is pointed to by PTR.
958 Store 0 in *PTR to show there's no block allocated. */
dcfdbac7
JB
959
960void
961r_alloc_free (ptr)
962 register POINTER *ptr;
963{
964 register bloc_ptr dead_bloc;
965
44d3dec0
RS
966 if (! r_alloc_initialized)
967 r_alloc_init ();
968
dcfdbac7
JB
969 dead_bloc = find_bloc (ptr);
970 if (dead_bloc == NIL_BLOC)
971 abort ();
972
973 free_bloc (dead_bloc);
2c46d29f 974 *ptr = 0;
719b242f 975
d5179acc 976#ifdef emacs
719b242f 977 refill_memory_reserve ();
d5179acc 978#endif
dcfdbac7
JB
979}
980
16a5c729 981/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
98b7fe02
JB
982 Do this by shifting all blocks above this one up in memory, unless
983 SIZE is less than or equal to the current bloc size, in which case
984 do nothing.
dcfdbac7 985
49f82b3d 986 In case r_alloc_freeze is set, a new bloc is allocated, and the
8e6208c5 987 memory copied to it. Not very efficient. We could traverse the
49f82b3d
RS
988 bloc_list for a best fit of free blocs first.
989
98b7fe02
JB
990 Change *PTR to reflect the new bloc, and return this value.
991
992 If more memory cannot be allocated, then leave *PTR unchanged, and
993 return zero. */
dcfdbac7
JB
994
995POINTER
996r_re_alloc (ptr, size)
997 POINTER *ptr;
998 SIZE size;
999{
16a5c729 1000 register bloc_ptr bloc;
dcfdbac7 1001
44d3dec0
RS
1002 if (! r_alloc_initialized)
1003 r_alloc_init ();
1004
49f82b3d
RS
1005 if (!*ptr)
1006 return r_alloc (ptr, size);
1007 if (!size)
1008 {
1009 r_alloc_free (ptr);
1010 return r_alloc (ptr, 0);
1011 }
1012
16a5c729
JB
1013 bloc = find_bloc (ptr);
1014 if (bloc == NIL_BLOC)
dcfdbac7
JB
1015 abort ();
1016
49f82b3d
RS
1017 if (size < bloc->size)
1018 {
1019 /* Wouldn't it be useful to actually resize the bloc here? */
1020 /* I think so too, but not if it's too expensive... */
1021 if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
1022 && r_alloc_freeze_level == 0)
1023 {
1024 resize_bloc (bloc, MEM_ROUNDUP (size));
1025 /* Never mind if this fails, just do nothing... */
1026 /* It *should* be infallible! */
1027 }
1028 }
1029 else if (size > bloc->size)
1030 {
1031 if (r_alloc_freeze_level)
1032 {
1033 bloc_ptr new_bloc;
1034 new_bloc = get_bloc (MEM_ROUNDUP (size));
1035 if (new_bloc)
1036 {
1037 new_bloc->variable = ptr;
1038 *ptr = new_bloc->data;
1039 bloc->variable = (POINTER *) NIL;
1040 }
1041 else
1042 return NIL;
1043 }
1044 else
1045 {
1046 if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
1047 return NIL;
1048 }
1049 }
dcfdbac7
JB
1050 return *ptr;
1051}
81bd58e8
KH
1052
1053/* Disable relocations, after making room for at least SIZE bytes
1054 of non-relocatable heap if possible. The relocatable blocs are
1055 guaranteed to hold still until thawed, even if this means that
1056 malloc must return a null pointer. */
abe9ff32 1057
81bd58e8
KH
1058void
1059r_alloc_freeze (size)
1060 long size;
1061{
44d3dec0
RS
1062 if (! r_alloc_initialized)
1063 r_alloc_init ();
1064
81bd58e8
KH
1065 /* If already frozen, we can't make any more room, so don't try. */
1066 if (r_alloc_freeze_level > 0)
1067 size = 0;
1068 /* If we can't get the amount requested, half is better than nothing. */
1069 while (size > 0 && r_alloc_sbrk (size) == 0)
1070 size /= 2;
1071 ++r_alloc_freeze_level;
1072 if (size > 0)
1073 r_alloc_sbrk (-size);
1074}
1075
1076void
1077r_alloc_thaw ()
1078{
49f82b3d
RS
1079
1080 if (! r_alloc_initialized)
1081 r_alloc_init ();
1082
81bd58e8
KH
1083 if (--r_alloc_freeze_level < 0)
1084 abort ();
49f82b3d 1085
8e6208c5 1086 /* This frees all unused blocs. It is not too inefficient, as the resize
49f82b3d
RS
1087 and bcopy is done only once. Afterwards, all unreferenced blocs are
1088 already shrunk to zero size. */
1089 if (!r_alloc_freeze_level)
1090 {
1091 bloc_ptr *b = &first_bloc;
1092 while (*b)
1093 if (!(*b)->variable)
1094 free_bloc (*b);
1095 else
1096 b = &(*b)->next;
1097 }
81bd58e8 1098}
49f82b3d 1099
dcfdbac7
JB
1100\f
1101/* The hook `malloc' uses for the function which gets more space
1102 from the system. */
1103extern POINTER (*__morecore) ();
1104
abe9ff32 1105/* Initialize various things for memory allocation. */
dcfdbac7 1106
2c46d29f
RS
1107static void
1108r_alloc_init ()
dcfdbac7 1109{
2c46d29f 1110 if (r_alloc_initialized)
dcfdbac7
JB
1111 return;
1112
2c46d29f 1113 r_alloc_initialized = 1;
bbc60227 1114 real_morecore = __morecore;
dcfdbac7 1115 __morecore = r_alloc_sbrk;
8c7f1e35 1116
e429caa2
KH
1117 first_heap = last_heap = &heap_base;
1118 first_heap->next = first_heap->prev = NIL_HEAP;
1119 first_heap->start = first_heap->bloc_start
1120 = virtual_break_value = break_value = (*real_morecore) (0);
aef4d570 1121 if (break_value == NIL)
2c46d29f 1122 abort ();
8c7f1e35 1123
7516b7d5
RS
1124 page_size = PAGE;
1125 extra_bytes = ROUNDUP (50000);
1126
b0119c68
RS
1127#ifdef DOUG_LEA_MALLOC
1128 mallopt (M_TOP_PAD, 64 * 4096);
1129#else
49081834
RS
1130 /* Give GNU malloc's morecore some hysteresis
1131 so that we move all the relocatable blocks much less often. */
1132 __malloc_extra_blocks = 64;
b0119c68 1133#endif
49081834 1134
e429caa2 1135 first_heap->end = (POINTER) ROUNDUP (first_heap->start);
0e93a7cf
RS
1136
1137 /* The extra call to real_morecore guarantees that the end of the
1138 address space is a multiple of page_size, even if page_size is
1139 not really the page size of the system running the binary in
1140 which page_size is stored. This allows a binary to be built on a
1141 system with one page size and run on a system with a smaller page
abe9ff32 1142 size. */
e429caa2 1143 (*real_morecore) (first_heap->end - first_heap->start);
0e93a7cf 1144
2c46d29f
RS
1145 /* Clear the rest of the last page; this memory is in our address space
1146 even though it is after the sbrk value. */
0e93a7cf
RS
1147 /* Doubly true, with the additional call that explicitly adds the
1148 rest of that page to the address space. */
e429caa2
KH
1149 bzero (first_heap->start, first_heap->end - first_heap->start);
1150 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
dcfdbac7 1151 use_relocatable_buffers = 1;
2c46d29f 1152}
dec41418
RS
1153
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* If the hook is already ours, leave it alone; re-wrapping would
     make r_alloc_sbrk its own "real" morecore and loop forever, in
     case Emacs was linked statically.  */
  if (__morecore == r_alloc_sbrk)
    return;

  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;
}
#endif
1170
e429caa2
KH
#ifdef DEBUG
#include <assert.h>

/* Verify the internal consistency of the heap list and the bloc list
   (ordering, alignment, containment, and the break_value bookkeeping),
   aborting via assert on any violation.  Debug builds only.  */
void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  /* Global invariants on the heap chain's endpoints.  */
  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  /* Walk the heap list: back links, alignment, ordering, and that
     break_value lies inside some heap.  */
  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  /* Each non-first heap's descriptor lives at its own start.  */
	  assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  /* Walk the bloc list: back links, alignment, and that each bloc is
     wholly contained in one heap.  */
  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      /* A gap between consecutive blocs is only legitimate when the
	 bloc starts a new heap because it could not fit in any
	 earlier one.  */
      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  /* break_value must sit exactly past the last bloc (or at the start
     of bloc space when there are none).  */
  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */