/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD           -2
extern int mallopt ();
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */


#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init ();
\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
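
/* A worked example of the rounding macros (illustrative only, not
   from the original source), assuming page_size == 4096 and
   MEM_ALIGN == sizeof (double) == 8:

     ROUNDUP (5000)    == 8192   next multiple of the page size
     ROUNDUP (4096)    == 4096   already page-aligned
     ALIGNED (8192)    is true,  ALIGNED (5000) is false
     MEM_ROUNDUP (13)  == 16     next multiple of MEM_ALIGN

   The bit trick works because both boundaries are powers of two:
   adding (boundary - 1) and masking with ~(boundary - 1) rounds up
   to the next multiple of the boundary.  */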

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif


\f
/***********************************************************************
		      Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
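
/* A sketch of one heap's layout (an illustrative diagram, not from
   the original source):

     start               bloc_start                 free           end
       |                     |                        |             |
       v                     v                        v             v
       +---------------------+------------------------+-------------+
       |     malloc data     |   relocatable blocs    |   unused    |
       +---------------------+------------------------+-------------+

   Additional heaps, when needed, are linked through `next'/`prev',
   and each one keeps its own `struct heap' header at the beginning
   of the space it covers.  */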

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))
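
/* Illustration of the bloc-list invariant (illustrative, not from the
   original source): live blocs abut each other in address order, so
   with three blocs b1, b2, b3 on the list,

     b1->data + b1->size == b2->data
     b2->data + b2->size == b3->data

   and, when b3 is last_bloc, break_value == b3->data + b3->size,
   which is what r_alloc_check asserts in the DEBUG build below.  */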

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is
   in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
        break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
        {
          /* Someone else called sbrk.  Make a new heap.  */

          heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
          POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

          if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
            return 0;

          new_heap->start = new;
          new_heap->end = bloc_start;
          new_heap->bloc_start = bloc_start;
          new_heap->free = bloc_start;
          new_heap->next = NIL_HEAP;
          new_heap->prev = last_heap;
          new_heap->first_bloc = NIL_BLOC;
          new_heap->last_bloc = NIL_BLOC;
          last_heap->next = new_heap;
          last_heap = new_heap;

          address = bloc_start;
          already_available = 0;
        }

      /* Add space to the last heap (which we may have just created).
         Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
        - (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
        return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
                                            ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
         And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
        {
          /* This heap should have no blocs in it.  */
          if (last_heap->first_bloc != NIL_BLOC
              || last_heap->last_bloc != NIL_BLOC)
            abort ();

          /* Return the last heap, with its header, to the system.  */
          excess = (char *) last_heap->end - (char *) last_heap->start;
          last_heap = last_heap->prev;
          last_heap->next = NIL_HEAP;
        }
      else
        {
          excess = (char *) last_heap->end
            - (char *) ROUNDUP ((char *) last_heap->end - excess);
          last_heap->end = (char *) last_heap->end - excess;
        }

      if ((*real_morecore) (- excess) == 0)
        {
          /* If the system didn't want that much memory back, adjust
             the end of the last heap to reflect that.  This can occur
             if break_value is still within the original data segment.  */
          last_heap->end = (char *) last_heap->end + excess;
          /* Make sure that the result of the adjustment is accurate.
             It should be, for the else clause above; the other case,
             which returns the entire last heap to the system, seems
             unlikely to trigger this mode of failure.  */
          if (last_heap->end != (*real_morecore) (0))
            abort ();
        }
    }
}
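
/* A worked example of the relinquish hysteresis (illustrative, not
   from the original source), assuming a 4K page size: extra_bytes is
   ROUNDUP (50000) == 53248 bytes (13 pages), so heaps are only shrunk
   once more than extra_bytes * 2 == 106496 bytes beyond break_value
   sit unused, and extra_bytes of slack is always kept.  This avoids
   bouncing memory back and forth to the system on every small change
   of break_value.  */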

/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return (char *) break_value - (char *) virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
        return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
        free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
         move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
        {
          heap = heap->next;
          if (heap == NIL_HEAP)
            break;
          address = heap->bloc_start;
        }

      /* If BLOC won't fit in any heap,
         get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
        {
          register bloc_ptr tb = b;
          register SIZE s = 0;

          /* Add up the size of all the following blocs.  */
          while (tb != NIL_BLOC)
            {
              if (tb->variable)
                s += tb->size;

              tb = tb->next;
            }

          /* Get that space.  */
          address = obtain (address, s);
          if (address == 0)
            return 0;

          heap = last_heap;
        }

      /* Record the new address of this bloc
         and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
        address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
         till we get to the one that B is in.  */
      while (heap)
        {
          if (heap->bloc_start <= b->data && b->data <= heap->end)
            break;
          heap = heap->next;
          /* We know HEAP is not null now,
             because there has to be space for bloc B.  */
          heap->first_bloc = NIL_BLOC;
          heap->last_bloc = NIL_BLOC;
          heap->free = heap->bloc_start;
        }

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
        heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
        break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
             : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
        break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
      if (!bloc->variable)
        {
          bloc->size = 0;
          bloc->data = bloc->new_data;
        }
      else
        {
          safe_bcopy (bloc->data, bloc->new_data, old_size);
          bzero ((char *) bloc->new_data + old_size, size - old_size);
          *bloc->variable = bloc->data = bloc->new_data;
        }
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
                 : (char *) first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
        heap->first_bloc = bloc->next;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
        heap->last_bloc = bloc->prev;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
         extra space if we passed an unaligned one.  But we could not
         always find a space which is contiguous to the previous one.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
        {
          h = h->next;
          if (h == NIL_HEAP)
            break;
          address = (POINTER) ROUNDUP (h->start);
        }

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
        {
          get += extra_bytes + page_size;

          if (! obtain (address, get))
            return 0;

          if (first_heap == last_heap)
            address = (POINTER) ROUNDUP (virtual_break_value);
          else
            address = (POINTER) ROUNDUP (last_heap->start);
          h = last_heap;
        }

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
        {
          /* This is no clean solution - no idea how to do it better.  */
          if (r_alloc_freeze_level)
            return NIL;

          /* There is a bug here: if the above obtain call succeeded, but the
             relocate_blocs call below does not succeed, we need to free
             the memory that we got with obtain.  */

          /* Move all blocs upward.  */
          if (! relocate_blocs (first_bloc, h, new_bloc_start))
            return 0;

          /* Note that (POINTER)(h+1) <= new_bloc_start since
             get >= page_size, so the following does not destroy the heap
             header.  */
          for (b = last_bloc; b != NIL_BLOC; b = b->prev)
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }

          h->bloc_start = new_bloc_start;

          update_heap_bloc_correspondence (first_bloc, h);
        }
      if (h != first_heap)
        {
          /* Give up managing heaps below the one the new
             virtual_break_value points to.  */
          first_heap->prev = NIL_HEAP;
          first_heap->next = h->next;
          first_heap->start = h->start;
          first_heap->end = h->end;
          first_heap->free = h->free;
          first_heap->first_bloc = h->first_bloc;
          first_heap->last_bloc = h->last_bloc;
          first_heap->bloc_start = h->bloc_start;

          if (first_heap->next)
            first_heap->next->prev = first_heap;
          else
            last_heap = first_heap;
        }

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
        - ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
        {
          excess -= extra_bytes;
          first_heap->bloc_start
            = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

          relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

          for (b = first_bloc; b != NIL_BLOC; b = b->next)
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
        {
          /* We found an additional space below the first heap.  */
          first_heap->start = (POINTER) ((char *) virtual_break_value + size);
        }
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
                 ? (char *) last_bloc->data + last_bloc->size
                 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}


/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
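
/* An illustrative sketch of how a caller uses these interface
   routines (not code from Emacs itself):

     POINTER base;

     if (r_alloc (&base, 1024) == 0)
       handle_allocation_failure ();

   Any later r_alloc, r_re_alloc, or r_alloc_free call, or a malloc
   that reaches r_alloc_sbrk through the __morecore hook, may relocate
   the bloc; base is then updated through the registered variable, so
   the caller must reload it after such calls rather than caching it.

     r_re_alloc (&base, 4096);       grow; *ptr is updated in place
     r_alloc_free (&base);           free; also stores 0 in base  */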

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
          && r_alloc_freeze_level == 0)
        {
          resize_bloc (bloc, MEM_ROUNDUP (size));
          /* Never mind if this fails, just do nothing...  */
          /* It *should* be infallible!  */
        }
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
        {
          bloc_ptr new_bloc;
          new_bloc = get_bloc (MEM_ROUNDUP (size));
          if (new_bloc)
            {
              new_bloc->variable = ptr;
              *ptr = new_bloc->data;
              bloc->variable = (POINTER *) NIL;
            }
          else
            return NIL;
        }
      else
        {
          if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
            return NIL;
        }
    }
  return *ptr;
}

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
        if (!(*b)->variable)
          free_bloc (*b);
        else
          b = &(*b)->next;
    }
}
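
/* Freeze/thaw calls nest, since r_alloc_freeze_level counts them.
   A sketch of the intended pairing (illustrative, not code from
   Emacs itself):

     r_alloc_freeze (16 * 1024);   try to reserve 16KB of ordinary
                                   heap, then forbid relocation
     ... code that holds raw pointers into relocatable blocs ...
     r_alloc_thaw ();              reenable relocation; frees any
                                   blocs released while frozen  */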


#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */

#ifdef DEBUG

#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
         the heap start has any sort of alignment.
         Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
        {
          assert (ph->end < h->start);
          assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
        }

      if (h->bloc_start <= break_value && break_value <= h->end)
        found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
        {
          if (h->bloc_start <= b->data && b->data + b->size <= h->end)
            break;
          ph = h;
        }

      assert (h);

      if (pb && pb->data + pb->size != b->data)
        {
          assert (ph && b->data == h->bloc_start);
          while (ph)
            {
              if (ph->bloc_start <= pb->data
                  && pb->data + pb->size <= ph->end)
                {
                  assert (pb->data + pb->size + b->size > ph->end);
                  break;
                }
              else
                {
                  assert (ph->bloc_start + b->size > ph->end);
                }
              ph = ph->prev;
            }
        }
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */


\f
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
         (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}

/* arch-tag: 6a524a15-faff-44c8-95d4-a5da6f55110f
   (do not change this comment) */