/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD	-2
extern int mallopt ();
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#define safe_bcopy(x, y, z)  memmove (y, x, z)
#define bzero(x, len)  memset (x, 0, len)

#endif /* not emacs */


#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init ();

\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
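
/* For illustration (editor's sketch, assuming a hypothetical 4096-byte
   page and 8-byte MEM_ALIGN): ROUNDUP (5000) yields 8192,
   ROUND_TO_PAGE (0x1801) yields 0x1000, and MEM_ROUNDUP (13) yields 16.
   The mask arithmetic is valid only because page_size and MEM_ALIGN
   are powers of two.  */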

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif


\f
/***********************************************************************
			 Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;
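
/* A sketch of one heap's layout, per the description above
   (illustrative, not to scale):

     start          bloc_start              free            end
       | malloc data |  relocatable blocs    |    unused     |

   first_bloc and last_bloc delimit the blocs currently living in
   this heap.  */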

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs
   are not reusable while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))
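
/* For example (addresses hypothetical, editor's note): if a bloc with
   data == 0x1000 and size == 0x200 has a successor in the list, that
   successor's data field must be 0x1200, since the data blocks abut
   each other.  */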

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It is the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
			- (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}

/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return (char *) break_value - (char *) virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
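
/* To illustrate the hysteresis (editor's sketch): with extra_bytes set
   to ROUNDUP (50000) in r_alloc_init, shrinking the break by a few
   bytes leaves all blocs where they are; blocs are moved downward only
   once the accumulated slack exceeds 2 * extra_bytes, in the size < 0
   branch below.  */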

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could not
	 always find a space which is contiguous to the previous one.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		      - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}


/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could
   be done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
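
/* A typical calling pattern (editor's illustrative sketch, not from
   this file):

     POINTER p;
     r_alloc (&p, 1000);	-- p now points at 1000 relocatable bytes
     r_re_alloc (&p, 2000);	-- may move the data; p is updated
     r_alloc_free (&p);		-- frees the bloc and resets p to 0

   Since blocs may relocate, callers must always go through the
   registered variable rather than a saved copy of the pointer.  */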

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
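
/* Illustrative pairing (hypothetical caller, editor's sketch):

     r_alloc_freeze (0);	-- pin every bloc in place
     ... code that may hold raw pointers into relocatable data ...
     r_alloc_thaw ();		-- blocs freed meanwhile are reclaimed here

   Freeze calls nest: blocs move again only when the level returns
   to zero.  */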


#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */

#ifdef DEBUG

#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */


\f
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  mallopt (M_TOP_PAD, 64 * 4096);
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
	 (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}