* net/browse-url.el (browse-url): Identify alist with "consp and
[bpt/emacs.git] / src / ralloc.c
CommitLineData
177c0ea7 1/* Block-relocating memory allocator.
429ab54e 2 Copyright (C) 1993, 1995, 2000, 2001, 2002, 2003, 2004,
76b6f707 3 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
dcfdbac7
JB
4
5This file is part of GNU Emacs.
6
9ec0b715 7GNU Emacs is free software: you can redistribute it and/or modify
dcfdbac7 8it under the terms of the GNU General Public License as published by
9ec0b715
GM
9the Free Software Foundation, either version 3 of the License, or
10(at your option) any later version.
dcfdbac7
JB
11
12GNU Emacs is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
9ec0b715 18along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>. */
dcfdbac7
JB
19
20/* NOTES:
21
eb8c3be9 22 Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
dcfdbac7 23 rather than all of them. This means allowing for a possible
abe9ff32 24 hole between the first bloc and the end of malloc storage. */
dcfdbac7 25
2c46d29f 26#ifdef emacs
aef4d570 27
18160b98 28#include <config.h>
956ace37 29#include "lisp.h" /* Needed for VALBITS. */
a4766fd5 30#include "blockinput.h"
0a58f946 31
642a1733
DL
32#ifdef HAVE_UNISTD_H
33#include <unistd.h>
34#endif
a8c0e5ea 35
0a58f946
GM
36typedef POINTER_TYPE *POINTER;
37typedef size_t SIZE;
f275fd9a 38
2c46d29f
RS
39/* Declared in dispnew.c, this version doesn't screw up if regions
40 overlap. */
0a58f946 41
2c46d29f 42extern void safe_bcopy ();
2c46d29f 43
b0119c68 44#ifdef DOUG_LEA_MALLOC
177c0ea7 45#define M_TOP_PAD -2
b0119c68 46extern int mallopt ();
0a58f946 47#else /* not DOUG_LEA_MALLOC */
a2c23c92 48#ifndef SYSTEM_MALLOC
b1685c5f 49extern size_t __malloc_extra_blocks;
a2c23c92 50#endif /* SYSTEM_MALLOC */
0a58f946 51#endif /* not DOUG_LEA_MALLOC */
49081834 52
d5179acc 53#else /* not emacs */
aef4d570 54
2c46d29f 55#include <stddef.h>
aef4d570 56
2c46d29f
RS
57typedef size_t SIZE;
58typedef void *POINTER;
aef4d570 59
aef4d570
RM
60#include <unistd.h>
61#include <malloc.h>
aef4d570 62
2c46d29f 63#define safe_bcopy(x, y, z) memmove (y, x, z)
d5179acc
RS
64#define bzero(x, len) memset (x, 0, len)
65
66#endif /* not emacs */
2c46d29f 67
0a58f946 68
d5179acc 69#include "getpagesize.h"
dcfdbac7
JB
70
71#define NIL ((POINTER) 0)
72
2c46d29f
RS
73/* A flag to indicate whether we have initialized ralloc yet. For
74 Emacs's sake, please do not make this local to malloc_init; on some
75 machines, the dumping procedure makes all static variables
76 read-only. On these machines, the word static is #defined to be
77 the empty string, meaning that r_alloc_initialized becomes an
0a58f946
GM
78 automatic variable, and loses its value each time Emacs is started
79 up. */
80
2c46d29f
RS
81static int r_alloc_initialized = 0;
82
83static void r_alloc_init ();
0a58f946 84
dcfdbac7 85\f
956ace37
JB
86/* Declarations for working with the malloc, ralloc, and system breaks. */
87
abe9ff32 88/* Function to set the real break value. */
321ed47b 89POINTER (*real_morecore) ();
dcfdbac7 90
abe9ff32 91/* The break value, as seen by malloc. */
dcfdbac7
JB
92static POINTER virtual_break_value;
93
abe9ff32
RS
94/* The address of the end of the last data in use by ralloc,
95 including relocatable blocs as well as malloc data. */
dcfdbac7
JB
96static POINTER break_value;
97
7516b7d5
RS
98/* This is the size of a page. We round memory requests to this boundary. */
99static int page_size;
100
177c0ea7 101/* Whenever we get memory from the system, get this many extra bytes. This
ad3bb3d2 102 must be a multiple of page_size. */
7516b7d5
RS
103static int extra_bytes;
104
dcfdbac7 105/* Macros for rounding. Note that rounding to any value is possible
abe9ff32 106 by changing the definition of PAGE. */
dcfdbac7 107#define PAGE (getpagesize ())
f7a009a5
RM
108#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
109#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
110 & ~(page_size - 1))
7516b7d5 111#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))
e429caa2
KH
112
113#define MEM_ALIGN sizeof(double)
114#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
115 & ~(MEM_ALIGN - 1))
0a58f946 116
aeac019e
GM
117/* The hook `malloc' uses for the function which gets more space
118 from the system. */
119
120#ifndef SYSTEM_MALLOC
121extern POINTER (*__morecore) ();
122#endif
123
124
e429caa2 125\f
0a58f946
GM
126/***********************************************************************
127 Implementation using sbrk
128 ***********************************************************************/
129
abe9ff32
RS
130/* Data structures of heaps and blocs. */
131
132/* The relocatable objects, or blocs, and the malloc data
133 both reside within one or more heaps.
134 Each heap contains malloc data, running from `start' to `bloc_start',
135 and relocatable objects, running from `bloc_start' to `free'.
136
137 Relocatable objects may relocate within the same heap
138 or may move into another heap; the heaps themselves may grow
139 but they never move.
140
141 We try to make just one heap and make it larger as necessary.
8e6208c5 142 But sometimes we can't do that, because we can't get contiguous
abe9ff32 143 space to add onto the heap. When that happens, we start a new heap. */
177c0ea7 144
e429caa2
KH
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.
     Malloc data occupies [start, bloc_start). */
  POINTER start;
  /* End of memory range of this heap. */
  POINTER end;
  /* Start of relocatable data in this heap.
     Relocatable blocs occupy [bloc_start, free). */
  POINTER bloc_start;
  /* Start of unused space in this heap. */
  POINTER free;
  /* First bloc in this heap. */
  struct bp *first_bloc;
  /* Last bloc in this heap. */
  struct bp *last_bloc;
} *heap_ptr;
162
163#define NIL_HEAP ((heap_ptr) 0)
164#define HEAP_PTR_SIZE (sizeof (struct heap))
165
abe9ff32
RS
166/* This is the first heap object.
167 If we need additional heap objects, each one resides at the beginning of
168 the space it covers. */
169static struct heap heap_base;
170
171/* Head and tail of the list of heaps. */
e429caa2
KH
172static heap_ptr first_heap, last_heap;
173
174/* These structures are allocated in the malloc arena.
175 The linked list is kept in order of increasing '.data' members.
176 The data blocks abut each other; if b->next is non-nil, then
177c0ea7 177 b->data + b->size == b->next->data.
49f82b3d
RS
178
179 An element with variable==NIL denotes a freed block, which has not yet
f96f2c5b
JB
180 been collected. They may only appear while r_alloc_freeze_level > 0,
181 and will be freed when the arena is thawed. Currently, these blocs are
182 not reusable, while the arena is frozen. Very inefficient. */
49f82b3d 183
e429caa2
KH
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  /* Address of the user's pointer variable that refers to this bloc's
     data; updated whenever the bloc is relocated.  NIL marks a freed
     bloc awaiting collection (only while r_alloc_freeze_level > 0). */
  POINTER *variable;
  /* Current address of the bloc's data. */
  POINTER data;
  /* Size of the data, in bytes. */
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap; 		/* Heap this bloc is in.  */
} *bloc_ptr;
194
195#define NIL_BLOC ((bloc_ptr) 0)
196#define BLOC_PTR_SIZE (sizeof (struct bp))
197
abe9ff32 198/* Head and tail of the list of relocatable blocs. */
e429caa2
KH
199static bloc_ptr first_bloc, last_bloc;
200
49f82b3d
RS
201static int use_relocatable_buffers;
202
203/* If >0, no relocation whatsoever takes place. */
204static int r_alloc_freeze_level;
205
dcfdbac7 206\f
956ace37
JB
207/* Functions to get and return memory from the system. */
208
abe9ff32
RS
209/* Find the heap that ADDRESS falls within. */
210
211static heap_ptr
212find_heap (address)
213 POINTER address;
214{
215 heap_ptr heap;
216
217 for (heap = last_heap; heap; heap = heap->prev)
218 {
219 if (heap->start <= address && address <= heap->end)
220 return heap;
221 }
222
223 return NIL_HEAP;
224}
225
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track
   of what space is in use.
abe9ff32 243
e429caa2
KH
static POINTER
obtain (address, size)
    POINTER address;
    SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  ADDRESS is required to
     be inside some heap; anything else indicates caller corruption.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.
	     The heap header itself lives at the start of the
	     new space, rounded up for alignment.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  /* Grow the break just enough to cover the new heap header.  */
	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
dcfdbac7 321
abe9ff32
RS
322/* Return unused heap space to the system
323 if there is a lot of unused space now.
324 This can make the last heap smaller;
325 it can also eliminate the last heap entirely. */
326
dcfdbac7 327static void
e429caa2 328relinquish ()
dcfdbac7 329{
e429caa2 330 register heap_ptr h;
8d31e373 331 long excess = 0;
e429caa2 332
abe9ff32
RS
333 /* Add the amount of space beyond break_value
334 in all heaps which have extend beyond break_value at all. */
335
e429caa2
KH
336 for (h = last_heap; h && break_value < h->end; h = h->prev)
337 {
338 excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
339 ? h->bloc_start : break_value);
340 }
341
342 if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
dcfdbac7 343 {
7516b7d5
RS
344 /* Keep extra_bytes worth of empty space.
345 And don't free anything unless we can free at least extra_bytes. */
e429caa2 346 excess -= extra_bytes;
dcfdbac7 347
e429caa2
KH
348 if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
349 {
47f13333
RS
350 /* This heap should have no blocs in it. */
351 if (last_heap->first_bloc != NIL_BLOC
352 || last_heap->last_bloc != NIL_BLOC)
353 abort ();
354
abe9ff32 355 /* Return the last heap, with its header, to the system. */
e429caa2
KH
356 excess = (char *)last_heap->end - (char *)last_heap->start;
357 last_heap = last_heap->prev;
358 last_heap->next = NIL_HEAP;
359 }
360 else
361 {
362 excess = (char *) last_heap->end
abe9ff32 363 - (char *) ROUNDUP ((char *)last_heap->end - excess);
91a211b5 364 last_heap->end = (char *) last_heap->end - excess;
e429caa2 365 }
dcfdbac7 366
e429caa2 367 if ((*real_morecore) (- excess) == 0)
21532667
KH
368 {
369 /* If the system didn't want that much memory back, adjust
370 the end of the last heap to reflect that. This can occur
371 if break_value is still within the original data segment. */
91a211b5 372 last_heap->end = (char *) last_heap->end + excess;
21532667
KH
373 /* Make sure that the result of the adjustment is accurate.
374 It should be, for the else clause above; the other case,
375 which returns the entire last heap to the system, seems
376 unlikely to trigger this mode of failure. */
377 if (last_heap->end != (*real_morecore) (0))
378 abort ();
379 }
e429caa2 380 }
dcfdbac7 381}
719b242f
RS
382
383/* Return the total size in use by relocating allocator,
384 above where malloc gets space. */
385
386long
387r_alloc_size_in_use ()
388{
91a211b5 389 return (char *) break_value - (char *) virtual_break_value;
719b242f 390}
dcfdbac7 391\f
956ace37
JB
392/* The meat - allocating, freeing, and relocating blocs. */
393
956ace37 394/* Find the bloc referenced by the address in PTR. Returns a pointer
abe9ff32 395 to that block. */
dcfdbac7
JB
396
397static bloc_ptr
398find_bloc (ptr)
399 POINTER *ptr;
400{
401 register bloc_ptr p = first_bloc;
402
403 while (p != NIL_BLOC)
404 {
747d9d14
JR
405 /* Consistency check. Don't return inconsistent blocs.
406 Don't abort here, as callers might be expecting this, but
407 callers that always expect a bloc to be returned should abort
408 if one isn't to avoid a memory corruption bug that is
409 difficult to track down. */
dcfdbac7
JB
410 if (p->variable == ptr && p->data == *ptr)
411 return p;
412
413 p = p->next;
414 }
415
416 return p;
417}
418
419/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
98b7fe02
JB
420 Returns a pointer to the new bloc, or zero if we couldn't allocate
421 memory for the new block. */
dcfdbac7
JB
422
static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  /* Get the bloc header from malloc and the data space from obtain.
     On failure of either, free the header (free (0) is harmless when
     the header allocation itself failed) and return zero.  */
  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
47f13333 470\f
abe9ff32
RS
471/* Calculate new locations of blocs in the list beginning with BLOC,
472 relocating it to start at ADDRESS, in heap HEAP. If enough space is
473 not presently available in our reserve, call obtain for
177c0ea7
JB
474 more space.
475
abe9ff32
RS
476 Store the new location of each bloc in its new_data field.
477 Do not touch the contents of blocs or break_value. */
dcfdbac7 478
e429caa2
KH
static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.
	     Freed blocs (variable == NIL) occupy no space
	     in the new layout.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
537
47f13333
RS
538/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
539 This is necessary if we put the memory of space of BLOC
540 before that of BEFORE. */
541
542static void
543reorder_bloc (bloc, before)
544 bloc_ptr bloc, before;
545{
546 bloc_ptr prev, next;
547
548 /* Splice BLOC out from where it is. */
549 prev = bloc->prev;
550 next = bloc->next;
551
552 if (prev)
553 prev->next = next;
554 if (next)
555 next->prev = prev;
556
557 /* Splice it in before BEFORE. */
558 prev = before->prev;
abe9ff32 559
47f13333
RS
560 if (prev)
561 prev->next = bloc;
562 bloc->prev = prev;
563
564 before->prev = bloc;
565 bloc->next = before;
566}
567\f
568/* Update the records of which heaps contain which blocs, starting
569 with heap HEAP and bloc BLOC. */
570
static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
47f13333 631\f
abe9ff32
RS
632/* Resize BLOC to SIZE bytes. This relocates the blocs
633 that come after BLOC in memory. */
634
e429caa2
KH
static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      /* Could not get room; undo the size change.  */
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      /* Blocs moved upward: copy from the last bloc downward so a
	 bloc's old data is never overwritten before it is copied.  */
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
            }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
            }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  /* Zero the newly added tail of the grown bloc.  */
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      /* Blocs moved downward: copy from BLOC forward.  */
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
            }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
47f13333 730\f
abe9ff32
RS
731/* Free BLOC from the chain of blocs, relocating any blocs above it.
732 This may return space to the system. */
dcfdbac7
JB
733
static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  /* While frozen, only mark the bloc as dead; it is collected when the
     arena is thawed.  */
  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  /* Shrink to zero size, sliding all following blocs down.  */
  resize_bloc (bloc, 0);

  /* Unlink BLOC from the global doubly-linked list of blocs.  */
  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  /* Possibly give freed heap space back to the system.  */
  relinquish ();
  free (bloc);
}
787\f
956ace37
JB
788/* Interface routines. */
789
98b7fe02 790/* Obtain SIZE bytes of storage from the free pool, or the system, as
2c46d29f 791 necessary. If relocatable blocs are in use, this means relocating
98b7fe02
JB
792 them. This function gets plugged into the GNU malloc's __morecore
793 hook.
794
7516b7d5
RS
795 We provide hysteresis, never relocating by less than extra_bytes.
796
98b7fe02
JB
797 If we're out of memory, we should return zero, to imitate the other
798 __morecore hook values - in particular, __default_morecore in the
799 GNU malloc package. */
dcfdbac7 800
177c0ea7 801POINTER
dcfdbac7
JB
802r_alloc_sbrk (size)
803 long size;
804{
e429caa2
KH
805 register bloc_ptr b;
806 POINTER address;
dcfdbac7 807
44d3dec0
RS
808 if (! r_alloc_initialized)
809 r_alloc_init ();
810
dcfdbac7 811 if (! use_relocatable_buffers)
bbc60227 812 return (*real_morecore) (size);
dcfdbac7 813
e429caa2
KH
814 if (size == 0)
815 return virtual_break_value;
7516b7d5 816
e429caa2 817 if (size > 0)
dcfdbac7 818 {
abe9ff32
RS
819 /* Allocate a page-aligned space. GNU malloc would reclaim an
820 extra space if we passed an unaligned one. But we could
8e6208c5 821 not always find a space which is contiguous to the previous. */
e429caa2
KH
822 POINTER new_bloc_start;
823 heap_ptr h = first_heap;
abe9ff32 824 SIZE get = ROUNDUP (size);
7516b7d5 825
abe9ff32 826 address = (POINTER) ROUNDUP (virtual_break_value);
e429caa2 827
abe9ff32
RS
828 /* Search the list upward for a heap which is large enough. */
829 while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
e429caa2
KH
830 {
831 h = h->next;
832 if (h == NIL_HEAP)
833 break;
abe9ff32 834 address = (POINTER) ROUNDUP (h->start);
e429caa2
KH
835 }
836
abe9ff32 837 /* If not found, obtain more space. */
e429caa2
KH
838 if (h == NIL_HEAP)
839 {
840 get += extra_bytes + page_size;
841
49f82b3d 842 if (! obtain (address, get))
e429caa2 843 return 0;
98b7fe02 844
e429caa2 845 if (first_heap == last_heap)
abe9ff32 846 address = (POINTER) ROUNDUP (virtual_break_value);
e429caa2 847 else
abe9ff32 848 address = (POINTER) ROUNDUP (last_heap->start);
e429caa2
KH
849 h = last_heap;
850 }
851
abe9ff32 852 new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);
e429caa2
KH
853
854 if (first_heap->bloc_start < new_bloc_start)
855 {
49f82b3d 856 /* This is no clean solution - no idea how to do it better. */
177c0ea7 857 if (r_alloc_freeze_level)
49f82b3d
RS
858 return NIL;
859
860 /* There is a bug here: if the above obtain call succeeded, but the
861 relocate_blocs call below does not succeed, we need to free
862 the memory that we got with obtain. */
863
abe9ff32 864 /* Move all blocs upward. */
49f82b3d 865 if (! relocate_blocs (first_bloc, h, new_bloc_start))
e429caa2
KH
866 return 0;
867
868 /* Note that (POINTER)(h+1) <= new_bloc_start since
869 get >= page_size, so the following does not destroy the heap
abe9ff32 870 header. */
e429caa2
KH
871 for (b = last_bloc; b != NIL_BLOC; b = b->prev)
872 {
873 safe_bcopy (b->data, b->new_data, b->size);
874 *b->variable = b->data = b->new_data;
875 }
876
877 h->bloc_start = new_bloc_start;
abe9ff32 878
47f13333 879 update_heap_bloc_correspondence (first_bloc, h);
e429caa2 880 }
e429caa2
KH
881 if (h != first_heap)
882 {
883 /* Give up managing heaps below the one the new
abe9ff32 884 virtual_break_value points to. */
e429caa2
KH
885 first_heap->prev = NIL_HEAP;
886 first_heap->next = h->next;
887 first_heap->start = h->start;
888 first_heap->end = h->end;
abe9ff32 889 first_heap->free = h->free;
47f13333
RS
890 first_heap->first_bloc = h->first_bloc;
891 first_heap->last_bloc = h->last_bloc;
e429caa2
KH
892 first_heap->bloc_start = h->bloc_start;
893
894 if (first_heap->next)
895 first_heap->next->prev = first_heap;
896 else
897 last_heap = first_heap;
898 }
899
900 bzero (address, size);
dcfdbac7 901 }
e429caa2 902 else /* size < 0 */
dcfdbac7 903 {
e429caa2
KH
904 SIZE excess = (char *)first_heap->bloc_start
905 - ((char *)virtual_break_value + size);
906
907 address = virtual_break_value;
908
909 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
910 {
911 excess -= extra_bytes;
912 first_heap->bloc_start
47f13333 913 = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);
e429caa2 914
abe9ff32 915 relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);
7516b7d5 916
e429caa2
KH
917 for (b = first_bloc; b != NIL_BLOC; b = b->next)
918 {
919 safe_bcopy (b->data, b->new_data, b->size);
920 *b->variable = b->data = b->new_data;
921 }
922 }
923
924 if ((char *)virtual_break_value + size < (char *)first_heap->start)
925 {
926 /* We found an additional space below the first heap */
927 first_heap->start = (POINTER) ((char *)virtual_break_value + size);
928 }
dcfdbac7
JB
929 }
930
e429caa2 931 virtual_break_value = (POINTER) ((char *)address + size);
47f13333 932 break_value = (last_bloc
91a211b5
GM
933 ? (char *) last_bloc->data + last_bloc->size
934 : (char *) first_heap->bloc_start);
e429caa2 935 if (size < 0)
abe9ff32 936 relinquish ();
7516b7d5 937
e429caa2 938 return address;
dcfdbac7
JB
939}
940
0a58f946 941
dcfdbac7
JB
942/* Allocate a relocatable bloc of storage of size SIZE. A pointer to
943 the data is returned in *PTR. PTR is thus the address of some variable
98b7fe02
JB
944 which will use the data area.
945
49f82b3d 946 The allocation of 0 bytes is valid.
f96f2c5b
JB
947 In case r_alloc_freeze_level is set, a best fit of unused blocs could be
948 done before allocating a new area. Not yet done.
49f82b3d 949
98b7fe02
JB
950 If we can't allocate the necessary memory, set *PTR to zero, and
951 return zero. */
dcfdbac7
JB
952
953POINTER
954r_alloc (ptr, size)
955 POINTER *ptr;
956 SIZE size;
957{
958 register bloc_ptr new_bloc;
959
2c46d29f
RS
960 if (! r_alloc_initialized)
961 r_alloc_init ();
962
abe9ff32 963 new_bloc = get_bloc (MEM_ROUNDUP (size));
98b7fe02
JB
964 if (new_bloc)
965 {
966 new_bloc->variable = ptr;
967 *ptr = new_bloc->data;
968 }
969 else
970 *ptr = 0;
dcfdbac7
JB
971
972 return *ptr;
973}
974
2c46d29f
RS
975/* Free a bloc of relocatable storage whose data is pointed to by PTR.
976 Store 0 in *PTR to show there's no block allocated. */
dcfdbac7
JB
977
978void
979r_alloc_free (ptr)
980 register POINTER *ptr;
981{
982 register bloc_ptr dead_bloc;
983
44d3dec0
RS
984 if (! r_alloc_initialized)
985 r_alloc_init ();
986
dcfdbac7
JB
987 dead_bloc = find_bloc (ptr);
988 if (dead_bloc == NIL_BLOC)
747d9d14 989 abort (); /* Double free? PTR not originally used to allocate? */
dcfdbac7
JB
990
991 free_bloc (dead_bloc);
2c46d29f 992 *ptr = 0;
719b242f 993
d5179acc 994#ifdef emacs
719b242f 995 refill_memory_reserve ();
d5179acc 996#endif
dcfdbac7
JB
997}
998
16a5c729 999/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
98b7fe02
JB
1000 Do this by shifting all blocks above this one up in memory, unless
1001 SIZE is less than or equal to the current bloc size, in which case
1002 do nothing.
dcfdbac7 1003
f96f2c5b 1004 In case r_alloc_freeze_level is set, a new bloc is allocated, and the
8e6208c5 1005 memory copied to it. Not very efficient. We could traverse the
49f82b3d
RS
1006 bloc_list for a best fit of free blocs first.
1007
98b7fe02
JB
1008 Change *PTR to reflect the new bloc, and return this value.
1009
1010 If more memory cannot be allocated, then leave *PTR unchanged, and
1011 return zero. */
dcfdbac7
JB
1012
1013POINTER
1014r_re_alloc (ptr, size)
1015 POINTER *ptr;
1016 SIZE size;
1017{
16a5c729 1018 register bloc_ptr bloc;
dcfdbac7 1019
44d3dec0
RS
1020 if (! r_alloc_initialized)
1021 r_alloc_init ();
1022
49f82b3d
RS
1023 if (!*ptr)
1024 return r_alloc (ptr, size);
177c0ea7 1025 if (!size)
49f82b3d
RS
1026 {
1027 r_alloc_free (ptr);
1028 return r_alloc (ptr, 0);
1029 }
1030
16a5c729
JB
1031 bloc = find_bloc (ptr);
1032 if (bloc == NIL_BLOC)
747d9d14 1033 abort (); /* Already freed? PTR not originally used to allocate? */
dcfdbac7 1034
177c0ea7 1035 if (size < bloc->size)
49f82b3d
RS
1036 {
1037 /* Wouldn't it be useful to actually resize the bloc here? */
1038 /* I think so too, but not if it's too expensive... */
177c0ea7
JB
1039 if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
1040 && r_alloc_freeze_level == 0)
49f82b3d
RS
1041 {
1042 resize_bloc (bloc, MEM_ROUNDUP (size));
1043 /* Never mind if this fails, just do nothing... */
1044 /* It *should* be infallible! */
1045 }
1046 }
1047 else if (size > bloc->size)
1048 {
1049 if (r_alloc_freeze_level)
1050 {
1051 bloc_ptr new_bloc;
1052 new_bloc = get_bloc (MEM_ROUNDUP (size));
1053 if (new_bloc)
1054 {
1055 new_bloc->variable = ptr;
1056 *ptr = new_bloc->data;
1057 bloc->variable = (POINTER *) NIL;
1058 }
1059 else
1060 return NIL;
1061 }
177c0ea7 1062 else
49f82b3d
RS
1063 {
1064 if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
1065 return NIL;
1066 }
1067 }
dcfdbac7
JB
1068 return *ptr;
1069}
81bd58e8
KH
1070
1071/* Disable relocations, after making room for at least SIZE bytes
1072 of non-relocatable heap if possible. The relocatable blocs are
1073 guaranteed to hold still until thawed, even if this means that
1074 malloc must return a null pointer. */
abe9ff32 1075
81bd58e8
KH
1076void
1077r_alloc_freeze (size)
1078 long size;
1079{
44d3dec0
RS
1080 if (! r_alloc_initialized)
1081 r_alloc_init ();
1082
81bd58e8
KH
1083 /* If already frozen, we can't make any more room, so don't try. */
1084 if (r_alloc_freeze_level > 0)
1085 size = 0;
1086 /* If we can't get the amount requested, half is better than nothing. */
1087 while (size > 0 && r_alloc_sbrk (size) == 0)
1088 size /= 2;
1089 ++r_alloc_freeze_level;
1090 if (size > 0)
1091 r_alloc_sbrk (-size);
1092}
1093
1094void
1095r_alloc_thaw ()
1096{
49f82b3d 1097
177c0ea7 1098 if (! r_alloc_initialized)
49f82b3d
RS
1099 r_alloc_init ();
1100
81bd58e8
KH
1101 if (--r_alloc_freeze_level < 0)
1102 abort ();
49f82b3d 1103
177c0ea7
JB
1104 /* This frees all unused blocs. It is not too inefficient, as the resize
1105 and bcopy is done only once. Afterwards, all unreferenced blocs are
49f82b3d 1106 already shrunk to zero size. */
177c0ea7 1107 if (!r_alloc_freeze_level)
49f82b3d
RS
1108 {
1109 bloc_ptr *b = &first_bloc;
177c0ea7
JB
1110 while (*b)
1111 if (!(*b)->variable)
1112 free_bloc (*b);
1113 else
49f82b3d
RS
1114 b = &(*b)->next;
1115 }
81bd58e8 1116}
49f82b3d 1117
dec41418
RS
1118
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinstall the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU
   libc.  */
void
r_alloc_reinit ()
{
  /* Act only when the hook has been reset; otherwise a statically
     linked Emacs would send r_alloc_sbrk into an infinite loop.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
dec41418 1136
#ifdef DEBUG

#include <assert.h>

/* Consistency check over the heap list and the bloc list, asserting
   the invariants the allocator relies on: heaps are ordered and
   aligned, every bloc lies inside some heap, consecutive blocs are
   packed as tightly as the heap boundaries allow, and the break value
   sits just past the last bloc.  */
void
r_alloc_check ()
{
  int saw_break = 0;
  heap_ptr heap, prev_heap = 0;
  bloc_ptr bloc, prev_bloc = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  /* Walk the heap chain, checking linkage, alignment, and ordering.  */
  for (heap = first_heap; heap; heap = heap->next)
    {
      assert (heap->prev == prev_heap);
      assert ((POINTER) ROUNDUP (heap->end) == heap->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (heap->start) == heap->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (heap->bloc_start) == heap->bloc_start);
      assert (heap->start <= heap->bloc_start && heap->bloc_start <= heap->end);

      if (prev_heap)
	{
	  assert (prev_heap->end < heap->start);
	  assert (heap->start <= (POINTER) heap
		  && (POINTER) (heap + 1) <= heap->bloc_start);
	}

      if (heap->bloc_start <= break_value && break_value <= heap->end)
	saw_break = 1;

      prev_heap = heap;
    }

  assert (saw_break);
  assert (last_heap == prev_heap);

  /* Walk the bloc chain, checking linkage, alignment, and that each
     bloc fits inside a heap.  */
  for (bloc = first_bloc; bloc; bloc = bloc->next)
    {
      assert (bloc->prev == prev_bloc);
      assert ((POINTER) MEM_ROUNDUP (bloc->data) == bloc->data);
      assert ((SIZE) MEM_ROUNDUP (bloc->size) == bloc->size);

      /* Find the heap containing this bloc's data.  */
      prev_heap = 0;
      for (heap = first_heap; heap; heap = heap->next)
	{
	  if (heap->bloc_start <= bloc->data
	      && bloc->data + bloc->size <= heap->end)
	    break;
	  prev_heap = heap;
	}

      assert (heap);

      /* A gap before this bloc is legitimate only when the bloc had
	 to start a new heap because it couldn't fit in an earlier
	 one.  */
      if (prev_bloc && prev_bloc->data + prev_bloc->size != bloc->data)
	{
	  assert (prev_heap && bloc->data == heap->bloc_start);
	  while (prev_heap)
	    {
	      if (prev_heap->bloc_start <= prev_bloc->data
		  && prev_bloc->data + prev_bloc->size <= prev_heap->end)
		{
		  assert (prev_bloc->data + prev_bloc->size + bloc->size
			  > prev_heap->end);
		  break;
		}
	      else
		{
		  assert (prev_heap->bloc_start + bloc->size > prev_heap->end);
		}
	      prev_heap = prev_heap->prev;
	    }
	}
      prev_bloc = bloc;
    }

  assert (last_bloc == prev_bloc);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */
0a58f946 1230
baae5c2d
JR
1231/* Update the internal record of which variable points to some data to NEW.
1232 Used by buffer-swap-text in Emacs to restore consistency after it
1233 swaps the buffer text between two buffer objects. The OLD pointer
1234 is checked to ensure that memory corruption does not occur due to
1235 misuse. */
1236void
1237r_alloc_reset_variable (old, new)
1238 POINTER *old, *new;
1239{
1240 bloc_ptr bloc = first_bloc;
1241
1242 /* Find the bloc that corresponds to the data pointed to by pointer.
1243 find_bloc cannot be used, as it has internal consistency checks
1244 which fail when the variable needs reseting. */
1245 while (bloc != NIL_BLOC)
1246 {
1247 if (bloc->data == *new)
1248 break;
1249
1250 bloc = bloc->next;
1251 }
1252
1253 if (bloc == NIL_BLOC || bloc->variable != old)
747d9d14 1254 abort (); /* Already freed? OLD not originally used to allocate? */
baae5c2d
JR
1255
1256 /* Update variable to point to the new location. */
1257 bloc->variable = new;
1258}
0a58f946
GM
1259
1260\f
1261/***********************************************************************
1262 Initialization
1263 ***********************************************************************/
1264
0a58f946
GM
1265/* Initialize various things for memory allocation. */
1266
1267static void
1268r_alloc_init ()
1269{
1270 if (r_alloc_initialized)
1271 return;
0a58f946 1272 r_alloc_initialized = 1;
177c0ea7 1273
a2c23c92
DL
1274 page_size = PAGE;
1275#ifndef SYSTEM_MALLOC
0a58f946
GM
1276 real_morecore = __morecore;
1277 __morecore = r_alloc_sbrk;
1278
1279 first_heap = last_heap = &heap_base;
1280 first_heap->next = first_heap->prev = NIL_HEAP;
1281 first_heap->start = first_heap->bloc_start
1282 = virtual_break_value = break_value = (*real_morecore) (0);
1283 if (break_value == NIL)
1284 abort ();
1285
0a58f946 1286 extra_bytes = ROUNDUP (50000);
a2c23c92 1287#endif
0a58f946
GM
1288
1289#ifdef DOUG_LEA_MALLOC
1673df2e
JD
1290 BLOCK_INPUT;
1291 mallopt (M_TOP_PAD, 64 * 4096);
1292 UNBLOCK_INPUT;
0a58f946 1293#else
a2c23c92 1294#ifndef SYSTEM_MALLOC
0a58f946
GM
1295 /* Give GNU malloc's morecore some hysteresis
1296 so that we move all the relocatable blocks much less often. */
1297 __malloc_extra_blocks = 64;
1298#endif
a2c23c92 1299#endif
0a58f946 1300
5ad25b24 1301#ifndef SYSTEM_MALLOC
0a58f946
GM
1302 first_heap->end = (POINTER) ROUNDUP (first_heap->start);
1303
1304 /* The extra call to real_morecore guarantees that the end of the
1305 address space is a multiple of page_size, even if page_size is
1306 not really the page size of the system running the binary in
1307 which page_size is stored. This allows a binary to be built on a
1308 system with one page size and run on a system with a smaller page
1309 size. */
91a211b5 1310 (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);
0a58f946
GM
1311
1312 /* Clear the rest of the last page; this memory is in our address space
1313 even though it is after the sbrk value. */
1314 /* Doubly true, with the additional call that explicitly adds the
1315 rest of that page to the address space. */
91a211b5
GM
1316 bzero (first_heap->start,
1317 (char *) first_heap->end - (char *) first_heap->start);
0a58f946 1318 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
a2c23c92 1319#endif
177c0ea7 1320
0a58f946
GM
1321 use_relocatable_buffers = 1;
1322}
ab5796a9
MB
1323
1324/* arch-tag: 6a524a15-faff-44c8-95d4-a5da6f55110f
1325 (do not change this comment) */