/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000, 2001, 2002, 2003, 2004,
     2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include <setjmp.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt ();
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */


#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init ();

\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
                       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int) (addr) + MEM_ALIGN - 1) \
                           & ~(MEM_ALIGN - 1))
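
/* Illustrative arithmetic (added commentary, not from the original
   source): with page_size == 4096 and MEM_ALIGN == 8,

     ROUNDUP (1)    == 4096      MEM_ROUNDUP (1)  == 8
     ROUNDUP (4096) == 4096      MEM_ROUNDUP (13) == 16
     ROUNDUP (4097) == 8192      MEM_ROUNDUP (16) == 16

   Both round up to the next multiple of a power of two by adding
   (alignment - 1) and then masking off the low-order bits.  */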

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif

\f
/***********************************************************************
			Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

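/* A sketch of a single heap's address range, for orientation (added
   commentary, derived from the invariants described above):

     start          bloc_start               free             end
       |  malloc data   |  relocatable blocs   |  unused space  |

   start <= bloc_start <= free <= end holds for every heap.  */
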
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable, while the arena is frozen.  Very inefficient.  */

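/* A concrete picture (illustrative addresses, not from the source):
   three live blocs of sizes 0x20, 0x40 and 0x10 allocated from
   bloc_start == 0x1000 sit at

     b1->data == 0x1000,  b2->data == 0x1020,  b3->data == 0x1060

   and break_value == 0x1070 == b3->data + b3->size, since the data
   blocks abut each other as stated above.  */
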
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is
   in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
        break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
        {
          /* Someone else called sbrk.  Make a new heap.  */

          heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
          POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

          if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
            return 0;

          new_heap->start = new;
          new_heap->end = bloc_start;
          new_heap->bloc_start = bloc_start;
          new_heap->free = bloc_start;
          new_heap->next = NIL_HEAP;
          new_heap->prev = last_heap;
          new_heap->first_bloc = NIL_BLOC;
          new_heap->last_bloc = NIL_BLOC;
          last_heap->next = new_heap;
          last_heap = new_heap;

          address = bloc_start;
          already_available = 0;
        }

      /* Add space to the last heap (which we may have just created).
         Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
        - (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
        return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  long excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
                                            ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
         And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
        {
          /* This heap should have no blocs in it.  */
          if (last_heap->first_bloc != NIL_BLOC
              || last_heap->last_bloc != NIL_BLOC)
            abort ();

          /* Return the last heap, with its header, to the system.  */
          excess = (char *) last_heap->end - (char *) last_heap->start;
          last_heap = last_heap->prev;
          last_heap->next = NIL_HEAP;
        }
      else
        {
          excess = (char *) last_heap->end
            - (char *) ROUNDUP ((char *) last_heap->end - excess);
          last_heap->end = (char *) last_heap->end - excess;
        }

      if ((*real_morecore) (- excess) == 0)
        {
          /* If the system didn't want that much memory back, adjust
             the end of the last heap to reflect that.  This can occur
             if break_value is still within the original data segment.  */
          last_heap->end = (char *) last_heap->end + excess;
          /* Make sure that the result of the adjustment is accurate.
             It should be, for the else clause above; the other case,
             which returns the entire last heap to the system, seems
             unlikely to trigger this mode of failure.  */
          if (last_heap->end != (*real_morecore) (0))
            abort ();
        }
    }
}

/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return (char *) break_value - (char *) virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
         Don't abort here, as callers might be expecting this, but
         callers that always expect a bloc to be returned should abort
         if one isn't to avoid a memory corruption bug that is
         difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
        return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
         move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
        {
          heap = heap->next;
          if (heap == NIL_HEAP)
            break;
          address = heap->bloc_start;
        }

      /* If BLOC won't fit in any heap,
         get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
        {
          register bloc_ptr tb = b;
          register SIZE s = 0;

          /* Add up the size of all the following blocs.  */
          while (tb != NIL_BLOC)
            {
              if (tb->variable)
                s += tb->size;

              tb = tb->next;
            }

          /* Get that space.  */
          address = obtain (address, s);
          if (address == 0)
            return 0;

          heap = last_heap;
        }

      /* Record the new address of this bloc
         and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
        address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
         till we get to the one that B is in.  */
      while (heap)
        {
          if (heap->bloc_start <= b->data && b->data <= heap->end)
            break;
          heap = heap->next;
          /* We know HEAP is not null now,
             because there has to be space for bloc B.  */
          heap->first_bloc = NIL_BLOC;
          heap->last_bloc = NIL_BLOC;
          heap->free = heap->bloc_start;
        }

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
        heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
        break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
             : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
        break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
      if (!bloc->variable)
        {
          bloc->size = 0;
          bloc->data = bloc->new_data;
        }
      else
        {
          safe_bcopy (bloc->data, bloc->new_data, old_size);
          bzero ((char *) bloc->new_data + old_size, size - old_size);
          *bloc->variable = bloc->data = bloc->new_data;
        }
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
                 : (char *) first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
        heap->first_bloc = bloc->next;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
        heap->last_bloc = bloc->prev;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
         extra space if we passed an unaligned one.  But we could
         not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
        {
          h = h->next;
          if (h == NIL_HEAP)
            break;
          address = (POINTER) ROUNDUP (h->start);
        }

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
        {
          get += extra_bytes + page_size;

          if (! obtain (address, get))
            return 0;

          if (first_heap == last_heap)
            address = (POINTER) ROUNDUP (virtual_break_value);
          else
            address = (POINTER) ROUNDUP (last_heap->start);
          h = last_heap;
        }

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
        {
          /* This is no clean solution - no idea how to do it better.  */
          if (r_alloc_freeze_level)
            return NIL;

          /* There is a bug here: if the above obtain call succeeded, but the
             relocate_blocs call below does not succeed, we need to free
             the memory that we got with obtain.  */

          /* Move all blocs upward.  */
          if (! relocate_blocs (first_bloc, h, new_bloc_start))
            return 0;

          /* Note that (POINTER)(h+1) <= new_bloc_start since
             get >= page_size, so the following does not destroy the heap
             header.  */
          for (b = last_bloc; b != NIL_BLOC; b = b->prev)
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }

          h->bloc_start = new_bloc_start;

          update_heap_bloc_correspondence (first_bloc, h);
        }
      if (h != first_heap)
        {
          /* Give up managing heaps below the one the new
             virtual_break_value points to.  */
          first_heap->prev = NIL_HEAP;
          first_heap->next = h->next;
          first_heap->start = h->start;
          first_heap->end = h->end;
          first_heap->free = h->free;
          first_heap->first_bloc = h->first_bloc;
          first_heap->last_bloc = h->last_bloc;
          first_heap->bloc_start = h->bloc_start;

          if (first_heap->next)
            first_heap->next->prev = first_heap;
          else
            last_heap = first_heap;
        }

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
        - ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
        {
          excess -= extra_bytes;
          first_heap->bloc_start
            = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

          relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

          for (b = first_bloc; b != NIL_BLOC; b = b->next)
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
        {
          /* We found an additional space below the first heap.  */
          first_heap->start = (POINTER) ((char *) virtual_break_value + size);
        }
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
                 ? (char *) last_bloc->data + last_bloc->size
                 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}

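/* Typical call pattern, for illustration only (hypothetical caller,
   not part of this file):

     POINTER p;
     if (r_alloc (&p, 1024) == 0)
       ...handle allocation failure...
     ...use the bytes at p, re-reading p after any allocation,
        since relocation may have moved the bloc...
     r_alloc_free (&p);

   The allocator records the address of P itself, so P must remain
   valid for the lifetime of the bloc.  */
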
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort (); /* Double free?  PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort (); /* Already freed?  PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
          && r_alloc_freeze_level == 0)
        {
          resize_bloc (bloc, MEM_ROUNDUP (size));
          /* Never mind if this fails, just do nothing...  */
          /* It *should* be infallible!  */
        }
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
        {
          bloc_ptr new_bloc;
          new_bloc = get_bloc (MEM_ROUNDUP (size));
          if (new_bloc)
            {
              new_bloc->variable = ptr;
              *ptr = new_bloc->data;
              bloc->variable = (POINTER *) NIL;
            }
          else
            return NIL;
        }
      else
        {
          if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
            return NIL;
        }
    }
  return *ptr;
}

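/* For illustration (hypothetical caller): growing a bloc in place.

     POINTER p;
     r_alloc (&p, 100);
     if (r_re_alloc (&p, 500) == 0)
       ...growth failed; p and its 100-byte bloc are unchanged...

   On success *ptr (and hence p) is updated, exactly as documented
   above.  */
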
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
        if (!(*b)->variable)
          free_bloc (*b);
        else
          b = &(*b)->next;
    }
}

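/* Illustrative pairing (hypothetical caller, not part of this file):

     r_alloc_freeze (8192);     make room, then pin all blocs
     ...work with bloc data that must not move...
     r_alloc_thaw ();           unpin; blocs freed while frozen are
                                collected here

   Freezes nest: relocation resumes only when the level drops back
   to zero.  */
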

#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */

#ifdef DEBUG

#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
         the heap start has any sort of alignment.
         Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
        {
          assert (ph->end < h->start);
          assert (h->start <= (POINTER) h && (POINTER) (h+1) <= h->bloc_start);
        }

      if (h->bloc_start <= break_value && break_value <= h->end)
        found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
        {
          if (h->bloc_start <= b->data && b->data + b->size <= h->end)
            break;
          ph = h;
        }

      assert (h);

      if (pb && pb->data + pb->size != b->data)
        {
          assert (ph && b->data == h->bloc_start);
          while (ph)
            {
              if (ph->bloc_start <= pb->data
                  && pb->data + pb->size <= ph->end)
                {
                  assert (pb->data + pb->size + b->size > ph->end);
                  break;
                }
              else
                {
                  assert (ph->bloc_start + b->size > ph->end);
                }
              ph = ph->prev;
            }
        }
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */

/* Update the internal record of which variable points to some data to NEW.
   Used by buffer-swap-text in Emacs to restore consistency after it
   swaps the buffer text between two buffer objects.  The OLD pointer
   is checked to ensure that memory corruption does not occur due to
   misuse.  */
void
r_alloc_reset_variable (old, new)
     POINTER *old, *new;
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by pointer.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
        break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    abort (); /* Already freed?  OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}
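
/* Sketch of the swap this supports (hypothetical, simplified from the
   buffer-swap-text use case):

     POINTER a, b, tmp;
     ...a and b were each set up with r_alloc...
     tmp = a, a = b, b = tmp;
     r_alloc_reset_variable (&b, &a);     bloc holding *a now records &a
     r_alloc_reset_variable (&a, &b);     bloc holding *b now records &b

   Each call verifies the OLD pointer before rewiring, as noted
   above.  */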

\f
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
         (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}

/* arch-tag: 6a524a15-faff-44c8-95d4-a5da6f55110f
   (do not change this comment) */