/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000, 2002, 2003, 2004,
      2005, 2006 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD           -2
extern int mallopt ();
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#define safe_bcopy(x, y, z)	memmove (y, x, z)
#define bzero(x, len)		memset (x, 0, len)

#endif /* not emacs */

#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init ();

\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
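
/* For illustration, assuming a 4096-byte page and MEM_ALIGN ==
   sizeof (double) == 8 (both machine-dependent): ROUNDUP (5000)
   yields 8192, ROUNDUP (4096) stays 4096, and MEM_ROUNDUP (13)
   yields 16.  Both macros assume the alignment is a power of two.  */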

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif

\f
/***********************************************************************
		 Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

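/* Sketch of a single heap's layout, as described above (addresses
   increase to the right; the labels are the struct heap fields
   defined below):

     start         bloc_start                  free             end
       | malloc data |    relocatable blocs      |    unused     |

   so start <= bloc_start <= free <= end holds in every heap.  */
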
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs
   are not reusable while the arena is frozen.  Very inefficient.  */

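/* Hypothetical numbers illustrating the abutting invariant: if
   first_bloc->data is 0x10000 with size 0x100, the next bloc's data
   must be 0x10100, and break_value ends up equal to
   last_bloc->data + last_bloc->size.  */
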
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* Temporarily used for relocation.  */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
    POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is
   in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
			- (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}
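
/* Worked example of the hysteresis above, assuming 4096-byte pages
   (so extra_bytes == ROUNDUP (50000) == 53248): relinquish returns
   space only once more than 2 * 53248 unused bytes lie beyond
   break_value, and even then keeps extra_bytes of slack, trading
   address space for fewer sbrk calls.  */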

/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return (char *) break_value - (char *) virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could not
	 always find a space which is contiguous to the previous one.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		      - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could
   be done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
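
/* Illustrative call pattern; `buffer' is a hypothetical caller
   variable.  The caller passes the address of the variable holding
   the data pointer, so ralloc can rewrite it whenever the bloc
   relocates:

       POINTER buffer;
       if (r_alloc (&buffer, 1024) == 0)
	 ... handle allocation failure ...
       ... buffer may silently change value after any allocation ...  */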

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
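
/* Illustrative pairing (hypothetical caller code): freeze while raw
   pointers into relocatable data must stay valid, then thaw:

       r_alloc_freeze (0);
       ... pointers into blocs will not move here ...
       r_alloc_thaw ();

   Freezes nest; blocs may move again only once the level drops back
   to zero.  */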

#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */

#ifdef DEBUG

#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */

\f
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
	 (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}

/* arch-tag: 6a524a15-faff-44c8-95d4-a5da6f55110f
   (do not change this comment) */