/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt ();
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */

#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init ();

\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int) (addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))

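/* Worked example for the macros above (an editorial illustration, not
   part of the original source): assuming page_size == 4096 and
   MEM_ALIGN == 8,

     ROUNDUP (5000)   == 8192   ((5000 + 4095) & ~4095)
     ROUNDUP (8192)   == 8192   (already page-aligned)
     MEM_ROUNDUP (13) == 16     ((13 + 7) & ~7)

   ROUNDUP keeps heap boundaries page-aligned; MEM_ROUNDUP keeps bloc
   data aligned for the most demanding scalar type.  */
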
\f
/***********************************************************************
			 Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

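/* Sketch of one heap's layout, following the description above
   (addresses increase to the right; illustrative only, not from the
   original source):

   start           bloc_start                 free            end
     |  malloc data    |   relocatable blocs    |    unused    |

   `free' and `end' coincide when the heap has no slack left.  */
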
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed bloc, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs
   are not reusable while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is
   in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
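
/* Worked example for the sizing arithmetic above (an editorial
   illustration; it assumes 4096-byte pages, so that extra_bytes is
   ROUNDUP (50000) == 53248, as set in r_alloc_init): a request with
   size == 10000 and already_available == 3000 computes
   get == 10000 + 53248 - 3000 == 60248, which the second assignment
   rounds so the new last_heap->end stays page-aligned (61440 if the
   old end was already aligned).  */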

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add up the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *) last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}

/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return (char *) break_value - (char *) virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that bloc.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new bloc.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
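
/* Editorial note (not from the original source): relocate_blocs only
   plans the move -- each bloc's new_data field now holds its target
   address.  The callers (resize_bloc and r_alloc_sbrk) then perform
   the actual safe_bcopy and update *b->variable, so no bloc contents
   are touched here.  */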

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

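/* Hook relationship in a sketch (editorial illustration): after
   r_alloc_init has run,

     __morecore    == r_alloc_sbrk    (malloc asks us for core)
     real_morecore == old __morecore  (we ask the system, via sbrk)

   so a malloc that needs N more bytes ends up calling
   r_alloc_sbrk (N), which may first relocate blocs out of the way.  */
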
POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
		      - ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
JB
927}
928
0a58f946 929
dcfdbac7
JB
930/* Allocate a relocatable bloc of storage of size SIZE. A pointer to
931 the data is returned in *PTR. PTR is thus the address of some variable
98b7fe02
JB
932 which will use the data area.
933
49f82b3d
RS
934 The allocation of 0 bytes is valid.
935 In case r_alloc_freeze is set, a best fit of unused blocs could be done
936 before allocating a new area. Not yet done.
937
98b7fe02
JB
938 If we can't allocate the necessary memory, set *PTR to zero, and
939 return zero. */
dcfdbac7
JB
940
941POINTER
942r_alloc (ptr, size)
943 POINTER *ptr;
944 SIZE size;
945{
946 register bloc_ptr new_bloc;
947
2c46d29f
RS
948 if (! r_alloc_initialized)
949 r_alloc_init ();
950
abe9ff32 951 new_bloc = get_bloc (MEM_ROUNDUP (size));
98b7fe02
JB
952 if (new_bloc)
953 {
954 new_bloc->variable = ptr;
955 *ptr = new_bloc->data;
956 }
957 else
958 *ptr = 0;
dcfdbac7
JB
959
960 return *ptr;
961}
962
2c46d29f
RS
963/* Free a bloc of relocatable storage whose data is pointed to by PTR.
964 Store 0 in *PTR to show there's no block allocated. */
dcfdbac7
JB
965
966void
967r_alloc_free (ptr)
968 register POINTER *ptr;
969{
970 register bloc_ptr dead_bloc;
971
44d3dec0
RS
972 if (! r_alloc_initialized)
973 r_alloc_init ();
974
dcfdbac7
JB
975 dead_bloc = find_bloc (ptr);
976 if (dead_bloc == NIL_BLOC)
977 abort ();
978
979 free_bloc (dead_bloc);
2c46d29f 980 *ptr = 0;
719b242f 981
d5179acc 982#ifdef emacs
719b242f 983 refill_memory_reserve ();
d5179acc 984#endif
dcfdbac7
JB
985}
986
16a5c729 987/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
98b7fe02
JB
988 Do this by shifting all blocks above this one up in memory, unless
989 SIZE is less than or equal to the current bloc size, in which case
990 do nothing.
dcfdbac7 991
49f82b3d 992 In case r_alloc_freeze is set, a new bloc is allocated, and the
8e6208c5 993 memory copied to it. Not very efficient. We could traverse the
49f82b3d
RS
994 bloc_list for a best fit of free blocs first.
995
98b7fe02
JB
996 Change *PTR to reflect the new bloc, and return this value.
997
998 If more memory cannot be allocated, then leave *PTR unchanged, and
999 return zero. */
dcfdbac7
JB
1000
1001POINTER
1002r_re_alloc (ptr, size)
1003 POINTER *ptr;
1004 SIZE size;
1005{
16a5c729 1006 register bloc_ptr bloc;
dcfdbac7 1007
44d3dec0
RS
1008 if (! r_alloc_initialized)
1009 r_alloc_init ();
1010
49f82b3d
RS
1011 if (!*ptr)
1012 return r_alloc (ptr, size);
1013 if (!size)
1014 {
1015 r_alloc_free (ptr);
1016 return r_alloc (ptr, 0);
1017 }
1018
16a5c729
JB
1019 bloc = find_bloc (ptr);
1020 if (bloc == NIL_BLOC)
dcfdbac7
JB
1021 abort ();
1022
49f82b3d
RS
1023 if (size < bloc->size)
1024 {
1025 /* Wouldn't it be useful to actually resize the bloc here? */
1026 /* I think so too, but not if it's too expensive... */
1027 if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
1028 && r_alloc_freeze_level == 0)
1029 {
1030 resize_bloc (bloc, MEM_ROUNDUP (size));
1031 /* Never mind if this fails, just do nothing... */
1032 /* It *should* be infallible! */
1033 }
1034 }
1035 else if (size > bloc->size)
1036 {
1037 if (r_alloc_freeze_level)
1038 {
1039 bloc_ptr new_bloc;
1040 new_bloc = get_bloc (MEM_ROUNDUP (size));
1041 if (new_bloc)
1042 {
1043 new_bloc->variable = ptr;
1044 *ptr = new_bloc->data;
1045 bloc->variable = (POINTER *) NIL;
1046 }
1047 else
1048 return NIL;
1049 }
1050 else
1051 {
1052 if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
1053 return NIL;
1054 }
1055 }
dcfdbac7
JB
1056 return *ptr;
1057}
81bd58e8
KH
1058
1059/* Disable relocations, after making room for at least SIZE bytes
1060 of non-relocatable heap if possible. The relocatable blocs are
1061 guaranteed to hold still until thawed, even if this means that
1062 malloc must return a null pointer. */
abe9ff32 1063
81bd58e8
KH
1064void
1065r_alloc_freeze (size)
1066 long size;
1067{
44d3dec0
RS
1068 if (! r_alloc_initialized)
1069 r_alloc_init ();
1070
81bd58e8
KH
1071 /* If already frozen, we can't make any more room, so don't try. */
1072 if (r_alloc_freeze_level > 0)
1073 size = 0;
1074 /* If we can't get the amount requested, half is better than nothing. */
1075 while (size > 0 && r_alloc_sbrk (size) == 0)
1076 size /= 2;
1077 ++r_alloc_freeze_level;
1078 if (size > 0)
1079 r_alloc_sbrk (-size);
1080}
1081
1082void
1083r_alloc_thaw ()
1084{
49f82b3d
RS
1085
1086 if (! r_alloc_initialized)
1087 r_alloc_init ();
1088
81bd58e8
KH
1089 if (--r_alloc_freeze_level < 0)
1090 abort ();
49f82b3d 1091
8e6208c5 1092 /* This frees all unused blocs. It is not too inefficient, as the resize
49f82b3d
RS
1093 and bcopy is done only once. Afterwards, all unreferenced blocs are
1094 already shrunk to zero size. */
1095 if (!r_alloc_freeze_level)
1096 {
1097 bloc_ptr *b = &first_bloc;
1098 while (*b)
1099 if (!(*b)->variable)
1100 free_bloc (*b);
1101 else
1102 b = &(*b)->next;
1103 }
81bd58e8 1104}
49f82b3d 1105
dec41418
RS
1106
1107#if defined (emacs) && defined (DOUG_LEA_MALLOC)
1108
1109/* Reinitialize the morecore hook variables after restarting a dumped
1110 Emacs. This is needed when using Doug Lea's malloc from GNU libc. */
1111void
1112r_alloc_reinit ()
1113{
1114 /* Only do this if the hook has been reset, so that we don't get an
1115 infinite loop, in case Emacs was linked statically. */
1116 if (__morecore != r_alloc_sbrk)
1117 {
1118 real_morecore = __morecore;
1119 __morecore = r_alloc_sbrk;
1120 }
1121}
0a58f946
GM
1122
1123#endif /* emacs && DOUG_LEA_MALLOC */
dec41418 1124
e429caa2 1125#ifdef DEBUG
0a58f946 1126
e429caa2
KH
1127#include <assert.h>
1128
44d3dec0 1129void
e429caa2
KH
1130r_alloc_check ()
1131{
6d16dd06
RS
1132 int found = 0;
1133 heap_ptr h, ph = 0;
1134 bloc_ptr b, pb = 0;
1135
1136 if (!r_alloc_initialized)
1137 return;
1138
1139 assert (first_heap);
1140 assert (last_heap->end <= (POINTER) sbrk (0));
1141 assert ((POINTER) first_heap < first_heap->start);
1142 assert (first_heap->start <= virtual_break_value);
1143 assert (virtual_break_value <= first_heap->end);
1144
1145 for (h = first_heap; h; h = h->next)
1146 {
1147 assert (h->prev == ph);
1148 assert ((POINTER) ROUNDUP (h->end) == h->end);
40f3f04b
RS
1149#if 0 /* ??? The code in ralloc.c does not really try to ensure
1150 the heap start has any sort of alignment.
1151 Perhaps it should. */
6d16dd06 1152 assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
40f3f04b 1153#endif
6d16dd06
RS
1154 assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
1155 assert (h->start <= h->bloc_start && h->bloc_start <= h->end);
1156
1157 if (ph)
1158 {
1159 assert (ph->end < h->start);
1160 assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
1161 }
1162
1163 if (h->bloc_start <= break_value && break_value <= h->end)
1164 found = 1;
1165
1166 ph = h;
1167 }
1168
1169 assert (found);
1170 assert (last_heap == ph);
1171
1172 for (b = first_bloc; b; b = b->next)
1173 {
1174 assert (b->prev == pb);
1175 assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
1176 assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);
1177
1178 ph = 0;
1179 for (h = first_heap; h; h = h->next)
1180 {
1181 if (h->bloc_start <= b->data && b->data + b->size <= h->end)
1182 break;
1183 ph = h;
1184 }
1185
1186 assert (h);
1187
1188 if (pb && pb->data + pb->size != b->data)
1189 {
1190 assert (ph && b->data == h->bloc_start);
1191 while (ph)
1192 {
1193 if (ph->bloc_start <= pb->data
1194 && pb->data + pb->size <= ph->end)
1195 {
1196 assert (pb->data + pb->size + b->size > ph->end);
1197 break;
1198 }
1199 else
1200 {
1201 assert (ph->bloc_start + b->size > ph->end);
1202 }
1203 ph = ph->prev;
1204 }
1205 }
1206 pb = b;
1207 }
1208
1209 assert (last_bloc == pb);
1210
1211 if (last_bloc)
1212 assert (last_bloc->data + last_bloc->size == break_value);
1213 else
1214 assert (first_heap->bloc_start == break_value);
e429caa2 1215}
0a58f946 1216
e429caa2 1217#endif /* DEBUG */
0a58f946 1218
0a58f946
GM
1219
1220\f
1221/***********************************************************************
1222 Initialization
1223 ***********************************************************************/
1224
1225/* The hook `malloc' uses for the function which gets more space
1226 from the system. */
1227
a2c23c92 1228#ifndef SYSTEM_MALLOC
0a58f946 1229extern POINTER (*__morecore) ();
a2c23c92 1230#endif
0a58f946 1231
d93c3186 1232
0a58f946
GM
1233/* Initialize various things for memory allocation. */
1234
1235static void
1236r_alloc_init ()
1237{
1238 if (r_alloc_initialized)
1239 return;
0a58f946 1240 r_alloc_initialized = 1;
d1a02881 1241
a2c23c92
DL
1242 page_size = PAGE;
1243#ifndef SYSTEM_MALLOC
0a58f946
GM
1244 real_morecore = __morecore;
1245 __morecore = r_alloc_sbrk;
1246
1247 first_heap = last_heap = &heap_base;
1248 first_heap->next = first_heap->prev = NIL_HEAP;
1249 first_heap->start = first_heap->bloc_start
1250 = virtual_break_value = break_value = (*real_morecore) (0);
1251 if (break_value == NIL)
1252 abort ();
1253
0a58f946 1254 extra_bytes = ROUNDUP (50000);
a2c23c92 1255#endif
0a58f946
GM
1256
1257#ifdef DOUG_LEA_MALLOC
1258 mallopt (M_TOP_PAD, 64 * 4096);
1259#else
a2c23c92 1260#ifndef SYSTEM_MALLOC
0a58f946
GM
1261 /* Give GNU malloc's morecore some hysteresis
1262 so that we move all the relocatable blocks much less often. */
1263 __malloc_extra_blocks = 64;
1264#endif
a2c23c92 1265#endif
0a58f946 1266
5ad25b24 1267#ifndef SYSTEM_MALLOC
0a58f946
GM
1268 first_heap->end = (POINTER) ROUNDUP (first_heap->start);
1269
1270 /* The extra call to real_morecore guarantees that the end of the
1271 address space is a multiple of page_size, even if page_size is
1272 not really the page size of the system running the binary in
1273 which page_size is stored. This allows a binary to be built on a
1274 system with one page size and run on a system with a smaller page
1275 size. */
91a211b5 1276 (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);
0a58f946
GM
1277
1278 /* Clear the rest of the last page; this memory is in our address space
1279 even though it is after the sbrk value. */
1280 /* Doubly true, with the additional call that explicitly adds the
1281 rest of that page to the address space. */
91a211b5
GM
1282 bzero (first_heap->start,
1283 (char *) first_heap->end - (char *) first_heap->start);
0a58f946 1284 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
a2c23c92 1285#endif
d1a02881 1286
0a58f946
GM
1287 use_relocatable_buffers = 1;
1288}