/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000-2013 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>

#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#include <unistd.h>

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt (int, int);
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

#include <unistd.h>
#include <malloc.h>

#endif /* not emacs */


#include "getpagesize.h"

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init (void);

\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
void *(*real_morecore) (ptrdiff_t);

/* The break value, as seen by malloc.  */
static void *virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static void *break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ROUNDUP(size) (((size_t) (size) + page_size - 1) \
		       & ~((size_t) (page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((size_t) (addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
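
/* Worked example of the rounding arithmetic above (assuming a
   4096-byte page and an 8-byte MEM_ALIGN):

     ROUNDUP (1)      == 4096    ((1 + 4095) & ~4095)
     ROUNDUP (4096)   == 4096
     ROUNDUP (4097)   == 8192
     MEM_ROUNDUP (13) == 16      ((13 + 7) & ~7)

   Both macros rely on the rounding target being a power of two, so
   that the mask ~(n - 1) clears exactly the low-order bits.  */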

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern void *(*__morecore) (ptrdiff_t);
#endif


\f
/***********************************************************************
		      Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  void *start;
  /* End of memory range of this heap.  */
  void *end;
  /* Start of relocatable data in this heap.  */
  void *bloc_start;
  /* Start of unused space in this heap.  */
  void *free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)

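/* Within one heap the pointers above are therefore ordered

     start <= bloc_start <= free <= end

   with malloc data in [start, bloc_start) and relocatable blocs in
   [bloc_start, free).  (A sketch inferred from the comments above;
   see also the assertions in r_alloc_check below.)  */
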
/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NULL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable, while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  void **variable;
  void *data;
  size_t size;
  void *new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

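/* The `variable' member is the key to relocation: it points back at
   the user's pointer variable (e.g. a buffer's text pointer), so that
   whenever a bloc moves the code can do

     *b->variable = b->data = b->new_data;

   and the user's pointer follows the data automatically.  */
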
/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (void *address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not
   in use.  It just returns the address of SIZE bytes that fall within a
   single heap.  If you call obtain twice in a row with the same arguments,
   you typically get the same value.  It's the caller's responsibility to
   keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static void *
obtain (void *address, size_t size)
{
  heap_ptr heap;
  size_t already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    emacs_abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      void *new = real_morecore (0);
      size_t get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  void *bloc_start = (void *) MEM_ROUNDUP ((void *) (new_heap + 1));

	  if (real_morecore ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if (real_morecore (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
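
/* Note on real_morecore: the code above assumes it follows the sbrk
   convention.  A zero argument returns the current break without
   changing it, a positive argument extends the break by that many
   bytes and returns the old break, and a negative argument shrinks
   the break (see relinquish below).  */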

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  ptrdiff_t excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && real_morecore (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  heap_ptr lh_prev;

	  /* This heap should have no blocs in it.  If it does, we
	     cannot return it to the system.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    return;

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  lh_prev = last_heap->prev;
	  /* If the system doesn't want that much memory back, leave
	     last_heap unaltered to reflect that.  This can occur if
	     break_value is still within the original data segment.  */
	  if (real_morecore (- excess) != 0)
	    {
	      last_heap = lh_prev;
	      last_heap->next = NIL_HEAP;
	    }
	}
      else
	{
	  excess = ((char *) last_heap->end
		    - (char *) ROUNDUP ((char *) last_heap->end - excess));
	  /* If the system doesn't want that much memory back, leave
	     the end of the last heap unchanged to reflect that.  This
	     can occur if break_value is still within the original
	     data segment.  */
	  if (real_morecore (- excess) != 0)
	    last_heap->end = (char *) last_heap->end - excess;
	}
    }
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (void **ptr)
{
  bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
	 Don't abort here, as callers might be expecting this, but
	 callers that always expect a bloc to be returned should abort
	 if one isn't to avoid a memory corruption bug that is
	 difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size_t size)
{
  bloc_ptr new_bloc;
  heap_ptr heap;

  if (! (new_bloc = malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = NULL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, void *address)
{
  bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  bloc_ptr tb = b;
	  size_t s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
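
/* Note that relocate_blocs only plans the move: it fills in each
   bloc's new_data field.  The callers (resize_bloc and r_alloc_sbrk)
   then do the actual memmove and update *b->variable themselves.  */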
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, size_t size)
{
  bloc_ptr b;
  heap_ptr heap;
  void *address;
  size_t old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    emacs_abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  if (bloc->new_data != bloc->data)
	    memmove (bloc->new_data, bloc->data, old_size);
	  memset ((char *) bloc->new_data + old_size, 0, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
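
/* When growing, the blocs after BLOC are processed from the top down,
   so a copy cannot clobber data that has not been moved yet; when
   shrinking, they are processed bottom up for the same reason.  The
   newly exposed tail of a grown bloc is zero-filled by the memset
   above.  */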
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;
  heap_ptr h;

  if (r_alloc_freeze_level)
    {
      bloc->variable = NULL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Sometimes, 'heap' obtained from bloc->heap above is not really a
     'heap' structure.  It can even be beyond the current break point,
     which will cause crashes when we dereference it below (see
     bug#12242).  Evidently, the reason is bloc allocations done while
     use_relocatable_buffers was non-positive, because additional
     memory we get then is not recorded in the heaps we manage.  If
     bloc->heap records such a "heap", we cannot (and don't need to)
     update its records.  So we validate the 'heap' value by making
     sure it is one of the heaps we manage via the heaps linked list,
     and don't touch a 'heap' that isn't found there.  This avoids
     accessing memory we know nothing about.  */
  for (h = first_heap; h != NIL_HEAP; h = h->next)
    if (heap == h)
      break;

  if (h)
    {
      /* Update the records of which blocs are in HEAP.  */
      if (heap->first_bloc == bloc)
	{
	  if (bloc->next != 0 && bloc->next->heap == heap)
	    heap->first_bloc = bloc->next;
	  else
	    heap->first_bloc = heap->last_bloc = NIL_BLOC;
	}
      if (heap->last_bloc == bloc)
	{
	  if (bloc->prev != 0 && bloc->prev->heap == heap)
	    heap->last_bloc = bloc->prev;
	  else
	    heap->first_bloc = heap->last_bloc = NIL_BLOC;
	}
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

static void *
r_alloc_sbrk (ptrdiff_t size)
{
  bloc_ptr b;
  void *address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (use_relocatable_buffers <= 0)
    return real_morecore (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      void *new_bloc_start;
      heap_ptr h = first_heap;
      size_t get = ROUNDUP (size);

      address = (void *) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (void *) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (void *) ROUNDUP (virtual_break_value);
	  else
	    address = (void *) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (void *) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NULL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (char *) (h + 1) <= (char *) new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      size_t excess = ((char *) first_heap->bloc_start
		       - ((char *) virtual_break_value + size));

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (void *) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found an additional space below the first heap */
	  first_heap->start = (void *) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (void *) ((char *) address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
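
/* In an Emacs configured with REL_ALLOC, r_alloc_init plugs the
   function above into GNU malloc's hook:

     real_morecore = __morecore;
     __morecore = r_alloc_sbrk;

   so every sbrk-style request malloc makes goes through ralloc,
   which can shove the relocatable blocs out of the way first.  */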

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

void *
r_alloc (void **ptr, size_t size)
{
  bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}

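/* A minimal usage sketch (hypothetical caller, not part of this file):

     void *text;
     if (r_alloc (&text, 1024) == NULL)
       handle_allocation_failure ();        -- hypothetical handler
     r_re_alloc (&text, 2048);              -- may move the data
     r_alloc_free (&text);                  -- frees; sets text to 0

   Because the data may be silently relocated whenever malloc needs
   core, callers must always go through the registered variable and
   never cache the raw pointer across allocations.  */
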
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (void **ptr)
{
  bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    emacs_abort (); /* Double free? PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

void *
r_re_alloc (void **ptr, size_t size)
{
  bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    emacs_abort (); /* Already freed? PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = NULL;
	    }
	  else
	    return NULL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NULL;
	}
    }
  return *ptr;
}

#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */

#ifdef DEBUG

#include <assert.h>

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (void *) sbrk (0));
  assert ((void *) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((void *) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((void *) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((void *) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (void *) h && (void *) (h + 1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((void *) MEM_ROUNDUP (b->data) == b->data);
      assert ((size_t) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */

/* Update the internal record of which variable points to some data to NEW.
   Used by buffer-swap-text in Emacs to restore consistency after it
   swaps the buffer text between two buffer objects.  The OLD pointer
   is checked to ensure that memory corruption does not occur due to
   misuse.  */
void
r_alloc_reset_variable (void **old, void **new)
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by pointer.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
	break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    emacs_abort (); /* Already freed? OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}

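/* Sketch of the intended call pattern (hypothetical variables,
   mirroring what buffer-swap-text does): after the caller has
   exchanged *a and *b itself, each bloc's back-pointer must be
   retargeted to the variable that now holds its data:

     r_alloc_reset_variable (&a, &b);
     r_alloc_reset_variable (&b, &a);

   Without this, a later relocation would write the moved address
   through the wrong variable.  */
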
void
r_alloc_inhibit_buffer_relocation (int inhibit)
{
  if (use_relocatable_buffers > 1)
    use_relocatable_buffers = 1;
  if (inhibit)
    use_relocatable_buffers--;
  else if (use_relocatable_buffers < 1)
    use_relocatable_buffers++;
}
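
/* use_relocatable_buffers thus acts as a counter: each inhibit call
   decrements it and each uninhibit increments it back toward 1, while
   r_alloc_sbrk falls through to real_morecore whenever the counter is
   <= 0, so nested inhibitions behave sensibly.  */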

\f
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init (void)
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = real_morecore (0);
  if (break_value == NULL)
    emacs_abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  block_input ();
  mallopt (M_TOP_PAD, 64 * 4096);
  unblock_input ();
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis so that we move all
     the relocatable blocks much less often.  The number used to be
     64, but alloc.c would override that with 32 in code that was
     removed when SYNC_INPUT became the only input handling mode.
     That code was conditioned on !DOUG_LEA_MALLOC, so the call to
     mallopt above is left unchanged.  (Actually, I think there's no
     system nowadays that uses DOUG_LEA_MALLOC and also uses
     REL_ALLOC.)  */
  __malloc_extra_blocks = 32;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (void *) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  real_morecore ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  memset (first_heap->start, 0,
	  (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}