/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000-2013 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>

#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#include <unistd.h>

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt (int, int);
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

#include <unistd.h>
#include <malloc.h>

#endif /* not emacs */


#include "getpagesize.h"

typedef size_t SIZE;
typedef void *POINTER;
#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init (void);

\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) (ptrdiff_t);

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ROUNDUP(size) (((size_t) (size) + page_size - 1) \
		       & ~((size_t) (page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((size_t) (addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
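/* A worked example of the macros above (a sketch, assuming
   page_size == 4096 and MEM_ALIGN == 8): ROUNDUP (5000) is
   (5000 + 4095) & ~4095 == 8192, the next page boundary, while
   MEM_ROUNDUP (5003) is 5008, the next multiple of 8.  Both depend
   on the rounding target being a power of 2.  */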

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) (ptrdiff_t);
#endif


\f
/***********************************************************************
			Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
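/* A sketch of one heap's address range, as the fields above describe
   it (addresses increase to the right):

   start           bloc_start               free               end
     | malloc data     | relocatable blocs     | unused space    |

   When malloc needs more room than `start'..`bloc_start' provides,
   r_alloc_sbrk relocates the blocs upward to widen that zone.  */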
/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not
   yet been collected.  Such blocs may only appear while
   r_alloc_freeze_level > 0, and will be freed when the arena is
   thawed.  Currently, these blocs are not reusable while the arena
   is frozen.  Very inefficient.  */
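/* For example (a sketch; actual sizes are whatever callers request,
   rounded by MEM_ROUNDUP): three live blocs of sizes 96, 200, and 48
   occupy [bloc_start, bloc_start + 344) with no gaps, and
   break_value is then bloc_start + 344.  */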
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* Temporarily used for relocation.  */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (POINTER address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not
   in use.  It just returns the address of SIZE bytes that fall within a
   single heap.  If you call obtain twice in a row with the same arguments,
   you typically get the same value.  It's the caller's responsibility to
   keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (POINTER address, SIZE size)
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    emacs_abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
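/* A sketch of the growth arithmetic in obtain, assuming
   page_size == 4096 and extra_bytes == ROUNDUP (50000) == 53248 (the
   value set in r_alloc_init): with 1000 bytes already available, a
   request for 2000 bytes asks the system for
   2000 + 53248 - 1000 = 54248 bytes past last_heap->end, rounded so
   the new end lands on a page boundary.  The surplus is hysteresis,
   so we come back here less often.  */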

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  ptrdiff_t excess = 0;

  /* Add up the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  heap_ptr lh_prev;

	  /* This heap should have no blocs in it.  If it does, we
	     cannot return it to the system.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    return;

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  lh_prev = last_heap->prev;
	  /* If the system doesn't want that much memory back, leave
	     last_heap unaltered to reflect that.  This can occur if
	     break_value is still within the original data segment.  */
	  if ((*real_morecore) (- excess) != 0)
	    {
	      last_heap = lh_prev;
	      last_heap->next = NIL_HEAP;
	    }
	}
      else
	{
	  excess = (char *) last_heap->end
		   - (char *) ROUNDUP ((char *) last_heap->end - excess);
	  /* If the system doesn't want that much memory back, leave
	     the end of the last heap unchanged to reflect that.  This
	     can occur if break_value is still within the original
	     data segment.  */
	  if ((*real_morecore) (- excess) != 0)
	    last_heap->end = (char *) last_heap->end - excess;
	}
    }
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (POINTER *ptr)
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
	 Don't abort here, as callers might be expecting this, but
	 callers that always expect a bloc to be returned should abort
	 if one isn't to avoid a memory corruption bug that is
	 difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (SIZE size)
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
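/* Relocation is thus a two-phase protocol: relocate_blocs above only
   plans the move, recording each bloc's destination in new_data; the
   callers (resize_bloc and r_alloc_sbrk) then memmove the contents
   and store the new address through *b->variable.  Keeping the
   phases separate lets a caller abandon a failed plan without having
   disturbed any bloc's contents.  */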
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
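/* On return, every heap from HEAP through last_heap has first_bloc,
   last_bloc, and free consistent with the blocs that now live in it;
   heaps left holding no blocs are marked empty (free == bloc_start).  */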
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, SIZE size)
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    emacs_abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  if (bloc->new_data != bloc->data)
	    memmove (bloc->new_data, bloc->data, old_size);
	  memset ((char *) bloc->new_data + old_size, 0, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;
  heap_ptr h;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Sometimes, 'heap' obtained from bloc->heap above is not really a
     'heap' structure.  It can even be beyond the current break point,
     which will cause crashes when we dereference it below (see
     bug#12242).  Evidently, the reason is bloc allocations done while
     use_relocatable_buffers was non-positive, because additional
     memory we get then is not recorded in the heaps we manage.  If
     bloc->heap records such a "heap", we cannot (and don't need to)
     update its records.  So we validate the 'heap' value by making
     sure it is one of the heaps we manage via the heaps linked list,
     and don't touch a 'heap' that isn't found there.  This avoids
     accessing memory we know nothing about.  */
  for (h = first_heap; h != NIL_HEAP; h = h->next)
    if (heap == h)
      break;

  if (h)
    {
      /* Update the records of which blocs are in HEAP.  */
      if (heap->first_bloc == bloc)
	{
	  if (bloc->next != 0 && bloc->next->heap == heap)
	    heap->first_bloc = bloc->next;
	  else
	    heap->first_bloc = heap->last_bloc = NIL_BLOC;
	}
      if (heap->last_bloc == bloc)
	{
	  if (bloc->prev != 0 && bloc->prev->heap == heap)
	    heap->last_bloc = bloc->prev;
	  else
	    heap->first_bloc = heap->last_bloc = NIL_BLOC;
	}
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

static POINTER
r_alloc_sbrk (ptrdiff_t size)
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (use_relocatable_buffers <= 0)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could not
	 always find a space which is contiguous to the previous one.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER) (h + 1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
		    - ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
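/* A sketch of the two directions above, assuming page_size == 4096:
   r_alloc_sbrk (10000) rounds the request up to 12288 and, if
   malloc's zone would collide with first_heap->bloc_start, first
   slides every bloc upward; it returns the old page-aligned break.
   r_alloc_sbrk (-10000) lowers virtual_break_value and, once more
   than 2 * extra_bytes of slack has accumulated, slides the blocs
   back down and lets relinquish return whole pages to the system.  */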


/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
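/* A usage sketch (hypothetical caller, not part of this file).  The
   variable passed as PTR must remain valid for the bloc's lifetime,
   because relocation writes the new address back through it:

     static POINTER buf;
     if (r_alloc (&buf, 8192) == 0)
       ...handle allocation failure...
     ...use buf, remembering that any allocation may move it...
     r_alloc_free (&buf);   ...buf is reset to 0...
*/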

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (register POINTER *ptr)
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    emacs_abort (); /* Double free?  PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    emacs_abort (); /* Already freed?  PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
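/* Note that, unlike realloc, r_re_alloc ignores small downward
   resizes: it only shrinks the bloc when doing so saves at least
   page_size bytes, since shrinking relocates every following bloc
   for little gain.  */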


#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */
/* Update the internal record of which variable points to some data to NEW.
   Used by buffer-swap-text in Emacs to restore consistency after it
   swaps the buffer text between two buffer objects.  The OLD pointer
   is checked to ensure that memory corruption does not occur due to
   misuse.  */
void
r_alloc_reset_variable (POINTER *old, POINTER *new)
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by pointer.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
	break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    emacs_abort (); /* Already freed?  OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}
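/* Inhibit (if INHIBIT is non-zero) or reenable relocation of buffer
   text.  Inhibitions nest: use_relocatable_buffers counts down for
   each inhibit and back up on reenable, and r_alloc_sbrk falls back
   to real_morecore whenever the count is not positive.  */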
void
r_alloc_inhibit_buffer_relocation (int inhibit)
{
  if (use_relocatable_buffers > 1)
    use_relocatable_buffers = 1;
  if (inhibit)
    use_relocatable_buffers--;
  else if (use_relocatable_buffers < 1)
    use_relocatable_buffers++;
}

\f
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init (void)
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    emacs_abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  block_input ();
  mallopt (M_TOP_PAD, 64 * 4096);
  unblock_input ();
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis so that we move all
     the relocatable blocks much less often.  The number used to be
     64, but alloc.c would override that with 32 in code that was
     removed when SYNC_INPUT became the only input handling mode.
     That code was conditioned on !DOUG_LEA_MALLOC, so the call to
     mallopt above is left unchanged.  (Actually, I think there's no
     system nowadays that uses DOUG_LEA_MALLOC and also uses
     REL_ALLOC.)  */
  __malloc_extra_blocks = 32;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  memset (first_heap->start, 0,
	  (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}