/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */

#undef NULL

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the object pointed to
   has a size of 1.  */
#if 0 /* Arithmetic on void* is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

typedef char *POINTER;

#endif
#endif /* 0 */

/* Unconditionally use char * for this.  */
typedef char *POINTER;

typedef unsigned long SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */
extern void safe_bcopy ();

extern int __malloc_extra_blocks;

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */

#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;

static void r_alloc_init ();
\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
static POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
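
/* A worked example of the rounding arithmetic, with illustrative
   values only (assuming page_size == 4096 and MEM_ALIGN == 8):

     ROUNDUP (5000)     => 8192	(next multiple of page_size)
     ROUNDUP (4096)     => 4096	(already aligned; unchanged)
     ALIGNED (8192)     => 1
     ALIGNED (5000)     => 0
     MEM_ROUNDUP (13)   => 16	(next multiple of MEM_ALIGN)

   Both macros depend on page_size and MEM_ALIGN being powers of two,
   so rounding up is just an add followed by masking off low bits.  */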
\f
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

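/* A sketch (addresses increase left to right) of one heap's layout,
   following the invariants described above:

     start          bloc_start              free             end
       | malloc data |  relocatable blocs    |    unused      |

   start <= bloc_start <= free <= end holds for every heap; any heap
   after the first keeps its own header at the beginning of the space
   it covers.  */
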
/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs
   are not reusable while the arena is frozen, which is very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* Temporarily used for relocation.  */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))
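
/* Concretely (hypothetical addresses): three live blocs of sizes
   0x100, 0x40 and 0x200 might have data fields 0x5000, 0x5100 and
   0x5140 -- each bloc starting exactly where the previous one ends,
   so the blocs form one contiguous run ending at break_value.  */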

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

	  if ((*real_morecore) (bloc_start - new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end += get;
    }

  return address;
}
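
/* For instance (illustrative numbers, assuming a 4096-byte page, so
   extra_bytes == ROUNDUP (50000) == 53248): a request for 10000 bytes
   when already_available == 2000 computes

     get = 10000 + 53248 - 2000 = 61248

   which the ROUNDUP above then stretches so that the new
   last_heap->end stays page-aligned.  */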

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *) last_heap->end - excess);
	  last_heap->end -= excess;
	}

      if ((*real_morecore) (- excess) == 0)
	abort ();
    }
}

/* Return the total size in use by the relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return break_value - virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that bloc.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new bloc.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address += b->size;
      b = b->next;
    }

  return 1;
}
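
/* A sketch of the two-phase relocation protocol this function is half
   of, as its callers (resize_bloc and r_alloc_sbrk) use it:

     1. relocate_blocs computes b->new_data for every bloc in the list.
     2. The caller copies each bloc's contents with
	  safe_bcopy (b->data, b->new_data, b->size);
	and commits the move with
	  *b->variable = b->data = b->new_data;

   When blocs move to higher addresses the callers walk from last_bloc
   backward; when blocs move down they walk from the first affected
   bloc forward.  Together with safe_bcopy's overlap handling, this
   keeps any bloc's bytes from being overwritten before being copied.  */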

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
	     : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero (bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */
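
/* Concretely (illustrative numbers, assuming a 4096-byte page, so
   extra_bytes == ROUNDUP (50000) == 53248): when malloc's break
   shrinks by a few kilobytes, the blocs stay put.  Only once more
   than 2 * extra_bytes of slack has accumulated below the first bloc
   does the size < 0 branch below slide the blocs down, and even then
   it deliberately leaves extra_bytes of slack so that the next small
   request needs no move.  */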

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could not
	 always find a space which is contiguous to the previous one.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
	- ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
		 ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could
   be done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocs above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
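
/* A minimal usage sketch of the public entry points above.  The
   RALLOC_USAGE_EXAMPLE guard is hypothetical -- it is never defined by
   any build, so this compiles away; real callers (e.g. buffer code in
   Emacs) follow the same pattern.  */
#ifdef RALLOC_USAGE_EXAMPLE
static void
ralloc_usage_example ()
{
  POINTER buf;

  /* Allocate 1024 relocatable bytes; BUF receives the data address.  */
  if (r_alloc (&buf, 1024) == 0)
    return;			/* Out of memory.  */

  /* Grow the bloc.  The data may move, in which case BUF is updated
     through the registered variable pointer.  */
  if (r_re_alloc (&buf, 4096) == 0)
    return;			/* Bloc left unchanged on failure.  */

  /* Release the bloc; BUF is set to 0.  */
  r_alloc_free (&buf);
}
#endif /* RALLOC_USAGE_EXAMPLE */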

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
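
/* A sketch of the intended pairing (illustrative only): a caller that
   must hold raw pointers into bloc data across code that may allocate
   brackets that code with

     r_alloc_freeze (0);
     ... use saved pointers; blocs cannot move ...
     r_alloc_thaw ();

   Freeze calls nest; blocs may move again only once every freeze has
   been matched by a thaw.  */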

\f
/* The hook `malloc' uses for the function which gets more space
   from the system.  */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}
#ifdef DEBUG
#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */