/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#undef NULL

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the object pointed to has a
   size of 1.  */
#if 0 /* Arithmetic on void* is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

typedef char *POINTER;

#endif
#endif /* 0 */

/* Unconditionally use char * for this.  */
typedef char *POINTER;

typedef unsigned long SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */
extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt ();
#else
extern int __malloc_extra_blocks;
#endif

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */

#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;

static void r_alloc_init ();
\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
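
/* A worked example of the two rounding macros (an illustrative sketch,
   assuming page_size == 4096 and MEM_ALIGN == 8, which are typical but
   not guaranteed): ROUNDUP (5) == 4096, ROUNDUP (4096) == 4096, and
   ROUNDUP (4097) == 8192, while MEM_ROUNDUP (5) == 8 and
   MEM_ROUNDUP (8) == 8.  ROUNDUP rounds a request up to a page
   boundary; MEM_ROUNDUP only rounds up to the alignment of the most
   demanding basic type.  */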
\f
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

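/* Schematically, a single heap looks like this (an illustrative sketch
   of the invariants above, not data from a real run):

   start          bloc_start              free                end
     |  malloc data  |  relocatable blocs   |  unused space     |
     +---------------+----------------------+-------------------+

   so that start <= bloc_start <= free <= end always holds.  */
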
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze > 0, and will be
   freed when the arena is thawed.  Currently, these blocs are not reusable,
   while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) (bloc_start - new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end += get;
    }

  return address;
}
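
/* An illustrative example of the semantics above (hypothetical numbers,
   not from a real run): if break_value is 0x10000 and 500 more bytes
   are wanted, obtain (break_value, 500) returns 0x10000 as long as
   those bytes still fit in some heap, growing the last heap otherwise.
   Calling it again with the same arguments returns 0x10000 again; only
   the caller's subsequent update of break_value records the space as
   being in use.  */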

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end -= excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end += excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}

/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return break_value - virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address += b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
	     : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero (bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
	- ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze is set, a best fit of unused blocs could be done
   before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
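
/* A minimal usage sketch of the interface (illustrative only;
   `example_usage' and `buffer' are hypothetical names, not part of
   this allocator).  The allocator remembers the address of `buffer'
   and rewrites it whenever the bloc relocates, so callers must always
   reload the pointer from the registered variable, never cache it.  */
#if 0
static POINTER buffer;

static void
example_usage ()
{
  r_alloc (&buffer, 1024);	/* buffer now points at 1024 writable bytes.  */
  buffer[0] = 'x';		/* Access data through the registered variable.  */
  r_re_alloc (&buffer, 2048);	/* Grow; buffer is updated if the bloc moves.  */
  r_alloc_free (&buffer);	/* Release the bloc; buffer is reset to 0.  */
}
#endif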

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
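
/* Illustrative behavior of the shrink test above (hypothetical sizes,
   assuming page_size == 4096): shrinking a 10000-byte bloc to 200
   bytes saves more than a page, so the bloc is actually resized;
   shrinking it to 9000 bytes saves less than a page, so the bloc is
   left alone.  Growing always resizes, except while frozen, when a
   fresh bloc is allocated and the old one is abandoned until thaw.  */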

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
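
/* A sketch of the intended pairing (illustrative only).  Around code
   that holds bare pointers into relocatable data:

     r_alloc_freeze (0);  ...use pointers into blocs safely...  r_alloc_thaw ();

   Calls may nest; only the outermost thaw re-enables relocation and
   reclaims the blocs that were freed while frozen.  */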

\f
/* The hook `malloc' uses for the function which gets more space
   from the system.  */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

#ifdef DOUG_LEA_MALLOC
  mallopt (M_TOP_PAD, 64 * 4096);
#else
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}

#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}
#endif

#ifdef DEBUG
#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */