(r_alloc_freeze_level): New variable.
[bpt/emacs.git] / src / ralloc.c
1 /* Block-relocating memory allocator.
2 Copyright (C) 1993 Free Software Foundation, Inc.
3
4 This file is part of GNU Emacs.
5
6 GNU Emacs is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 1, or (at your option)
9 any later version.
10
11 GNU Emacs is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU Emacs; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
19
20 /* NOTES:
21
22 Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
23 rather than all of them. This means allowing for a possible
24 hole between the first bloc and the end of malloc storage. */
25
26 #ifdef emacs
27
28 #include <config.h>
29 #include "lisp.h" /* Needed for VALBITS. */
30
31 #undef NULL
32
33 /* The important properties of this type are that 1) it's a pointer, and
34 2) arithmetic on it should work as if the size of the object pointed
35 to has a size of 1. */
36 #if 0 /* Arithmetic on void* is a GCC extension. */
37 #ifdef __STDC__
38 typedef void *POINTER;
39 #else
40
41 #ifdef HAVE_CONFIG_H
42 #include "config.h"
43 #endif
44
45 typedef char *POINTER;
46
47 #endif
48 #endif /* 0 */
49
50 /* Unconditionally use char * for this. */
51 typedef char *POINTER;
52
53 typedef unsigned long SIZE;
54
55 /* Declared in dispnew.c, this version doesn't screw up if regions
56 overlap. */
57 extern void safe_bcopy ();
58
59 #include "getpagesize.h"
60
61 #else /* Not emacs. */
62
63 #include <stddef.h>
64
65 typedef size_t SIZE;
66 typedef void *POINTER;
67
68 #include <unistd.h>
69 #include <malloc.h>
70 #include <string.h>
71
72 #define safe_bcopy(x, y, z) memmove (y, x, z)
73
74 #endif /* emacs. */
75
76 #define NIL ((POINTER) 0)
77
78 /* A flag to indicate whether we have initialized ralloc yet. For
79 Emacs's sake, please do not make this local to malloc_init; on some
80 machines, the dumping procedure makes all static variables
81 read-only. On these machines, the word static is #defined to be
82 the empty string, meaning that r_alloc_initialized becomes an
83 automatic variable, and loses its value each time Emacs is started up. */
84 static int r_alloc_initialized = 0;
85
86 static void r_alloc_init ();
87 \f
88 /* Declarations for working with the malloc, ralloc, and system breaks. */
89
90 /* Function to set the real break value. */
91 static POINTER (*real_morecore) ();
92
93 /* The break value, as seen by malloc (). */
94 static POINTER virtual_break_value;
95
96 /* The break value, viewed by the relocatable blocs. */
97 static POINTER break_value;
98
99 /* The REAL (i.e., page aligned) break value of the process. */
100 static POINTER page_break_value;
101
102 /* This is the size of a page. We round memory requests to this boundary. */
103 static int page_size;
104
105 /* Whenever we get memory from the system, get this many extra bytes. This
106 must be a multiple of page_size. */
107 static int extra_bytes;
108
109 /* Macros for rounding. Note that rounding to any value is possible
110 by changing the definition of PAGE. */
111 #define PAGE (getpagesize ())
112 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
113 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
114 & ~(page_size - 1))
115 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))
116 \f
117 /* Functions to get and return memory from the system. */
118
119 /* Obtain SIZE bytes of space. If enough space is not presently available
120 in our process reserve, (i.e., (page_break_value - break_value)),
121 this means getting more page-aligned space from the system.
122
123 Return non-zero if all went well, or zero if we couldn't allocate
124 the memory. */
125 static int
126 obtain (size)
127 SIZE size;
128 {
129 SIZE already_available = page_break_value - break_value;
130
131 if (already_available < size)
132 {
133 SIZE get = ROUNDUP (size - already_available);
134 /* Get some extra, so we can come here less often. */
135 get += extra_bytes;
136
137 if ((*real_morecore) (get) == 0)
138 return 0;
139
140 page_break_value += get;
141 }
142
143 break_value += size;
144
145 return 1;
146 }
147
148 /* Obtain SIZE bytes of space and return a pointer to the new area.
149 If we could not allocate the space, return zero. */
150
151 static POINTER
152 get_more_space (size)
153 SIZE size;
154 {
155 POINTER ptr = break_value;
156 if (obtain (size))
157 return ptr;
158 else
159 return 0;
160 }
161
162 /* Note that SIZE bytes of space have been relinquished by the process.
163 If SIZE is more than a page, return the space to the system. */
164
165 static void
166 relinquish (size)
167 SIZE size;
168 {
169 POINTER new_page_break;
170 int excess;
171
172 break_value -= size;
173 new_page_break = (POINTER) ROUNDUP (break_value);
174 excess = (char *) page_break_value - (char *) new_page_break;
175
176 if (excess > extra_bytes * 2)
177 {
178 /* Keep extra_bytes worth of empty space.
179 And don't free anything unless we can free at least extra_bytes. */
180 if ((*real_morecore) (extra_bytes - excess) == 0)
181 abort ();
182
183 page_break_value += extra_bytes - excess;
184 }
185
186 /* Zero the space from the end of the "official" break to the actual
187 break, so that bugs show up faster. */
188 bzero (break_value, ((char *) page_break_value - (char *) break_value));
189 }
190 \f
/* The meat - allocating, freeing, and relocating blocs.  */

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.  */
typedef struct bp
{
  struct bp *next;		/* Successor in address order, or NIL_BLOC.  */
  struct bp *prev;		/* Predecessor, or NIL_BLOC at the head.  */
  POINTER *variable;		/* User variable holding `data'; rewritten
				   whenever the bloc is relocated.  */
  POINTER data;			/* Start of this bloc's storage.  */
  SIZE size;			/* Number of bytes of storage.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;
211
212 /* Find the bloc referenced by the address in PTR. Returns a pointer
213 to that block. */
214
215 static bloc_ptr
216 find_bloc (ptr)
217 POINTER *ptr;
218 {
219 register bloc_ptr p = first_bloc;
220
221 while (p != NIL_BLOC)
222 {
223 if (p->variable == ptr && p->data == *ptr)
224 return p;
225
226 p = p->next;
227 }
228
229 return p;
230 }
231
232 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
233 Returns a pointer to the new bloc, or zero if we couldn't allocate
234 memory for the new block. */
235
236 static bloc_ptr
237 get_bloc (size)
238 SIZE size;
239 {
240 register bloc_ptr new_bloc;
241
242 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
243 || ! (new_bloc->data = get_more_space (size)))
244 {
245 if (new_bloc)
246 free (new_bloc);
247
248 return 0;
249 }
250
251 new_bloc->size = size;
252 new_bloc->next = NIL_BLOC;
253 new_bloc->variable = (POINTER *) NIL;
254
255 if (first_bloc)
256 {
257 new_bloc->prev = last_bloc;
258 last_bloc->next = new_bloc;
259 last_bloc = new_bloc;
260 }
261 else
262 {
263 first_bloc = last_bloc = new_bloc;
264 new_bloc->prev = NIL_BLOC;
265 }
266
267 return new_bloc;
268 }
269
270 /* Relocate all blocs from BLOC on upward in the list to the zone
271 indicated by ADDRESS. Direction of relocation is determined by
272 the position of ADDRESS relative to BLOC->data.
273
274 If BLOC is NIL_BLOC, nothing is done.
275
276 Note that ordering of blocs is not affected by this function. */
277
278 static void
279 relocate_some_blocs (bloc, address)
280 bloc_ptr bloc;
281 POINTER address;
282 {
283 if (bloc != NIL_BLOC)
284 {
285 register SIZE offset = address - bloc->data;
286 register SIZE data_size = 0;
287 register bloc_ptr b;
288
289 for (b = bloc; b != NIL_BLOC; b = b->next)
290 {
291 data_size += b->size;
292 b->data += offset;
293 *b->variable = b->data;
294 }
295
296 safe_bcopy (address - offset, address, data_size);
297 }
298 }
299
300
301 /* Free BLOC from the chain of blocs, relocating any blocs above it
302 and returning BLOC->size bytes to the free area. */
303
304 static void
305 free_bloc (bloc)
306 bloc_ptr bloc;
307 {
308 if (bloc == first_bloc && bloc == last_bloc)
309 {
310 first_bloc = last_bloc = NIL_BLOC;
311 }
312 else if (bloc == last_bloc)
313 {
314 last_bloc = bloc->prev;
315 last_bloc->next = NIL_BLOC;
316 }
317 else if (bloc == first_bloc)
318 {
319 first_bloc = bloc->next;
320 first_bloc->prev = NIL_BLOC;
321 }
322 else
323 {
324 bloc->next->prev = bloc->prev;
325 bloc->prev->next = bloc->next;
326 }
327
328 relocate_some_blocs (bloc->next, bloc->data);
329 relinquish (bloc->size);
330 free (bloc);
331 }
332 \f
/* Interface routines.  */

/* Nonzero once r_alloc_init has finished; until then r_alloc_sbrk
   simply forwards to real_morecore.  */
static int use_relocatable_buffers;

/* Depth of r_alloc_freeze calls; while positive, blocs must not move
   and heap growth that would move them fails.  */
static int r_alloc_freeze_level;
337
/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
     long size;
{
  /* This is the first address not currently available for the heap.  */
  POINTER top;
  /* Amount of empty space below that.  */
  /* It is not correct to use SIZE here, because that is usually unsigned.
     ptrdiff_t would be okay, but is not always available.
     `long' will work in all cases, in practice.  */
  long already_available;
  POINTER ptr;

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  /* The malloc heap may grow up to the first bloc's data, or to the
     page break when there are no blocs at all.  */
  top = first_bloc ? first_bloc->data : page_break_value;
  already_available = (char *) top - (char *) virtual_break_value;

  /* Do we not have enough gap already?  */
  if (size > 0 && already_available < size)
    {
      /* Get what we need, plus some extra so we can come here less often.  */
      SIZE get = size - already_available + extra_bytes;

      /* Enlarging the gap means relocating every bloc upward, which a
	 freeze forbids — so refuse (return 0) while frozen.  */
      if (r_alloc_freeze_level > 0 || ! obtain (get))
	return 0;

      if (first_bloc)
	relocate_some_blocs (first_bloc, first_bloc->data + get);

      /* Zero out the space we just allocated, to help catch bugs
	 quickly.  */
      bzero (virtual_break_value, get);
    }
  /* Can we keep extra_bytes of gap while freeing at least extra_bytes?  */
  else if (size < 0 && already_available - size > 2 * extra_bytes
	   && r_alloc_freeze_level == 0)
    {
      /* Ok, do so.  This is how many to free.  */
      SIZE give_back = already_available - size - extra_bytes;

      /* Slide the blocs down over the reclaimed gap, then return the
	 space to the system.  */
      if (first_bloc)
	relocate_some_blocs (first_bloc, first_bloc->data - give_back);
      relinquish (give_back);
    }

  /* malloc sees the break move by exactly SIZE, whatever we really
     did above.  */
  ptr = virtual_break_value;
  virtual_break_value += size;

  return ptr;
}
401
402 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to
403 the data is returned in *PTR. PTR is thus the address of some variable
404 which will use the data area.
405
406 If we can't allocate the necessary memory, set *PTR to zero, and
407 return zero. */
408
409 POINTER
410 r_alloc (ptr, size)
411 POINTER *ptr;
412 SIZE size;
413 {
414 register bloc_ptr new_bloc;
415
416 if (! r_alloc_initialized)
417 r_alloc_init ();
418
419 new_bloc = get_bloc (size);
420 if (new_bloc)
421 {
422 new_bloc->variable = ptr;
423 *ptr = new_bloc->data;
424 }
425 else
426 *ptr = 0;
427
428 return *ptr;
429 }
430
431 /* Free a bloc of relocatable storage whose data is pointed to by PTR.
432 Store 0 in *PTR to show there's no block allocated. */
433
434 void
435 r_alloc_free (ptr)
436 register POINTER *ptr;
437 {
438 register bloc_ptr dead_bloc;
439
440 dead_bloc = find_bloc (ptr);
441 if (dead_bloc == NIL_BLOC)
442 abort ();
443
444 free_bloc (dead_bloc);
445 *ptr = 0;
446 }
447
448 /* Given a pointer at address PTR to relocatable data, resize it to SIZE.
449 Do this by shifting all blocks above this one up in memory, unless
450 SIZE is less than or equal to the current bloc size, in which case
451 do nothing.
452
453 Change *PTR to reflect the new bloc, and return this value.
454
455 If more memory cannot be allocated, then leave *PTR unchanged, and
456 return zero. */
457
458 POINTER
459 r_re_alloc (ptr, size)
460 POINTER *ptr;
461 SIZE size;
462 {
463 register bloc_ptr bloc;
464
465 bloc = find_bloc (ptr);
466 if (bloc == NIL_BLOC)
467 abort ();
468
469 if (size <= bloc->size)
470 /* Wouldn't it be useful to actually resize the bloc here? */
471 return *ptr;
472
473 if (! obtain (size - bloc->size))
474 return 0;
475
476 relocate_some_blocs (bloc->next, bloc->data + size);
477
478 /* Zero out the new space in the bloc, to help catch bugs faster. */
479 bzero (bloc->data + bloc->size, size - bloc->size);
480
481 /* Indicate that this block has a new size. */
482 bloc->size = size;
483
484 return *ptr;
485 }
486
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */
void
r_alloc_freeze (size)
     long size;
{
  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.
     (Growing here, before the freeze, is allowed to relocate blocs.)  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    /* Now that we are frozen, this shrink lowers the virtual break
       without relinquishing pages or moving blocs (r_alloc_sbrk skips
       both while frozen), leaving a gap malloc can grow into without
       any relocation.  */
    r_alloc_sbrk (-size);
}
505
506 void
507 r_alloc_thaw ()
508 {
509 if (--r_alloc_freeze_level < 0)
510 abort ();
511 }
512 \f
/* The hook `malloc' uses for the function which gets more space
   from the system.  */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  /* Save the system's real break function, then plug ourselves into
     GNU malloc in its place.  */
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  /* A zero-sized morecore call just reports the current break.  */
  virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

  page_break_value = (POINTER) ROUNDUP (break_value);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (page_break_value - break_value);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (break_value, (page_break_value - break_value));
  virtual_break_value = break_value = page_break_value;
  use_relocatable_buffers = 1;
}