(Info-menu-update): When there are no menus and/or no
[bpt/emacs.git] / src / gmalloc.c
CommitLineData
74ad5c7f
KH
1/* This file is no longer automatically generated from libc. */
2
3#define _MALLOC_INTERNAL
4
5/* The malloc headers and source files from the C library follow here. */
6
7/* Declarations for `malloc' and friends.
68c45bf0 8 Copyright 1990, 91, 92, 93, 95, 96, 99 Free Software Foundation, Inc.
74ad5c7f
KH
9 Written May 1989 by Mike Haertel.
10
11This library is free software; you can redistribute it and/or
12modify it under the terms of the GNU Library General Public License as
13published by the Free Software Foundation; either version 2 of the
14License, or (at your option) any later version.
15
16This library is distributed in the hope that it will be useful,
17but WITHOUT ANY WARRANTY; without even the implied warranty of
18MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19Library General Public License for more details.
20
21You should have received a copy of the GNU Library General Public
22License along with this library; see the file COPYING.LIB. If
23not, write to the Free Software Foundation, Inc., 675 Mass Ave,
24Cambridge, MA 02139, USA.
25
26 The author may be reached (Email) at the address mike@ai.mit.edu,
27 or (US mail) as Mike Haertel c/o Free Software Foundation. */
28
29#ifndef _MALLOC_H
30
31#define _MALLOC_H 1
32
33#ifdef _MALLOC_INTERNAL
34
35#ifdef HAVE_CONFIG_H
36#include <config.h>
37#endif
38
9e67b01b
DL
39#if defined __cplusplus || (defined (__STDC__) && __STDC__) || \
40 defined STDC_HEADERS || defined PROTOTYPES
0a27e8ed
RS
41#undef PP
42#define PP(args) args
74ad5c7f
KH
43#undef __ptr_t
44#define __ptr_t void *
45#else /* Not C++ or ANSI C. */
0a27e8ed
RS
46#undef PP
47#define PP(args) ()
74ad5c7f
KH
48#undef __ptr_t
49#define __ptr_t char *
50#endif /* C++ or ANSI C. */
51
52#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
53#include <string.h>
54#else
55#ifndef memset
56#define memset(s, zero, n) bzero ((s), (n))
57#endif
58#ifndef memcpy
59#define memcpy(d, s, n) bcopy ((s), (d), (n))
60#endif
61#endif
62
ca9c0567 63#ifdef HAVE_LIMITS_H
74ad5c7f 64#include <limits.h>
ca9c0567 65#endif
74ad5c7f
KH
66#ifndef CHAR_BIT
67#define CHAR_BIT 8
68#endif
74ad5c7f
KH
69
70#ifdef HAVE_UNISTD_H
71#include <unistd.h>
72#endif
73
74#endif /* _MALLOC_INTERNAL. */
75
76
77#ifdef __cplusplus
78extern "C"
79{
80#endif
81
ca9c0567 82#ifdef STDC_HEADERS
74ad5c7f
KH
83#include <stddef.h>
84#define __malloc_size_t size_t
85#define __malloc_ptrdiff_t ptrdiff_t
86#else
87#define __malloc_size_t unsigned int
88#define __malloc_ptrdiff_t int
89#endif
90
91#ifndef NULL
92#define NULL 0
93#endif
94
ca9c0567
PE
95#ifndef FREE_RETURN_TYPE
96#define FREE_RETURN_TYPE void
97#endif
98
74ad5c7f
KH
99
100/* Allocate SIZE bytes of memory. */
0a27e8ed 101extern __ptr_t malloc PP ((__malloc_size_t __size));
74ad5c7f
KH
102/* Re-allocate the previously allocated block
103 in __ptr_t, making the new block SIZE bytes long. */
0a27e8ed 104extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
74ad5c7f 105/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
0a27e8ed 106extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
74ad5c7f 107/* Free a block allocated by `malloc', `realloc' or `calloc'. */
ca9c0567 108extern FREE_RETURN_TYPE free PP ((__ptr_t __ptr));
74ad5c7f
KH
109
110/* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
111#if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict. */
0a27e8ed
RS
112extern __ptr_t memalign PP ((__malloc_size_t __alignment,
113 __malloc_size_t __size));
74ad5c7f
KH
114#endif
115
116/* Allocate SIZE bytes on a page boundary. */
117#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
0a27e8ed 118extern __ptr_t valloc PP ((__malloc_size_t __size));
74ad5c7f
KH
119#endif
120
121
122#ifdef _MALLOC_INTERNAL
123
124/* The allocator divides the heap into blocks of fixed size; large
125 requests receive one or more whole blocks, and small requests
126 receive a fragment of a block. Fragment sizes are powers of two,
127 and all fragments of a block are the same size. When all the
128 fragments in a block have been freed, the block itself is freed. */
129#define INT_BIT (CHAR_BIT * sizeof(int))
130#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
131#define BLOCKSIZE (1 << BLOCKLOG)
132#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
133
134/* Determine the amount of memory spanned by the initial heap table
135 (not an absolute limit). */
136#define HEAP (INT_BIT > 16 ? 4194304 : 65536)
137
138/* Number of contiguous free blocks allowed to build up at the end of
139 memory before they will be returned to the system. */
140#define FINAL_FREE_BLOCKS 8
141
142/* Data structure giving per-block information. */
143typedef union
144 {
145 /* Heap information for a busy block. */
146 struct
147 {
148 /* Zero for a large (multiblock) object, or positive giving the
149 logarithm to the base two of the fragment size. */
150 int type;
151 union
152 {
153 struct
154 {
155 __malloc_size_t nfree; /* Free frags in a fragmented block. */
156 __malloc_size_t first; /* First free fragment of the block. */
157 } frag;
158 /* For a large object, in its first block, this has the number
159 of blocks in the object. In the other blocks, this has a
160 negative number which says how far back the first block is. */
161 __malloc_ptrdiff_t size;
162 } info;
163 } busy;
164 /* Heap information for a free block
165 (that may be the first of a free cluster). */
166 struct
167 {
168 __malloc_size_t size; /* Size (in blocks) of a free cluster. */
169 __malloc_size_t next; /* Index of next free cluster. */
170 __malloc_size_t prev; /* Index of previous free cluster. */
171 } free;
172 } malloc_info;
173
174/* Pointer to first block of the heap. */
175extern char *_heapbase;
176
177/* Table indexed by block number giving per-block information. */
178extern malloc_info *_heapinfo;
179
180/* Address to block number and vice versa. */
181#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
182#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
183
184/* Current search index for the heap table. */
185extern __malloc_size_t _heapindex;
186
187/* Limit of valid info table indices. */
188extern __malloc_size_t _heaplimit;
189
190/* Doubly linked lists of free fragments. */
191struct list
192 {
193 struct list *next;
194 struct list *prev;
195 };
196
197/* Free list headers for each fragment size. */
198extern struct list _fraghead[];
199
200/* List of blocks allocated with `memalign' (or `valloc'). */
201struct alignlist
202 {
203 struct alignlist *next;
204 __ptr_t aligned; /* The address that memaligned returned. */
205 __ptr_t exact; /* The address that malloc returned. */
206 };
207extern struct alignlist *_aligned_blocks;
208
209/* Instrumentation. */
210extern __malloc_size_t _chunks_used;
211extern __malloc_size_t _bytes_used;
212extern __malloc_size_t _chunks_free;
213extern __malloc_size_t _bytes_free;
214
215/* Internal versions of `malloc', `realloc', and `free'
216 used when these functions need to call each other.
217 They are the same but don't call the hooks. */
0a27e8ed
RS
218extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
219extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
220extern void _free_internal PP ((__ptr_t __ptr));
74ad5c7f
KH
221
222#endif /* _MALLOC_INTERNAL. */
223
224/* Given an address in the middle of a malloc'd object,
225 return the address of the beginning of the object. */
0a27e8ed 226extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));
74ad5c7f
KH
227
228/* Underlying allocation function; successive calls should
229 return contiguous pieces of memory. */
0a27e8ed 230extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));
74ad5c7f
KH
231
232/* Default value of `__morecore'. */
0a27e8ed 233extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));
74ad5c7f
KH
234
235/* If not NULL, this function is called after each time
236 `__morecore' is called to increase the data size. */
0a27e8ed 237extern void (*__after_morecore_hook) PP ((void));
74ad5c7f
KH
238
239/* Number of extra blocks to get each time we ask for more core.
240 This reduces the frequency of calling `(*__morecore)'. */
241extern __malloc_size_t __malloc_extra_blocks;
242
243/* Nonzero if `malloc' has been called and done its initialization. */
244extern int __malloc_initialized;
245/* Function called to initialize malloc data structures. */
0a27e8ed 246extern int __malloc_initialize PP ((void));
74ad5c7f
KH
247
248/* Hooks for debugging versions. */
0a27e8ed
RS
249extern void (*__malloc_initialize_hook) PP ((void));
250extern void (*__free_hook) PP ((__ptr_t __ptr));
251extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
252extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
253extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
254 __malloc_size_t __alignment));
74ad5c7f
KH
255
256/* Return values for `mprobe': these are the kinds of inconsistencies that
257 `mcheck' enables detection of. */
258enum mcheck_status
259 {
260 MCHECK_DISABLED = -1, /* Consistency checking is not turned on. */
261 MCHECK_OK, /* Block is fine. */
262 MCHECK_FREE, /* Block freed twice. */
263 MCHECK_HEAD, /* Memory before the block was clobbered. */
264 MCHECK_TAIL /* Memory after the block was clobbered. */
265 };
266
267/* Activate a standard collection of debugging hooks. This must be called
268 before `malloc' is ever called. ABORTFUNC is called with an error code
269 (see enum above) when an inconsistency is detected. If ABORTFUNC is
270 null, the standard function prints on stderr and then calls `abort'. */
0a27e8ed 271extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));
74ad5c7f
KH
272
273/* Check for aberrations in a particular malloc'd block. You must have
274 called `mcheck' already. These are the same checks that `mcheck' does
275 when you free or reallocate a block. */
0a27e8ed 276extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
74ad5c7f
KH
277
278/* Activate a standard collection of tracing hooks. */
0a27e8ed
RS
279extern void mtrace PP ((void));
280extern void muntrace PP ((void));
74ad5c7f
KH
281
282/* Statistics available to the user. */
283struct mstats
284 {
285 __malloc_size_t bytes_total; /* Total size of the heap. */
286 __malloc_size_t chunks_used; /* Chunks allocated by the user. */
287 __malloc_size_t bytes_used; /* Byte total of user-allocated chunks. */
288 __malloc_size_t chunks_free; /* Chunks in the free list. */
289 __malloc_size_t bytes_free; /* Byte total of chunks in the free list. */
290 };
291
292/* Pick up the current statistics. */
0a27e8ed 293extern struct mstats mstats PP ((void));
74ad5c7f
KH
294
295/* Call WARNFUN with a warning message when memory usage is high. */
0a27e8ed
RS
296extern void memory_warnings PP ((__ptr_t __start,
297 void (*__warnfun) PP ((const char *))));
74ad5c7f
KH
298
299
300/* Relocating allocator. */
301
302/* Allocate SIZE bytes, and store the address in *HANDLEPTR. */
0a27e8ed 303extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
74ad5c7f
KH
304
305/* Free the storage allocated in HANDLEPTR. */
0a27e8ed 306extern void r_alloc_free PP ((__ptr_t *__handleptr));
74ad5c7f
KH
307
308/* Adjust the block at HANDLEPTR to be SIZE bytes long. */
0a27e8ed 309extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
74ad5c7f
KH
310
311
312#ifdef __cplusplus
313}
314#endif
315
316#endif /* malloc.h */
317/* Memory allocator `malloc'.
318 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
319 Written May 1989 by Mike Haertel.
320
321This library is free software; you can redistribute it and/or
322modify it under the terms of the GNU Library General Public License as
323published by the Free Software Foundation; either version 2 of the
324License, or (at your option) any later version.
325
326This library is distributed in the hope that it will be useful,
327but WITHOUT ANY WARRANTY; without even the implied warranty of
328MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
329Library General Public License for more details.
330
331You should have received a copy of the GNU Library General Public
332License along with this library; see the file COPYING.LIB. If
333not, write to the Free Software Foundation, Inc., 675 Mass Ave,
334Cambridge, MA 02139, USA.
335
336 The author may be reached (Email) at the address mike@ai.mit.edu,
337 or (US mail) as Mike Haertel c/o Free Software Foundation. */
338
339#ifndef _MALLOC_INTERNAL
340#define _MALLOC_INTERNAL
341#include <malloc.h>
342#endif
343#include <errno.h>
344
/* How to really get more memory.  Points at `__default_morecore'
   (an sbrk-style allocator) unless the embedding application
   installs its own.  */
__ptr_t (*__morecore) PP ((ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  Nonzero once `__malloc_initialize' has run.  */
int __malloc_initialized;

/* Number of extra blocks to request each time more core is needed;
   this reduces how often `(*__morecore)' must be called.  */
__malloc_size_t __malloc_extra_blocks;

/* If set, called once before the first allocation.  */
void (*__malloc_initialize_hook) PP ((void));
/* If set, called after each time `__morecore' grows the data size.  */
void (*__after_morecore_hook) PP ((void));
74ad5c7f 382
5dcab13e
GM
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

/* Nonzero while the info table is currently mprotect'ed read-only.  */
static int state_protected_p;
/* Size (in bytes) covered by the most recent protection call.  */
static __malloc_size_t last_state_size;
/* Location of _heapinfo at the most recent protection call, so a
   relocated table's old pages can be made writable again.  */
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      /* A failing mprotect here means the debug machinery itself is
	 broken; abort rather than silently losing protection.  */
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
429
74ad5c7f
KH
430
431/* Aligned allocation. */
0a27e8ed 432static __ptr_t align PP ((__malloc_size_t));
74ad5c7f
KH
433static __ptr_t
434align (size)
435 __malloc_size_t size;
436{
437 __ptr_t result;
438 unsigned long int adj;
439
ceeb3d7d
EZ
440 /* align accepts an unsigned argument, but __morecore accepts a
441 signed one. This could lead to trouble if SIZE overflows a
442 signed int type accepted by __morecore. We just punt in that
443 case, since they are requesting a ludicrous amount anyway. */
444 if ((__malloc_ptrdiff_t)size < 0)
445 result = 0;
446 else
447 result = (*__morecore) (size);
74ad5c7f
KH
448 adj = (unsigned long int) ((unsigned long int) ((char *) result -
449 (char *) NULL)) % BLOCKSIZE;
450 if (adj != 0)
451 {
452 __ptr_t new;
453 adj = BLOCKSIZE - adj;
454 new = (*__morecore) (adj);
455 result = (char *) result + adj;
456 }
457
458 if (__after_morecore_hook)
459 (*__after_morecore_hook) ();
460
461 return result;
462}
463
464/* Get SIZE bytes, if we can get them starting at END.
465 Return the address of the space we got.
466 If we cannot get space at END, fail and return 0. */
0a27e8ed 467static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
74ad5c7f
KH
468static __ptr_t
469get_contiguous_space (size, position)
470 __malloc_ptrdiff_t size;
471 __ptr_t position;
472{
473 __ptr_t before;
474 __ptr_t after;
475
476 before = (*__morecore) (0);
477 /* If we can tell in advance that the break is at the wrong place,
478 fail now. */
479 if (before != position)
480 return 0;
481
482 /* Allocate SIZE bytes and get the address of them. */
483 after = (*__morecore) (size);
484 if (!after)
485 return 0;
486
487 /* It was not contiguous--reject it. */
488 if (after != position)
489 {
490 (*__morecore) (- size);
491 return 0;
492 }
493
494 return after;
495}
496
497
498/* This is called when `_heapinfo' and `heapsize' have just
499 been set to describe a new info table. Set up the table
500 to describe itself and account for it in the statistics. */
0a27e8ed 501static void register_heapinfo PP ((void));
74ad5c7f
KH
502#ifdef __GNUC__
503__inline__
504#endif
505static void
506register_heapinfo ()
507{
508 __malloc_size_t block, blocks;
509
510 block = BLOCK (_heapinfo);
511 blocks = BLOCKIFY (heapsize * sizeof (malloc_info));
512
513 /* Account for the _heapinfo block itself in the statistics. */
514 _bytes_used += blocks * BLOCKSIZE;
515 ++_chunks_used;
516
517 /* Describe the heapinfo block itself in the heapinfo. */
518 _heapinfo[block].busy.type = 0;
519 _heapinfo[block].busy.info.size = blocks;
520 /* Leave back-pointers for malloc_find_address. */
521 while (--blocks > 0)
522 _heapinfo[block + blocks].busy.info.size = -blocks;
523}
524
/* Set everything up and remember that we have.
   Returns 1 on success; returns 0 if already initialized, or if the
   initial heap-info table could not be obtained from the system.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

#ifdef GC_MCHECK
  /* Install consistency-checking hooks before any allocation.  */
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  /* Size the initial info table so it can describe HEAP bytes of core.  */
  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return 0;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  /* Entry 0 anchors the circular free list; it never describes a
     real block.  */
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  /* Make the table account for its own storage.  */
  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return 1;
}
556
/* Nonzero while morecore is re-entered through _realloc_internal below;
   used to cut off the recursion.  */
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.
   Returns NULL if the system is out of core or if re-entered while
   already growing the info table.  */
static __ptr_t morecore PP ((__malloc_size_t));
static __ptr_t
morecore (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      /* Keep doubling until the table covers the new top of core.  */
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      /* Give back the core obtained for the caller's request
		 too; the whole operation has failed.  */
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
674
/* Allocate memory from the heap.
   Core of `malloc': small requests (<= BLOCKSIZE/2) are served from
   power-of-two fragments of a block; larger requests get whole blocks
   from the circular free list, growing the heap via `morecore' when
   needed.  Does not invoke the user hooks.  */
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if	0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  /* Every free fragment must be able to hold a struct list node, so
     never allocate less than that.  */
  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size. */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size. */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters. */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  /* If fragments remain, record the offset (in fragment units)
	     of the next free fragment within the block.  */
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      return NULL;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accomodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		return NULL;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list. */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list. */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      /* Mark the block busy: type 0 means a large (multiblock) object.  */
      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment. */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
  return result;
}
877
878__ptr_t
879malloc (size)
880 __malloc_size_t size;
881{
882 if (!__malloc_initialized && !__malloc_initialize ())
883 return NULL;
884
885 return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size);
886}
887\f
#ifndef	_LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

/* Underscore-prefixed alias for `malloc'; forwards unchanged.  */
__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

/* Underscore-prefixed alias for `free'; forwards unchanged.  */
void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

/* Underscore-prefixed alias for `realloc'; forwards unchanged.  */
__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
916/* Free a block of memory allocated by `malloc'.
917 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
918 Written May 1989 by Mike Haertel.
919
920This library is free software; you can redistribute it and/or
921modify it under the terms of the GNU Library General Public License as
922published by the Free Software Foundation; either version 2 of the
923License, or (at your option) any later version.
924
925This library is distributed in the hope that it will be useful,
926but WITHOUT ANY WARRANTY; without even the implied warranty of
927MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
928Library General Public License for more details.
929
930You should have received a copy of the GNU Library General Public
931License along with this library; see the file COPYING.LIB. If
932not, write to the Free Software Foundation, Inc., 675 Mass Ave,
933Cambridge, MA 02139, USA.
934
935 The author may be reached (Email) at the address mike@ai.mit.edu,
936 or (US mail) as Mike Haertel c/o Free Software Foundation. */
937
938#ifndef _MALLOC_INTERNAL
939#define _MALLOC_INTERNAL
940#include <malloc.h>
941#endif
942
943
944/* Cope with systems lacking `memmove'. */
945#ifndef memmove
946#if (defined (MEMMOVE_MISSING) || \
947 !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
948#ifdef emacs
949#undef __malloc_safe_bcopy
950#define __malloc_safe_bcopy safe_bcopy
951#endif
952/* This function is defined in realloc.c. */
0a27e8ed 953extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
74ad5c7f
KH
954#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
955#endif
956#endif
957
958
/* Debugging hook for free.  When non-null, `free' calls this function
   instead of `_free_internal' (see `free' below).  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign; maps the aligned pointer
   handed to the user back to the exact pointer malloc returned.  */
struct alignlist *_aligned_blocks = NULL;
964
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.

   PTR may be NULL (no-op).  Handles both large block allocations
   (heap-info type 0) and fragment allocations (type = log2 of the
   fragment size), and may shrink the heap via __morecore when enough
   trailing free space accumulates.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  /* If PTR came from memalign, translate it back to the exact pointer
     malloc returned, and release the alignlist slot.  */
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Large allocation measured in whole blocks.  */
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal (info_blocks
							  * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      /* Negative increment asks __morecore to shrink core.  */
	      (*__morecore) (-bytes);
	      /* Unlink the returned cluster from the free list.  */
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Fragment allocation: TYPE is log2 of the fragment size.  */
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  /* Reclassify the block as a busy single-block allocation so
	     the recursive free below takes the type-0 path.  */
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#ifdef GC_MALLOC_CHECK
	  _free_internal (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
1211
1212/* Return memory to the heap. */
ca9c0567
PE
1213
1214FREE_RETURN_TYPE
74ad5c7f
KH
1215free (ptr)
1216 __ptr_t ptr;
1217{
1218 if (__free_hook != NULL)
1219 (*__free_hook) (ptr);
1220 else
1221 _free_internal (ptr);
1222}
1223
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
/* No weak-alias support; provide `cfree' as a real forwarding function.  */
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
1235/* Change the size of a block allocated by `malloc'.
1236 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1237 Written May 1989 by Mike Haertel.
1238
1239This library is free software; you can redistribute it and/or
1240modify it under the terms of the GNU Library General Public License as
1241published by the Free Software Foundation; either version 2 of the
1242License, or (at your option) any later version.
1243
1244This library is distributed in the hope that it will be useful,
1245but WITHOUT ANY WARRANTY; without even the implied warranty of
1246MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1247Library General Public License for more details.
1248
1249You should have received a copy of the GNU Library General Public
1250License along with this library; see the file COPYING.LIB. If
1251not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1252Cambridge, MA 02139, USA.
1253
1254 The author may be reached (Email) at the address mike@ai.mit.edu,
1255 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1256
1257#ifndef _MALLOC_INTERNAL
1258#define _MALLOC_INTERNAL
1259#include <malloc.h>
1260#endif
1261
1262
1263
1264/* Cope with systems lacking `memmove'. */
1265#if (defined (MEMMOVE_MISSING) || \
1266 !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
1267
1268#ifdef emacs
1269#undef __malloc_safe_bcopy
1270#define __malloc_safe_bcopy safe_bcopy
1271#else
1272
/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.
   Copy SIZE bytes from AFROM to ATO; the regions may overlap in
   either direction.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  /* Nothing to do for an empty or self copy.  */
  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
	 nonoverlapping chunks of TO - FROM bytes each.  However, if
	 TO - FROM is small, then the bcopy function call overhead
	 makes this not worth it.  The crossover point could be about
	 anywhere.  Since I don't think the obvious copy loop is too
	 bad, I'm trying to err in its favor.  */
      if (to - from < 64)
	{
	  /* Byte-at-a-time backward copy.  */
	  do
	    *--endt = *--endf;
	  while (endf != from);
	}
      else
	{
	  /* Copy backward in non-overlapping chunks of TO - FROM bytes.  */
	  for (;;)
	    {
	      endt -= (to - from);
	      endf -= (to - from);

	      if (endt < to)
		break;

	      bcopy (endf, endt, to - from);
	    }

	  /* If SIZE wasn't a multiple of TO - FROM, there will be a
	     little left over.  The amount left over is
	     (endt + (to - from)) - to, which is endt - from.  */
	  bcopy (from, to, endt - from);
	}
    }
}
1333#endif /* emacs */
1334
1335#ifndef memmove
0a27e8ed 1336extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
74ad5c7f
KH
1337#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
1338#endif
1339
1340#endif
1341
1342
/* Smaller of two values; both arguments are evaluated twice.  */
#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  When non-null, `realloc' calls this
   function instead of `_realloc_internal'.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
1347
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.

   PTR == NULL behaves like malloc; SIZE == 0 frees PTR and returns a
   minimal allocation.  Returns NULL on failure (the old block is kept
   valid in that case, possibly after being re-reserved).  */
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal (ptr);
      return _malloc_internal (0);
    }
  else if (ptr == NULL)
    return _malloc_internal (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal (ptr);
	      return result;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place. */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list. */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it. */
	  ++_chunks_used;
	  _free_internal (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving. */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal (ptr);
	  result = _malloc_internal (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors. */
	      if (_heapindex == block)
	        (void) _malloc_internal (blocks * BLOCKSIZE);
	      else
		{
		  /* Re-reserve the space before the old block, grab the
		     old block back, then release the filler.  */
		  __ptr_t previous
		    = _malloc_internal ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal (blocks * BLOCKSIZE);
		  _free_internal (previous);
		}
	      return NULL;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old. */
	  result = _malloc_internal (size);
	  if (result == NULL)
	    return NULL;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
  return result;
}
1470
1471__ptr_t
1472realloc (ptr, size)
1473 __ptr_t ptr;
1474 __malloc_size_t size;
1475{
1476 if (!__malloc_initialized && !__malloc_initialize ())
1477 return NULL;
1478
1479 return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal)
1480 (ptr, size);
1481}
1482/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1483
1484This library is free software; you can redistribute it and/or
1485modify it under the terms of the GNU Library General Public License as
1486published by the Free Software Foundation; either version 2 of the
1487License, or (at your option) any later version.
1488
1489This library is distributed in the hope that it will be useful,
1490but WITHOUT ANY WARRANTY; without even the implied warranty of
1491MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1492Library General Public License for more details.
1493
1494You should have received a copy of the GNU Library General Public
1495License along with this library; see the file COPYING.LIB. If
1496not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1497Cambridge, MA 02139, USA.
1498
1499 The author may be reached (Email) at the address mike@ai.mit.edu,
1500 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1501
1502#ifndef _MALLOC_INTERNAL
1503#define _MALLOC_INTERNAL
1504#include <malloc.h>
1505#endif
1506
1507/* Allocate an array of NMEMB elements each SIZE bytes long.
1508 The entire array is initialized to zeros. */
1509__ptr_t
1510calloc (nmemb, size)
1511 register __malloc_size_t nmemb;
1512 register __malloc_size_t size;
1513{
1514 register __ptr_t result = malloc (nmemb * size);
1515
1516 if (result != NULL)
1517 (void) memset (result, 0, nmemb * size);
1518
1519 return result;
1520}
1521/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1522This file is part of the GNU C Library.
1523
1524The GNU C Library is free software; you can redistribute it and/or modify
1525it under the terms of the GNU General Public License as published by
1526the Free Software Foundation; either version 2, or (at your option)
1527any later version.
1528
1529The GNU C Library is distributed in the hope that it will be useful,
1530but WITHOUT ANY WARRANTY; without even the implied warranty of
1531MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1532GNU General Public License for more details.
1533
1534You should have received a copy of the GNU General Public License
1535along with the GNU C Library; see the file COPYING. If not, write to
1536the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
1537
1538#ifndef _MALLOC_INTERNAL
1539#define _MALLOC_INTERNAL
1540#include <malloc.h>
1541#endif
1542
1543#ifndef __GNU_LIBRARY__
1544#define __sbrk sbrk
1545#endif
1546
1547#ifdef __GNU_LIBRARY__
1548/* It is best not to declare this and cast its result on foreign operating
1549 systems with potentially hostile include files. */
1550
1551#include <stddef.h>
0a27e8ed 1552extern __ptr_t __sbrk PP ((ptrdiff_t increment));
74ad5c7f
KH
1553#endif
1554
1555#ifndef NULL
1556#define NULL 0
1557#endif
1558
1559/* Allocate INCREMENT more bytes of data space,
1560 and return the start of data space, or NULL on errors.
1561 If INCREMENT is negative, shrink data space. */
1562__ptr_t
1563__default_morecore (increment)
1564 __malloc_ptrdiff_t increment;
1565{
1566 __ptr_t result = (__ptr_t) __sbrk (increment);
1567 if (result == (__ptr_t) -1)
1568 return NULL;
1569 return result;
1570}
1571/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1572
1573This library is free software; you can redistribute it and/or
1574modify it under the terms of the GNU Library General Public License as
1575published by the Free Software Foundation; either version 2 of the
1576License, or (at your option) any later version.
1577
1578This library is distributed in the hope that it will be useful,
1579but WITHOUT ANY WARRANTY; without even the implied warranty of
1580MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1581Library General Public License for more details.
1582
1583You should have received a copy of the GNU Library General Public
1584License along with this library; see the file COPYING.LIB. If
1585not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1586Cambridge, MA 02139, USA. */
1587
1588#ifndef _MALLOC_INTERNAL
1589#define _MALLOC_INTERNAL
1590#include <malloc.h>
1591#endif
1592
1593#if __DJGPP__ - 0 == 1
1594
1595/* There is some problem with memalign in DJGPP v1 and we are supposed
1596 to omit it. Noone told me why, they just told me to do it. */
1597
1598#else
1599
/* Debugging hook for memalign; when non-null it replaces the whole
   implementation below.  */
__ptr_t (*__memalign_hook) PP ((size_t __size, size_t __alignment));

/* Allocate SIZE bytes whose address is a multiple of ALIGNMENT.
   Blocks whose returned pointer differs from the malloc'd pointer are
   recorded in _aligned_blocks so `free' can find the real block.  */
__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;

  if (__memalign_hook)
    return (*__memalign_hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  /* No free slot; prepend a fresh one to the list.  */
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l == NULL)
	    {
	      free (result);
	      return NULL;
	    }
	  l->next = _aligned_blocks;
	  _aligned_blocks = l;
	}
      l->exact = result;
      result = l->aligned = (char *) result + alignment - adj;
    }

  return result;
}
1667
1668#endif /* Not DJGPP v1 */
1669/* Allocate memory on a page boundary.
1670 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1671
1672This library is free software; you can redistribute it and/or
1673modify it under the terms of the GNU Library General Public License as
1674published by the Free Software Foundation; either version 2 of the
1675License, or (at your option) any later version.
1676
1677This library is distributed in the hope that it will be useful,
1678but WITHOUT ANY WARRANTY; without even the implied warranty of
1679MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1680Library General Public License for more details.
1681
1682You should have received a copy of the GNU Library General Public
1683License along with this library; see the file COPYING.LIB. If
1684not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1685Cambridge, MA 02139, USA.
1686
1687 The author may be reached (Email) at the address mike@ai.mit.edu,
1688 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1689
1690#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)
1691
1692/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
1693 on MSDOS, where it conflicts with a system header file. */
1694
1695#define ELIDE_VALLOC
1696
1697#endif
1698
1699#ifndef ELIDE_VALLOC
1700
1701#if defined (__GNU_LIBRARY__) || defined (_LIBC)
1702#include <stddef.h>
1703#include <sys/cdefs.h>
47582ab3
KH
1704#if defined (__GLIBC__) && __GLIBC__ >= 2
1705/* __getpagesize is already declared in <unistd.h> with return type int */
1706#else
0a27e8ed 1707extern size_t __getpagesize PP ((void));
47582ab3 1708#endif
74ad5c7f
KH
1709#else
1710#include "getpagesize.h"
1711#define __getpagesize() getpagesize()
1712#endif
1713
1714#ifndef _MALLOC_INTERNAL
1715#define _MALLOC_INTERNAL
1716#include <malloc.h>
1717#endif
1718
/* Cached page size; 0 until the first call to valloc.  */
static __malloc_size_t pagesize;

/* Allocate SIZE bytes aligned on a page boundary,
   by delegating to memalign with the system page size.  */
__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
1730
1731#endif /* Not ELIDE_VALLOC. */
a3ba27da
GM
1732
1733#ifdef GC_MCHECK
1734
1735/* Standard debugging hooks for `malloc'.
1736 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1737 Written May 1989 by Mike Haertel.
1738
1739This library is free software; you can redistribute it and/or
1740modify it under the terms of the GNU Library General Public License as
1741published by the Free Software Foundation; either version 2 of the
1742License, or (at your option) any later version.
1743
1744This library is distributed in the hope that it will be useful,
1745but WITHOUT ANY WARRANTY; without even the implied warranty of
1746MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1747Library General Public License for more details.
1748
1749You should have received a copy of the GNU Library General Public
1750License along with this library; see the file COPYING.LIB. If
1751not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1752Cambridge, MA 02139, USA.
1753
1754 The author may be reached (Email) at the address mike@ai.mit.edu,
1755 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1756
1757#ifdef emacs
1758#include <stdio.h>
1759#else
1760#ifndef _MALLOC_INTERNAL
1761#define _MALLOC_INTERNAL
1762#include <malloc.h>
1763#include <stdio.h>
1764#endif
1765#endif
1766
/* Old hook values, saved by `mcheck' so the checking hooks below can
   chain to the real allocator without recursing into themselves.  */
static void (*old_free_hook) __P ((__ptr_t ptr));
static __ptr_t (*old_malloc_hook) __P ((__malloc_size_t size));
static __ptr_t (*old_realloc_hook) __P ((__ptr_t ptr, __malloc_size_t size));

/* Function to call when something awful happens.  */
static void (*abortfunc) __P ((enum mcheck_status));

/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb	/* Header magic of a live block.  */
#define MAGICFREE	0xd8675309	/* Header magic after the block is freed.  */
#define MAGICBYTE	((char) 0xd7)	/* Guard byte stored just past user data.  */
#define MALLOCFLOOD	((char) 0x93)	/* Fill pattern for fresh allocations.  */
#define FREEFLOOD	((char) 0x95)	/* Fill pattern for freed memory.  */

/* Bookkeeping header placed immediately before each checked block.  */
struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
1787
#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#define flood memset
#else
static void flood __P ((__ptr_t, int, __malloc_size_t));
/* Fill SIZE bytes at PTR with the byte VAL.
   Fallback used only when memset is not available.  */
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;
  while (size--)
    *cp++ = val;
}
#endif
1803
1804static enum mcheck_status checkhdr __P ((const struct hdr *));
1805static enum mcheck_status
1806checkhdr (hdr)
1807 const struct hdr *hdr;
1808{
1809 enum mcheck_status status;
1810 switch (hdr->magic)
1811 {
1812 default:
1813 status = MCHECK_HEAD;
1814 break;
1815 case MAGICFREE:
1816 status = MCHECK_FREE;
1817 break;
1818 case MAGICWORD:
1819 if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
1820 status = MCHECK_TAIL;
1821 else
1822 status = MCHECK_OK;
1823 break;
1824 }
1825 if (status != MCHECK_OK)
1826 (*abortfunc) (status);
1827 return status;
1828}
1829
static void freehook __P ((__ptr_t));
/* Checking replacement for `free': verify the block's header and
   trailing guard byte, flood the freed user area with FREEFLOOD, then
   free the underlying header block via the real free.  */
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;	/* Mark so a second free is detected.  */
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  /* Temporarily restore the old hook so `free' below doesn't recurse
     back into this function.  */
  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
1851
static __ptr_t mallochook __P ((__malloc_size_t));
/* Checking replacement for `malloc': allocate room for the mcheck
   header plus one trailing guard byte around the SIZE user bytes,
   stamp the magic values, and flood the user area with MALLOCFLOOD.
   Returns the user pointer (just past the header), or NULL.  */
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  /* Temporarily restore the old hook so `malloc' below doesn't
     recurse back into this function.  */
  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;	/* Trailing guard byte.  */
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}
1871
static __ptr_t reallochook __P ((__ptr_t, __malloc_size_t));
/* Checking replacement for `realloc': verify the old block (when PTR
   is non-null), flood shrunk or newly grown regions with the
   appropriate pattern, and re-stamp the header magic and trailing
   guard byte.  Returns the new user pointer, or NULL.  */
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	/* Flood the bytes being trimmed before realloc discards them.  */
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  /* Temporarily restore all the old hooks so the real realloc (and
     any malloc/free it performs internally) doesn't recurse into the
     checking hooks.  */
  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;	/* Trailing guard byte.  */
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}
1908
1909static void
1910mabort (status)
1911 enum mcheck_status status;
1912{
1913 const char *msg;
1914 switch (status)
1915 {
1916 case MCHECK_OK:
1917 msg = "memory is consistent, library is buggy";
1918 break;
1919 case MCHECK_HEAD:
1920 msg = "memory clobbered before allocated block";
1921 break;
1922 case MCHECK_TAIL:
1923 msg = "memory clobbered past end of allocated block";
1924 break;
1925 case MCHECK_FREE:
1926 msg = "block freed twice";
1927 break;
1928 default:
1929 msg = "bogus mcheck_status, library is buggy";
1930 break;
1931 }
1932#ifdef __GNU_LIBRARY__
1933 __libc_fatal (msg);
1934#else
1935 fprintf (stderr, "mcheck: %s\n", msg);
1936 fflush (stderr);
1937 abort ();
1938#endif
1939}
1940
/* Nonzero once the checking hooks have been installed.  */
static int mcheck_used = 0;

/* Install the mcheck debugging hooks.  FUNC is called on consistency
   failures (`mabort' when FUNC is NULL).  Returns 0 on success, -1 if
   malloc was already in use so the hooks could not be installed.  */
int
mcheck (func)
     void (*func) __P ((enum mcheck_status));
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
1963
1964enum mcheck_status
1965mprobe (__ptr_t ptr)
1966{
1967 return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
1968}
1969
1970#endif /* GC_MCHECK */