Upgraded to mh-e version 6.1.1.
[bpt/emacs.git] / src / gmalloc.c
CommitLineData
74ad5c7f
KH
1/* This file is no longer automatically generated from libc. */
2
3#define _MALLOC_INTERNAL
4
5/* The malloc headers and source files from the C library follow here. */
6
7/* Declarations for `malloc' and friends.
68c45bf0 8 Copyright 1990, 91, 92, 93, 95, 96, 99 Free Software Foundation, Inc.
74ad5c7f
KH
9 Written May 1989 by Mike Haertel.
10
11This library is free software; you can redistribute it and/or
12modify it under the terms of the GNU Library General Public License as
13published by the Free Software Foundation; either version 2 of the
14License, or (at your option) any later version.
15
16This library is distributed in the hope that it will be useful,
17but WITHOUT ANY WARRANTY; without even the implied warranty of
18MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19Library General Public License for more details.
20
21You should have received a copy of the GNU Library General Public
22License along with this library; see the file COPYING.LIB. If
23not, write to the Free Software Foundation, Inc., 675 Mass Ave,
24Cambridge, MA 02139, USA.
25
26 The author may be reached (Email) at the address mike@ai.mit.edu,
27 or (US mail) as Mike Haertel c/o Free Software Foundation. */
28
29#ifndef _MALLOC_H
30
31#define _MALLOC_H 1
32
33#ifdef _MALLOC_INTERNAL
34
35#ifdef HAVE_CONFIG_H
36#include <config.h>
37#endif
38
b2e92d3e
RS
39#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
40 || defined STDC_HEADERS || defined PROTOTYPES) \
41 && ! defined (BROKEN_PROTOTYPES))
0a27e8ed
RS
42#undef PP
43#define PP(args) args
74ad5c7f
KH
44#undef __ptr_t
45#define __ptr_t void *
46#else /* Not C++ or ANSI C. */
0a27e8ed
RS
47#undef PP
48#define PP(args) ()
74ad5c7f
KH
49#undef __ptr_t
50#define __ptr_t char *
51#endif /* C++ or ANSI C. */
52
53#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
54#include <string.h>
55#else
56#ifndef memset
57#define memset(s, zero, n) bzero ((s), (n))
58#endif
59#ifndef memcpy
60#define memcpy(d, s, n) bcopy ((s), (d), (n))
61#endif
62#endif
63
ca9c0567 64#ifdef HAVE_LIMITS_H
74ad5c7f 65#include <limits.h>
ca9c0567 66#endif
74ad5c7f
KH
67#ifndef CHAR_BIT
68#define CHAR_BIT 8
69#endif
74ad5c7f
KH
70
71#ifdef HAVE_UNISTD_H
72#include <unistd.h>
73#endif
74
75#endif /* _MALLOC_INTERNAL. */
76
77
78#ifdef __cplusplus
79extern "C"
80{
81#endif
82
ca9c0567 83#ifdef STDC_HEADERS
74ad5c7f
KH
84#include <stddef.h>
85#define __malloc_size_t size_t
86#define __malloc_ptrdiff_t ptrdiff_t
87#else
eec2d1de
EZ
88#ifdef __GNUC__
89#include <stddef.h>
90#ifdef __SIZE_TYPE__
91#define __malloc_size_t __SIZE_TYPE__
92#endif
93#endif
94#ifndef __malloc_size_t
74ad5c7f 95#define __malloc_size_t unsigned int
eec2d1de 96#endif
74ad5c7f
KH
97#define __malloc_ptrdiff_t int
98#endif
99
100#ifndef NULL
101#define NULL 0
102#endif
103
ca9c0567
PE
104#ifndef FREE_RETURN_TYPE
105#define FREE_RETURN_TYPE void
106#endif
107
74ad5c7f
KH
108
109/* Allocate SIZE bytes of memory. */
0a27e8ed 110extern __ptr_t malloc PP ((__malloc_size_t __size));
74ad5c7f
KH
111/* Re-allocate the previously allocated block
112 in __ptr_t, making the new block SIZE bytes long. */
0a27e8ed 113extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
74ad5c7f 114/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
0a27e8ed 115extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
74ad5c7f 116/* Free a block allocated by `malloc', `realloc' or `calloc'. */
ca9c0567 117extern FREE_RETURN_TYPE free PP ((__ptr_t __ptr));
74ad5c7f
KH
118
119/* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
120#if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict. */
0a27e8ed
RS
121extern __ptr_t memalign PP ((__malloc_size_t __alignment,
122 __malloc_size_t __size));
74ad5c7f
KH
123#endif
124
125/* Allocate SIZE bytes on a page boundary. */
126#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
0a27e8ed 127extern __ptr_t valloc PP ((__malloc_size_t __size));
74ad5c7f
KH
128#endif
129
130
131#ifdef _MALLOC_INTERNAL
132
133/* The allocator divides the heap into blocks of fixed size; large
134 requests receive one or more whole blocks, and small requests
135 receive a fragment of a block. Fragment sizes are powers of two,
136 and all fragments of a block are the same size. When all the
137 fragments in a block have been freed, the block itself is freed. */
138#define INT_BIT (CHAR_BIT * sizeof(int))
139#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
140#define BLOCKSIZE (1 << BLOCKLOG)
141#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
142
143/* Determine the amount of memory spanned by the initial heap table
144 (not an absolute limit). */
145#define HEAP (INT_BIT > 16 ? 4194304 : 65536)
146
147/* Number of contiguous free blocks allowed to build up at the end of
148 memory before they will be returned to the system. */
149#define FINAL_FREE_BLOCKS 8
150
151/* Data structure giving per-block information. */
152typedef union
153 {
154 /* Heap information for a busy block. */
155 struct
156 {
157 /* Zero for a large (multiblock) object, or positive giving the
158 logarithm to the base two of the fragment size. */
159 int type;
160 union
161 {
162 struct
163 {
164 __malloc_size_t nfree; /* Free frags in a fragmented block. */
165 __malloc_size_t first; /* First free fragment of the block. */
166 } frag;
167 /* For a large object, in its first block, this has the number
168 of blocks in the object. In the other blocks, this has a
169 negative number which says how far back the first block is. */
170 __malloc_ptrdiff_t size;
171 } info;
172 } busy;
173 /* Heap information for a free block
174 (that may be the first of a free cluster). */
175 struct
176 {
177 __malloc_size_t size; /* Size (in blocks) of a free cluster. */
178 __malloc_size_t next; /* Index of next free cluster. */
179 __malloc_size_t prev; /* Index of previous free cluster. */
180 } free;
181 } malloc_info;
182
183/* Pointer to first block of the heap. */
184extern char *_heapbase;
185
186/* Table indexed by block number giving per-block information. */
187extern malloc_info *_heapinfo;
188
189/* Address to block number and vice versa. */
190#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
191#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
192
193/* Current search index for the heap table. */
194extern __malloc_size_t _heapindex;
195
196/* Limit of valid info table indices. */
197extern __malloc_size_t _heaplimit;
198
199/* Doubly linked lists of free fragments. */
200struct list
201 {
202 struct list *next;
203 struct list *prev;
204 };
205
206/* Free list headers for each fragment size. */
207extern struct list _fraghead[];
208
209/* List of blocks allocated with `memalign' (or `valloc'). */
210struct alignlist
211 {
212 struct alignlist *next;
213 __ptr_t aligned; /* The address that memaligned returned. */
214 __ptr_t exact; /* The address that malloc returned. */
215 };
216extern struct alignlist *_aligned_blocks;
217
218/* Instrumentation. */
219extern __malloc_size_t _chunks_used;
220extern __malloc_size_t _bytes_used;
221extern __malloc_size_t _chunks_free;
222extern __malloc_size_t _bytes_free;
223
224/* Internal versions of `malloc', `realloc', and `free'
225 used when these functions need to call each other.
226 They are the same but don't call the hooks. */
0a27e8ed
RS
227extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
228extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
229extern void _free_internal PP ((__ptr_t __ptr));
74ad5c7f
KH
230
231#endif /* _MALLOC_INTERNAL. */
232
233/* Given an address in the middle of a malloc'd object,
234 return the address of the beginning of the object. */
0a27e8ed 235extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));
74ad5c7f
KH
236
237/* Underlying allocation function; successive calls should
238 return contiguous pieces of memory. */
0a27e8ed 239extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));
74ad5c7f
KH
240
241/* Default value of `__morecore'. */
0a27e8ed 242extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));
74ad5c7f
KH
243
244/* If not NULL, this function is called after each time
245 `__morecore' is called to increase the data size. */
0a27e8ed 246extern void (*__after_morecore_hook) PP ((void));
74ad5c7f
KH
247
248/* Number of extra blocks to get each time we ask for more core.
249 This reduces the frequency of calling `(*__morecore)'. */
250extern __malloc_size_t __malloc_extra_blocks;
251
252/* Nonzero if `malloc' has been called and done its initialization. */
253extern int __malloc_initialized;
254/* Function called to initialize malloc data structures. */
0a27e8ed 255extern int __malloc_initialize PP ((void));
74ad5c7f
KH
256
257/* Hooks for debugging versions. */
0a27e8ed
RS
258extern void (*__malloc_initialize_hook) PP ((void));
259extern void (*__free_hook) PP ((__ptr_t __ptr));
260extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
261extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
262extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
263 __malloc_size_t __alignment));
74ad5c7f
KH
264
265/* Return values for `mprobe': these are the kinds of inconsistencies that
266 `mcheck' enables detection of. */
267enum mcheck_status
268 {
269 MCHECK_DISABLED = -1, /* Consistency checking is not turned on. */
270 MCHECK_OK, /* Block is fine. */
271 MCHECK_FREE, /* Block freed twice. */
272 MCHECK_HEAD, /* Memory before the block was clobbered. */
273 MCHECK_TAIL /* Memory after the block was clobbered. */
274 };
275
276/* Activate a standard collection of debugging hooks. This must be called
277 before `malloc' is ever called. ABORTFUNC is called with an error code
278 (see enum above) when an inconsistency is detected. If ABORTFUNC is
279 null, the standard function prints on stderr and then calls `abort'. */
0a27e8ed 280extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));
74ad5c7f
KH
281
282/* Check for aberrations in a particular malloc'd block. You must have
283 called `mcheck' already. These are the same checks that `mcheck' does
284 when you free or reallocate a block. */
0a27e8ed 285extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
74ad5c7f
KH
286
287/* Activate a standard collection of tracing hooks. */
0a27e8ed
RS
288extern void mtrace PP ((void));
289extern void muntrace PP ((void));
74ad5c7f
KH
290
291/* Statistics available to the user. */
292struct mstats
293 {
294 __malloc_size_t bytes_total; /* Total size of the heap. */
295 __malloc_size_t chunks_used; /* Chunks allocated by the user. */
296 __malloc_size_t bytes_used; /* Byte total of user-allocated chunks. */
297 __malloc_size_t chunks_free; /* Chunks in the free list. */
298 __malloc_size_t bytes_free; /* Byte total of chunks in the free list. */
299 };
300
301/* Pick up the current statistics. */
0a27e8ed 302extern struct mstats mstats PP ((void));
74ad5c7f
KH
303
304/* Call WARNFUN with a warning message when memory usage is high. */
0a27e8ed
RS
305extern void memory_warnings PP ((__ptr_t __start,
306 void (*__warnfun) PP ((const char *))));
74ad5c7f
KH
307
308
309/* Relocating allocator. */
310
311/* Allocate SIZE bytes, and store the address in *HANDLEPTR. */
0a27e8ed 312extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
74ad5c7f
KH
313
314/* Free the storage allocated in HANDLEPTR. */
0a27e8ed 315extern void r_alloc_free PP ((__ptr_t *__handleptr));
74ad5c7f
KH
316
317/* Adjust the block at HANDLEPTR to be SIZE bytes long. */
0a27e8ed 318extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
74ad5c7f
KH
319
320
321#ifdef __cplusplus
322}
323#endif
324
325#endif /* malloc.h */
326/* Memory allocator `malloc'.
327 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
328 Written May 1989 by Mike Haertel.
329
330This library is free software; you can redistribute it and/or
331modify it under the terms of the GNU Library General Public License as
332published by the Free Software Foundation; either version 2 of the
333License, or (at your option) any later version.
334
335This library is distributed in the hope that it will be useful,
336but WITHOUT ANY WARRANTY; without even the implied warranty of
337MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
338Library General Public License for more details.
339
340You should have received a copy of the GNU Library General Public
341License along with this library; see the file COPYING.LIB. If
342not, write to the Free Software Foundation, Inc., 675 Mass Ave,
343Cambridge, MA 02139, USA.
344
345 The author may be reached (Email) at the address mike@ai.mit.edu,
346 or (US mail) as Mike Haertel c/o Free Software Foundation. */
347
348#ifndef _MALLOC_INTERNAL
349#define _MALLOC_INTERNAL
350#include <malloc.h>
351#endif
352#include <errno.h>
353
354/* How to really get more memory. */
0a27e8ed 355__ptr_t (*__morecore) PP ((ptrdiff_t __size)) = __default_morecore;
74ad5c7f
KH
356
357/* Debugging hook for `malloc'. */
0a27e8ed 358__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
74ad5c7f
KH
359
360/* Pointer to the base of the first block. */
361char *_heapbase;
362
363/* Block information table. Allocated with align/__free (not malloc/free). */
364malloc_info *_heapinfo;
365
366/* Number of info entries. */
367static __malloc_size_t heapsize;
368
369/* Search index in the info table. */
370__malloc_size_t _heapindex;
371
372/* Limit of valid info table indices. */
373__malloc_size_t _heaplimit;
374
375/* Free lists for each fragment size. */
376struct list _fraghead[BLOCKLOG];
377
378/* Instrumentation. */
379__malloc_size_t _chunks_used;
380__malloc_size_t _bytes_used;
381__malloc_size_t _chunks_free;
382__malloc_size_t _bytes_free;
383
384/* Are you experienced? */
385int __malloc_initialized;
386
387__malloc_size_t __malloc_extra_blocks;
388
0a27e8ed
RS
389void (*__malloc_initialize_hook) PP ((void));
390void (*__after_morecore_hook) PP ((void));
74ad5c7f 391
5dcab13e
GM
392#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE
393
394/* Some code for hunting a bug writing into _heapinfo.
395
396 Call this macro with argument PROT non-zero to protect internal
397 malloc state against writing to it, call it with a zero argument to
398 make it readable and writable.
399
400 Note that this only works if BLOCKSIZE == page size, which is
401 the case on the i386. */
402
403#include <sys/types.h>
404#include <sys/mman.h>
405
406static int state_protected_p;
407static __malloc_size_t last_state_size;
408static malloc_info *last_heapinfo;
409
410void
411protect_malloc_state (protect_p)
412 int protect_p;
413{
414 /* If _heapinfo has been relocated, make sure its old location
415 isn't left read-only; it will be reused by malloc. */
416 if (_heapinfo != last_heapinfo
417 && last_heapinfo
418 && state_protected_p)
419 mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);
420
421 last_state_size = _heaplimit * sizeof *_heapinfo;
422 last_heapinfo = _heapinfo;
423
424 if (protect_p != state_protected_p)
425 {
426 state_protected_p = protect_p;
427 if (mprotect (_heapinfo, last_state_size,
428 protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
429 abort ();
430 }
431}
432
433#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)
434
435#else
436#define PROTECT_MALLOC_STATE(PROT) /* empty */
437#endif
438
74ad5c7f
KH
439
440/* Aligned allocation. */
0a27e8ed 441static __ptr_t align PP ((__malloc_size_t));
74ad5c7f
KH
442static __ptr_t
443align (size)
444 __malloc_size_t size;
445{
446 __ptr_t result;
447 unsigned long int adj;
448
ceeb3d7d
EZ
449 /* align accepts an unsigned argument, but __morecore accepts a
450 signed one. This could lead to trouble if SIZE overflows a
451 signed int type accepted by __morecore. We just punt in that
452 case, since they are requesting a ludicrous amount anyway. */
453 if ((__malloc_ptrdiff_t)size < 0)
454 result = 0;
455 else
456 result = (*__morecore) (size);
74ad5c7f
KH
457 adj = (unsigned long int) ((unsigned long int) ((char *) result -
458 (char *) NULL)) % BLOCKSIZE;
459 if (adj != 0)
460 {
461 __ptr_t new;
462 adj = BLOCKSIZE - adj;
463 new = (*__morecore) (adj);
464 result = (char *) result + adj;
465 }
466
467 if (__after_morecore_hook)
468 (*__after_morecore_hook) ();
469
470 return result;
471}
472
473/* Get SIZE bytes, if we can get them starting at END.
474 Return the address of the space we got.
475 If we cannot get space at END, fail and return 0. */
0a27e8ed 476static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
74ad5c7f
KH
477static __ptr_t
478get_contiguous_space (size, position)
479 __malloc_ptrdiff_t size;
480 __ptr_t position;
481{
482 __ptr_t before;
483 __ptr_t after;
484
485 before = (*__morecore) (0);
486 /* If we can tell in advance that the break is at the wrong place,
487 fail now. */
488 if (before != position)
489 return 0;
490
491 /* Allocate SIZE bytes and get the address of them. */
492 after = (*__morecore) (size);
493 if (!after)
494 return 0;
495
496 /* It was not contiguous--reject it. */
497 if (after != position)
498 {
499 (*__morecore) (- size);
500 return 0;
501 }
502
503 return after;
504}
505
506
507/* This is called when `_heapinfo' and `heapsize' have just
508 been set to describe a new info table. Set up the table
509 to describe itself and account for it in the statistics. */
0a27e8ed 510static void register_heapinfo PP ((void));
74ad5c7f
KH
511#ifdef __GNUC__
512__inline__
513#endif
514static void
515register_heapinfo ()
516{
517 __malloc_size_t block, blocks;
518
519 block = BLOCK (_heapinfo);
520 blocks = BLOCKIFY (heapsize * sizeof (malloc_info));
521
522 /* Account for the _heapinfo block itself in the statistics. */
523 _bytes_used += blocks * BLOCKSIZE;
524 ++_chunks_used;
525
526 /* Describe the heapinfo block itself in the heapinfo. */
527 _heapinfo[block].busy.type = 0;
528 _heapinfo[block].busy.info.size = blocks;
529 /* Leave back-pointers for malloc_find_address. */
530 while (--blocks > 0)
531 _heapinfo[block + blocks].busy.info.size = -blocks;
532}
533
534/* Set everything up and remember that we have. */
535int
536__malloc_initialize ()
537{
538 if (__malloc_initialized)
539 return 0;
540
a3ba27da
GM
541#ifdef GC_MCHECK
542 mcheck (NULL);
543#endif
544
74ad5c7f
KH
545 if (__malloc_initialize_hook)
546 (*__malloc_initialize_hook) ();
547
548 heapsize = HEAP / BLOCKSIZE;
549 _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
550 if (_heapinfo == NULL)
551 return 0;
552 memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
553 _heapinfo[0].free.size = 0;
554 _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
555 _heapindex = 0;
556 _heapbase = (char *) _heapinfo;
557 _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));
558
559 register_heapinfo ();
560
561 __malloc_initialized = 1;
5dcab13e 562 PROTECT_MALLOC_STATE (1);
74ad5c7f
KH
563 return 1;
564}
565
566static int morecore_recursing;
567
568/* Get neatly aligned memory, initializing or
569 growing the heap info table as necessary. */
0a27e8ed 570static __ptr_t morecore PP ((__malloc_size_t));
74ad5c7f
KH
571static __ptr_t
572morecore (size)
573 __malloc_size_t size;
574{
575 __ptr_t result;
576 malloc_info *newinfo, *oldinfo;
577 __malloc_size_t newsize;
578
579 if (morecore_recursing)
580 /* Avoid recursion. The caller will know how to handle a null return. */
581 return NULL;
582
583 result = align (size);
584 if (result == NULL)
585 return NULL;
586
5dcab13e
GM
587 PROTECT_MALLOC_STATE (0);
588
74ad5c7f
KH
589 /* Check if we need to grow the info table. */
590 if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
591 {
592 /* Calculate the new _heapinfo table size. We do not account for the
593 added blocks in the table itself, as we hope to place them in
594 existing free space, which is already covered by part of the
595 existing table. */
596 newsize = heapsize;
597 do
598 newsize *= 2;
599 while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);
600
601 /* We must not reuse existing core for the new info table when called
602 from realloc in the case of growing a large block, because the
603 block being grown is momentarily marked as free. In this case
604 _heaplimit is zero so we know not to reuse space for internal
605 allocation. */
606 if (_heaplimit != 0)
607 {
608 /* First try to allocate the new info table in core we already
609 have, in the usual way using realloc. If realloc cannot
610 extend it in place or relocate it to existing sufficient core,
611 we will get called again, and the code above will notice the
612 `morecore_recursing' flag and return null. */
613 int save = errno; /* Don't want to clobber errno with ENOMEM. */
614 morecore_recursing = 1;
615 newinfo = (malloc_info *) _realloc_internal
616 (_heapinfo, newsize * sizeof (malloc_info));
617 morecore_recursing = 0;
618 if (newinfo == NULL)
619 errno = save;
620 else
621 {
622 /* We found some space in core, and realloc has put the old
623 table's blocks on the free list. Now zero the new part
624 of the table and install the new table location. */
625 memset (&newinfo[heapsize], 0,
626 (newsize - heapsize) * sizeof (malloc_info));
627 _heapinfo = newinfo;
628 heapsize = newsize;
629 goto got_heap;
630 }
631 }
632
633 /* Allocate new space for the malloc info table. */
634 while (1)
635 {
636 newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
637
638 /* Did it fail? */
639 if (newinfo == NULL)
640 {
641 (*__morecore) (-size);
642 return NULL;
643 }
644
645 /* Is it big enough to record status for its own space?
646 If so, we win. */
647 if ((__malloc_size_t) BLOCK ((char *) newinfo
648 + newsize * sizeof (malloc_info))
649 < newsize)
650 break;
651
652 /* Must try again. First give back most of what we just got. */
653 (*__morecore) (- newsize * sizeof (malloc_info));
654 newsize *= 2;
655 }
656
657 /* Copy the old table to the beginning of the new,
658 and zero the rest of the new table. */
659 memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
660 memset (&newinfo[heapsize], 0,
661 (newsize - heapsize) * sizeof (malloc_info));
662 oldinfo = _heapinfo;
663 _heapinfo = newinfo;
664 heapsize = newsize;
665
666 register_heapinfo ();
667
668 /* Reset _heaplimit so _free_internal never decides
669 it can relocate or resize the info table. */
670 _heaplimit = 0;
671 _free_internal (oldinfo);
5dcab13e 672 PROTECT_MALLOC_STATE (0);
74ad5c7f
KH
673
674 /* The new heap limit includes the new table just allocated. */
675 _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
676 return result;
677 }
678
679 got_heap:
680 _heaplimit = BLOCK ((char *) result + size);
681 return result;
682}
683
684/* Allocate memory from the heap. */
685__ptr_t
686_malloc_internal (size)
687 __malloc_size_t size;
688{
689 __ptr_t result;
690 __malloc_size_t block, blocks, lastblocks, start;
691 register __malloc_size_t i;
692 struct list *next;
693
694 /* ANSI C allows `malloc (0)' to either return NULL, or to return a
695 valid address you can realloc and free (though not dereference).
696
697 It turns out that some extant code (sunrpc, at least Ultrix's version)
698 expects `malloc (0)' to return non-NULL and breaks otherwise.
699 Be compatible. */
700
701#if 0
702 if (size == 0)
703 return NULL;
704#endif
705
5dcab13e
GM
706 PROTECT_MALLOC_STATE (0);
707
74ad5c7f
KH
708 if (size < sizeof (struct list))
709 size = sizeof (struct list);
710
711#ifdef SUNOS_LOCALTIME_BUG
712 if (size < 16)
713 size = 16;
714#endif
715
716 /* Determine the allocation policy based on the request size. */
717 if (size <= BLOCKSIZE / 2)
718 {
719 /* Small allocation to receive a fragment of a block.
720 Determine the logarithm to base two of the fragment size. */
721 register __malloc_size_t log = 1;
722 --size;
723 while ((size /= 2) != 0)
724 ++log;
725
726 /* Look in the fragment lists for a
727 free fragment of the desired size. */
728 next = _fraghead[log].next;
729 if (next != NULL)
730 {
731 /* There are free fragments of this size.
732 Pop a fragment out of the fragment list and return it.
733 Update the block's nfree and first counters. */
734 result = (__ptr_t) next;
735 next->prev->next = next->next;
736 if (next->next != NULL)
737 next->next->prev = next->prev;
738 block = BLOCK (result);
739 if (--_heapinfo[block].busy.info.frag.nfree != 0)
740 _heapinfo[block].busy.info.frag.first = (unsigned long int)
741 ((unsigned long int) ((char *) next->next - (char *) NULL)
742 % BLOCKSIZE) >> log;
743
744 /* Update the statistics. */
745 ++_chunks_used;
746 _bytes_used += 1 << log;
747 --_chunks_free;
748 _bytes_free -= 1 << log;
749 }
750 else
751 {
752 /* No free fragments of the desired size, so get a new block
753 and break it into fragments, returning the first. */
8094989b
GM
754#ifdef GC_MALLOC_CHECK
755 result = _malloc_internal (BLOCKSIZE);
5dcab13e 756 PROTECT_MALLOC_STATE (0);
8094989b 757#else
74ad5c7f 758 result = malloc (BLOCKSIZE);
8094989b 759#endif
74ad5c7f 760 if (result == NULL)
5dcab13e
GM
761 {
762 PROTECT_MALLOC_STATE (1);
763 return NULL;
764 }
74ad5c7f
KH
765
766 /* Link all fragments but the first into the free list. */
767 next = (struct list *) ((char *) result + (1 << log));
768 next->next = NULL;
769 next->prev = &_fraghead[log];
770 _fraghead[log].next = next;
771
772 for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
773 {
774 next = (struct list *) ((char *) result + (i << log));
775 next->next = _fraghead[log].next;
776 next->prev = &_fraghead[log];
777 next->prev->next = next;
778 next->next->prev = next;
779 }
780
781 /* Initialize the nfree and first counters for this block. */
782 block = BLOCK (result);
783 _heapinfo[block].busy.type = log;
784 _heapinfo[block].busy.info.frag.nfree = i - 1;
785 _heapinfo[block].busy.info.frag.first = i - 1;
786
787 _chunks_free += (BLOCKSIZE >> log) - 1;
788 _bytes_free += BLOCKSIZE - (1 << log);
789 _bytes_used -= BLOCKSIZE - (1 << log);
790 }
791 }
792 else
793 {
794 /* Large allocation to receive one or more blocks.
795 Search the free list in a circle starting at the last place visited.
796 If we loop completely around without finding a large enough
797 space we will have to get more memory from the system. */
798 blocks = BLOCKIFY (size);
799 start = block = _heapindex;
800 while (_heapinfo[block].free.size < blocks)
801 {
802 block = _heapinfo[block].free.next;
803 if (block == start)
804 {
805 /* Need to get more from the system. Get a little extra. */
806 __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
807 block = _heapinfo[0].free.prev;
808 lastblocks = _heapinfo[block].free.size;
809 /* Check to see if the new core will be contiguous with the
810 final free block; if so we don't need to get as much. */
811 if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
812 /* We can't do this if we will have to make the heap info
813 table bigger to accomodate the new space. */
814 block + wantblocks <= heapsize &&
815 get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
816 ADDRESS (block + lastblocks)))
817 {
818 /* We got it contiguously. Which block we are extending
819 (the `final free block' referred to above) might have
820 changed, if it got combined with a freed info table. */
821 block = _heapinfo[0].free.prev;
822 _heapinfo[block].free.size += (wantblocks - lastblocks);
823 _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
824 _heaplimit += wantblocks - lastblocks;
825 continue;
826 }
827 result = morecore (wantblocks * BLOCKSIZE);
828 if (result == NULL)
829 return NULL;
830 block = BLOCK (result);
831 /* Put the new block at the end of the free list. */
832 _heapinfo[block].free.size = wantblocks;
833 _heapinfo[block].free.prev = _heapinfo[0].free.prev;
834 _heapinfo[block].free.next = 0;
835 _heapinfo[0].free.prev = block;
836 _heapinfo[_heapinfo[block].free.prev].free.next = block;
837 ++_chunks_free;
838 /* Now loop to use some of that block for this allocation. */
839 }
840 }
841
842 /* At this point we have found a suitable free list entry.
843 Figure out how to remove what we need from the list. */
844 result = ADDRESS (block);
845 if (_heapinfo[block].free.size > blocks)
846 {
847 /* The block we found has a bit left over,
848 so relink the tail end back into the free list. */
849 _heapinfo[block + blocks].free.size
850 = _heapinfo[block].free.size - blocks;
851 _heapinfo[block + blocks].free.next
852 = _heapinfo[block].free.next;
853 _heapinfo[block + blocks].free.prev
854 = _heapinfo[block].free.prev;
855 _heapinfo[_heapinfo[block].free.prev].free.next
856 = _heapinfo[_heapinfo[block].free.next].free.prev
857 = _heapindex = block + blocks;
858 }
859 else
860 {
861 /* The block exactly matches our requirements,
862 so just remove it from the list. */
863 _heapinfo[_heapinfo[block].free.next].free.prev
864 = _heapinfo[block].free.prev;
865 _heapinfo[_heapinfo[block].free.prev].free.next
866 = _heapindex = _heapinfo[block].free.next;
867 --_chunks_free;
868 }
869
870 _heapinfo[block].busy.type = 0;
871 _heapinfo[block].busy.info.size = blocks;
872 ++_chunks_used;
873 _bytes_used += blocks * BLOCKSIZE;
874 _bytes_free -= blocks * BLOCKSIZE;
875
876 /* Mark all the blocks of the object just allocated except for the
877 first with a negative number so you can find the first block by
878 adding that adjustment. */
879 while (--blocks > 0)
880 _heapinfo[block + blocks].busy.info.size = -blocks;
881 }
882
5dcab13e 883 PROTECT_MALLOC_STATE (1);
74ad5c7f
KH
884 return result;
885}
886
887__ptr_t
888malloc (size)
889 __malloc_size_t size;
890{
891 if (!__malloc_initialized && !__malloc_initialize ())
892 return NULL;
893
894 return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size);
895}
896\f
897#ifndef _LIBC
898
899/* On some ANSI C systems, some libc functions call _malloc, _free
900 and _realloc. Make them use the GNU functions. */
901
902__ptr_t
903_malloc (size)
904 __malloc_size_t size;
905{
906 return malloc (size);
907}
908
909void
910_free (ptr)
911 __ptr_t ptr;
912{
913 free (ptr);
914}
915
916__ptr_t
917_realloc (ptr, size)
918 __ptr_t ptr;
919 __malloc_size_t size;
920{
921 return realloc (ptr, size);
922}
923
924#endif
925/* Free a block of memory allocated by `malloc'.
926 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
927 Written May 1989 by Mike Haertel.
928
929This library is free software; you can redistribute it and/or
930modify it under the terms of the GNU Library General Public License as
931published by the Free Software Foundation; either version 2 of the
932License, or (at your option) any later version.
933
934This library is distributed in the hope that it will be useful,
935but WITHOUT ANY WARRANTY; without even the implied warranty of
936MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
937Library General Public License for more details.
938
939You should have received a copy of the GNU Library General Public
940License along with this library; see the file COPYING.LIB. If
941not, write to the Free Software Foundation, Inc., 675 Mass Ave,
942Cambridge, MA 02139, USA.
943
944 The author may be reached (Email) at the address mike@ai.mit.edu,
945 or (US mail) as Mike Haertel c/o Free Software Foundation. */
946
947#ifndef _MALLOC_INTERNAL
948#define _MALLOC_INTERNAL
949#include <malloc.h>
950#endif
951
952
953/* Cope with systems lacking `memmove'. */
954#ifndef memmove
955#if (defined (MEMMOVE_MISSING) || \
956 !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
957#ifdef emacs
958#undef __malloc_safe_bcopy
959#define __malloc_safe_bcopy safe_bcopy
960#endif
961/* This function is defined in realloc.c. */
0a27e8ed 962extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
74ad5c7f
KH
963#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
964#endif
965#endif
966
967
968/* Debugging hook for free. */
0a27e8ed 969void (*__free_hook) PP ((__ptr_t __ptr));
74ad5c7f
KH
970
971/* List of blocks allocated by memalign. */
972struct alignlist *_aligned_blocks = NULL;
973
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.

   PTR may be NULL (a no-op) or a pointer previously handed out by this
   allocator.  Large blocks are linked back into the doubly-linked free
   list (coalescing with adjacent free regions), and trailing free space
   may be returned to the system via __morecore.  Fragments are linked
   onto the per-size fragment list, or their whole block is freed once
   every fragment in it is free.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  /* Unprotect the malloc bookkeeping state while we mutate it.  */
  PROTECT_MALLOC_STATE (0);

  /* If PTR came from memalign, translate it back to the address that
     malloc actually returned before freeing.  */
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }

  block = BLOCK (ptr);

  /* type == 0 means a multi-block object; otherwise TYPE is the base-2
     log of the fragment size.  */
  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal (info_blocks
							  * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      /* Shrink core; a negative argument releases memory.  */
	      (*__morecore) (-bytes);
	      /* Unlink the released run from the free list.  */
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			   (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  /* Reclassify the block as a one-block object so the recursive
	     free below treats it as such.  */
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#ifdef GC_MALLOC_CHECK
	  _free_internal (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  /* Record the fragment's offset within its block, in units of
	     the fragment size.  */
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  /* Re-protect the bookkeeping state now that we are consistent again.  */
  PROTECT_MALLOC_STATE (1);
}
1220
1221/* Return memory to the heap. */
ca9c0567
PE
1222
1223FREE_RETURN_TYPE
74ad5c7f
KH
1224free (ptr)
1225 __ptr_t ptr;
1226{
1227 if (__free_hook != NULL)
1228 (*__free_hook) (ptr);
1229 else
1230 _free_internal (ptr);
1231}
1232
1233/* Define the `cfree' alias for `free'. */
1234#ifdef weak_alias
1235weak_alias (free, cfree)
1236#else
/* `cfree' is an old System V synonym for `free'; just forward.  */
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
1243#endif
1244/* Change the size of a block allocated by `malloc'.
1245 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1246 Written May 1989 by Mike Haertel.
1247
1248This library is free software; you can redistribute it and/or
1249modify it under the terms of the GNU Library General Public License as
1250published by the Free Software Foundation; either version 2 of the
1251License, or (at your option) any later version.
1252
1253This library is distributed in the hope that it will be useful,
1254but WITHOUT ANY WARRANTY; without even the implied warranty of
1255MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1256Library General Public License for more details.
1257
1258You should have received a copy of the GNU Library General Public
1259License along with this library; see the file COPYING.LIB. If
1260not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1261Cambridge, MA 02139, USA.
1262
1263 The author may be reached (Email) at the address mike@ai.mit.edu,
1264 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1265
1266#ifndef _MALLOC_INTERNAL
1267#define _MALLOC_INTERNAL
1268#include <malloc.h>
1269#endif
1270
1271
1272
1273/* Cope with systems lacking `memmove'. */
1274#if (defined (MEMMOVE_MISSING) || \
1275 !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
1276
1277#ifdef emacs
1278#undef __malloc_safe_bcopy
1279#define __malloc_safe_bcopy safe_bcopy
1280#else
1281
1282/* Snarfed directly from Emacs src/dispnew.c:
1283 XXX Should use system bcopy if it handles overlap. */
1284
1285/* Like bcopy except never gets confused by overlap. */
1286
void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  /* Nothing to do for an empty or in-place copy.
     NOTE(review): SIZE is unsigned, so `size <= 0' is effectively
     `size == 0'.  */
  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
	 nonoverlapping chunks of TO - FROM bytes each.  However, if
	 TO - FROM is small, then the bcopy function call overhead
	 makes this not worth it.  The crossover point could be about
	 anywhere.  Since I don't think the obvious copy loop is too
	 bad, I'm trying to err in its favor.  */
      if (to - from < 64)
	{
	  /* Simple byte-at-a-time backward copy.  */
	  do
	    *--endt = *--endf;
	  while (endf != from);
	}
      else
	{
	  /* Copy backward in chunks of (TO - FROM) bytes; each chunk's
	     source and destination cannot overlap.  */
	  for (;;)
	    {
	      endt -= (to - from);
	      endf -= (to - from);

	      if (endt < to)
		break;

	      bcopy (endf, endt, to - from);
	    }

	  /* If SIZE wasn't a multiple of TO - FROM, there will be a
	     little left over.  The amount left over is
	     (endt + (to - from)) - to, which is endt - from.  */
	  bcopy (from, to, endt - from);
	}
    }
}
1342#endif /* emacs */
1343
1344#ifndef memmove
0a27e8ed 1345extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
74ad5c7f
KH
1346#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
1347#endif
1348
1349#endif
1350
1351
1352#define min(A, B) ((A) < (B) ? (A) : (B))
1353
1354/* Debugging hook for realloc. */
0a27e8ed 1355__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
74ad5c7f
KH
1356
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.

   Like `realloc' but bypasses the debugging hook.  SIZE == 0 frees PTR
   and returns a minimal allocation; PTR == NULL behaves like malloc.
   Returns NULL (with the original block reconstructed in place) if a
   needed new allocation fails.  */
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal (ptr);
      return _malloc_internal (0);
    }
  else if (ptr == NULL)
    return _malloc_internal (size);

  block = BLOCK (ptr);

  /* Unprotect the malloc bookkeeping state while we mutate it.  */
  PROTECT_MALLOC_STATE (0);

  /* type == 0 means a multi-block object; otherwise TYPE is the base-2
     log of the old fragment size.  */
  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal (ptr);
	      return result;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place. */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list. */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it. */
	  ++_chunks_used;
	  _free_internal (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving. */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system. */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal (ptr);
	  result = _malloc_internal (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
	        (void) _malloc_internal (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal (blocks * BLOCKSIZE);
		  _free_internal (previous);
		}
	      return NULL;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old. */
	  result = _malloc_internal (size);
	  if (result == NULL)
	    return NULL;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal (ptr);
	}
      break;
    }

  /* Re-protect the bookkeeping state now that we are consistent again.  */
  PROTECT_MALLOC_STATE (1);
  return result;
}
1479
1480__ptr_t
1481realloc (ptr, size)
1482 __ptr_t ptr;
1483 __malloc_size_t size;
1484{
1485 if (!__malloc_initialized && !__malloc_initialize ())
1486 return NULL;
1487
1488 return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal)
1489 (ptr, size);
1490}
1491/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1492
1493This library is free software; you can redistribute it and/or
1494modify it under the terms of the GNU Library General Public License as
1495published by the Free Software Foundation; either version 2 of the
1496License, or (at your option) any later version.
1497
1498This library is distributed in the hope that it will be useful,
1499but WITHOUT ANY WARRANTY; without even the implied warranty of
1500MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1501Library General Public License for more details.
1502
1503You should have received a copy of the GNU Library General Public
1504License along with this library; see the file COPYING.LIB. If
1505not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1506Cambridge, MA 02139, USA.
1507
1508 The author may be reached (Email) at the address mike@ai.mit.edu,
1509 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1510
1511#ifndef _MALLOC_INTERNAL
1512#define _MALLOC_INTERNAL
1513#include <malloc.h>
1514#endif
1515
1516/* Allocate an array of NMEMB elements each SIZE bytes long.
1517 The entire array is initialized to zeros. */
1518__ptr_t
1519calloc (nmemb, size)
1520 register __malloc_size_t nmemb;
1521 register __malloc_size_t size;
1522{
1523 register __ptr_t result = malloc (nmemb * size);
1524
1525 if (result != NULL)
1526 (void) memset (result, 0, nmemb * size);
1527
1528 return result;
1529}
1530/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1531This file is part of the GNU C Library.
1532
1533The GNU C Library is free software; you can redistribute it and/or modify
1534it under the terms of the GNU General Public License as published by
1535the Free Software Foundation; either version 2, or (at your option)
1536any later version.
1537
1538The GNU C Library is distributed in the hope that it will be useful,
1539but WITHOUT ANY WARRANTY; without even the implied warranty of
1540MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1541GNU General Public License for more details.
1542
1543You should have received a copy of the GNU General Public License
1544along with the GNU C Library; see the file COPYING. If not, write to
1545the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
1546
1547#ifndef _MALLOC_INTERNAL
1548#define _MALLOC_INTERNAL
1549#include <malloc.h>
1550#endif
1551
1552#ifndef __GNU_LIBRARY__
1553#define __sbrk sbrk
1554#endif
1555
1556#ifdef __GNU_LIBRARY__
1557/* It is best not to declare this and cast its result on foreign operating
1558 systems with potentially hostile include files. */
1559
1560#include <stddef.h>
0a27e8ed 1561extern __ptr_t __sbrk PP ((ptrdiff_t increment));
74ad5c7f
KH
1562#endif
1563
1564#ifndef NULL
1565#define NULL 0
1566#endif
1567
1568/* Allocate INCREMENT more bytes of data space,
1569 and return the start of data space, or NULL on errors.
1570 If INCREMENT is negative, shrink data space. */
1571__ptr_t
1572__default_morecore (increment)
1573 __malloc_ptrdiff_t increment;
1574{
1575 __ptr_t result = (__ptr_t) __sbrk (increment);
1576 if (result == (__ptr_t) -1)
1577 return NULL;
1578 return result;
1579}
1580/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1581
1582This library is free software; you can redistribute it and/or
1583modify it under the terms of the GNU Library General Public License as
1584published by the Free Software Foundation; either version 2 of the
1585License, or (at your option) any later version.
1586
1587This library is distributed in the hope that it will be useful,
1588but WITHOUT ANY WARRANTY; without even the implied warranty of
1589MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1590Library General Public License for more details.
1591
1592You should have received a copy of the GNU Library General Public
1593License along with this library; see the file COPYING.LIB. If
1594not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1595Cambridge, MA 02139, USA. */
1596
1597#ifndef _MALLOC_INTERNAL
1598#define _MALLOC_INTERNAL
1599#include <malloc.h>
1600#endif
1601
1602#if __DJGPP__ - 0 == 1
1603
1604/* There is some problem with memalign in DJGPP v1 and we are supposed
1605 to omit it. Noone told me why, they just told me to do it. */
1606
1607#else
1608
eec2d1de
EZ
1609__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
1610 __malloc_size_t __alignment));
74ad5c7f
KH
1611
/* Return a block of SIZE bytes whose address is a multiple of
   ALIGNMENT, or NULL on failure.  Misaligned results are recorded in
   _aligned_blocks so `free' can recover the pointer malloc returned.  */
__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;

  /* Defer entirely to the debugging hook when one is installed.  */
  if (__memalign_hook)
    return (*__memalign_hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.
	 Freeing and re-mallocing tends to hand back the same block,
	 now sized exactly for the padded request.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  /* No free slot; prepend a fresh entry to the list.  */
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l == NULL)
	    {
	      free (result);
	      return NULL;
	    }
	  l->next = _aligned_blocks;
	  _aligned_blocks = l;
	}
      /* Remember both the exact malloc'd address and the adjusted one.  */
      l->exact = result;
      result = l->aligned = (char *) result + alignment - adj;
    }

  return result;
}
1677
1678#endif /* Not DJGPP v1 */
1679/* Allocate memory on a page boundary.
1680 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1681
1682This library is free software; you can redistribute it and/or
1683modify it under the terms of the GNU Library General Public License as
1684published by the Free Software Foundation; either version 2 of the
1685License, or (at your option) any later version.
1686
1687This library is distributed in the hope that it will be useful,
1688but WITHOUT ANY WARRANTY; without even the implied warranty of
1689MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1690Library General Public License for more details.
1691
1692You should have received a copy of the GNU Library General Public
1693License along with this library; see the file COPYING.LIB. If
1694not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1695Cambridge, MA 02139, USA.
1696
1697 The author may be reached (Email) at the address mike@ai.mit.edu,
1698 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1699
1700#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)
1701
1702/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
1703 on MSDOS, where it conflicts with a system header file. */
1704
1705#define ELIDE_VALLOC
1706
1707#endif
1708
1709#ifndef ELIDE_VALLOC
1710
1711#if defined (__GNU_LIBRARY__) || defined (_LIBC)
1712#include <stddef.h>
1713#include <sys/cdefs.h>
47582ab3
KH
1714#if defined (__GLIBC__) && __GLIBC__ >= 2
1715/* __getpagesize is already declared in <unistd.h> with return type int */
1716#else
0a27e8ed 1717extern size_t __getpagesize PP ((void));
47582ab3 1718#endif
74ad5c7f
KH
1719#else
1720#include "getpagesize.h"
1721#define __getpagesize() getpagesize()
1722#endif
1723
1724#ifndef _MALLOC_INTERNAL
1725#define _MALLOC_INTERNAL
1726#include <malloc.h>
1727#endif
1728
1729static __malloc_size_t pagesize;
1730
1731__ptr_t
1732valloc (size)
1733 __malloc_size_t size;
1734{
1735 if (pagesize == 0)
1736 pagesize = __getpagesize ();
1737
1738 return memalign (pagesize, size);
1739}
1740
1741#endif /* Not ELIDE_VALLOC. */
a3ba27da
GM
1742
1743#ifdef GC_MCHECK
1744
1745/* Standard debugging hooks for `malloc'.
1746 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1747 Written May 1989 by Mike Haertel.
1748
1749This library is free software; you can redistribute it and/or
1750modify it under the terms of the GNU Library General Public License as
1751published by the Free Software Foundation; either version 2 of the
1752License, or (at your option) any later version.
1753
1754This library is distributed in the hope that it will be useful,
1755but WITHOUT ANY WARRANTY; without even the implied warranty of
1756MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1757Library General Public License for more details.
1758
1759You should have received a copy of the GNU Library General Public
1760License along with this library; see the file COPYING.LIB. If
1761not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1762Cambridge, MA 02139, USA.
1763
1764 The author may be reached (Email) at the address mike@ai.mit.edu,
1765 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1766
1767#ifdef emacs
1768#include <stdio.h>
1769#else
1770#ifndef _MALLOC_INTERNAL
1771#define _MALLOC_INTERNAL
1772#include <malloc.h>
1773#include <stdio.h>
1774#endif
1775#endif
1776
1777/* Old hook values. */
1778static void (*old_free_hook) __P ((__ptr_t ptr));
1779static __ptr_t (*old_malloc_hook) __P ((__malloc_size_t size));
1780static __ptr_t (*old_realloc_hook) __P ((__ptr_t ptr, __malloc_size_t size));
1781
1782/* Function to call when something awful happens. */
1783static void (*abortfunc) __P ((enum mcheck_status));
1784
1785/* Arbitrary magical numbers. */
1786#define MAGICWORD 0xfedabeeb
1787#define MAGICFREE 0xd8675309
1788#define MAGICBYTE ((char) 0xd7)
1789#define MALLOCFLOOD ((char) 0x93)
1790#define FREEFLOOD ((char) 0x95)
1791
1792struct hdr
1793 {
1794 __malloc_size_t size; /* Exact size requested by user. */
1795 unsigned long int magic; /* Magic number to check header integrity. */
1796 };
1797
1798#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
1799#define flood memset
1800#else
1801static void flood __P ((__ptr_t, int, __malloc_size_t));
1802static void
1803flood (ptr, val, size)
1804 __ptr_t ptr;
1805 int val;
1806 __malloc_size_t size;
1807{
1808 char *cp = ptr;
1809 while (size--)
1810 *cp++ = val;
1811}
1812#endif
1813
1814static enum mcheck_status checkhdr __P ((const struct hdr *));
1815static enum mcheck_status
1816checkhdr (hdr)
1817 const struct hdr *hdr;
1818{
1819 enum mcheck_status status;
1820 switch (hdr->magic)
1821 {
1822 default:
1823 status = MCHECK_HEAD;
1824 break;
1825 case MAGICFREE:
1826 status = MCHECK_FREE;
1827 break;
1828 case MAGICWORD:
1829 if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
1830 status = MCHECK_TAIL;
1831 else
1832 status = MCHECK_OK;
1833 break;
1834 }
1835 if (status != MCHECK_OK)
1836 (*abortfunc) (status);
1837 return status;
1838}
1839
1840static void freehook __P ((__ptr_t));
1841static void
1842freehook (ptr)
1843 __ptr_t ptr;
1844{
1845 struct hdr *hdr;
1846
1847 if (ptr)
1848 {
1849 hdr = ((struct hdr *) ptr) - 1;
1850 checkhdr (hdr);
1851 hdr->magic = MAGICFREE;
1852 flood (ptr, FREEFLOOD, hdr->size);
1853 }
1854 else
1855 hdr = NULL;
1856
1857 __free_hook = old_free_hook;
1858 free (hdr);
1859 __free_hook = freehook;
1860}
1861
1862static __ptr_t mallochook __P ((__malloc_size_t));
1863static __ptr_t
1864mallochook (size)
1865 __malloc_size_t size;
1866{
1867 struct hdr *hdr;
1868
1869 __malloc_hook = old_malloc_hook;
1870 hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
1871 __malloc_hook = mallochook;
1872 if (hdr == NULL)
1873 return NULL;
1874
1875 hdr->size = size;
1876 hdr->magic = MAGICWORD;
1877 ((char *) &hdr[1])[size] = MAGICBYTE;
1878 flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
1879 return (__ptr_t) (hdr + 1);
1880}
1881
1882static __ptr_t reallochook __P ((__ptr_t, __malloc_size_t));
/* __realloc_hook replacement: validate the old block, flood any bytes
   being discarded or newly exposed, and re-arm the header and guard
   byte of the resized block.  */
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      /* When shrinking, poison the tail that is about to be cut off
	 before realloc can hand it to someone else.  */
      if (size < osize)
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  /* Temporarily restore all original hooks so `realloc' (and any
     malloc/free it performs internally) does real work.  */
  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  /* Re-arm the header and trailing guard byte for the new size.  */
  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  /* When growing, flood the newly exposed bytes so reads of
     uninitialized memory are detectable.  */
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}
1918
1919static void
1920mabort (status)
1921 enum mcheck_status status;
1922{
1923 const char *msg;
1924 switch (status)
1925 {
1926 case MCHECK_OK:
1927 msg = "memory is consistent, library is buggy";
1928 break;
1929 case MCHECK_HEAD:
1930 msg = "memory clobbered before allocated block";
1931 break;
1932 case MCHECK_TAIL:
1933 msg = "memory clobbered past end of allocated block";
1934 break;
1935 case MCHECK_FREE:
1936 msg = "block freed twice";
1937 break;
1938 default:
1939 msg = "bogus mcheck_status, library is buggy";
1940 break;
1941 }
1942#ifdef __GNU_LIBRARY__
1943 __libc_fatal (msg);
1944#else
1945 fprintf (stderr, "mcheck: %s\n", msg);
1946 fflush (stderr);
1947 abort ();
1948#endif
1949}
1950
1951static int mcheck_used = 0;
1952
1953int
1954mcheck (func)
1955 void (*func) __P ((enum mcheck_status));
1956{
1957 abortfunc = (func != NULL) ? func : &mabort;
1958
1959 /* These hooks may not be safely inserted if malloc is already in use. */
1960 if (!__malloc_initialized && !mcheck_used)
1961 {
1962 old_free_hook = __free_hook;
1963 __free_hook = freehook;
1964 old_malloc_hook = __malloc_hook;
1965 __malloc_hook = mallochook;
1966 old_realloc_hook = __realloc_hook;
1967 __realloc_hook = reallochook;
1968 mcheck_used = 1;
1969 }
1970
1971 return mcheck_used ? 0 : -1;
1972}
1973
1974enum mcheck_status
1975mprobe (__ptr_t ptr)
1976{
1977 return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
1978}
1979
1980#endif /* GC_MCHECK */