Merge from emacs-24; up to 2012-12-15T12:19:04Z!juri@jurta.org
[bpt/emacs.git] / src / gmalloc.c
CommitLineData
74ad5c7f 1/* Declarations for `malloc' and friends.
ab422c4d
PE
2 Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013 Free
3 Software Foundation, Inc.
74ad5c7f
KH
4 Written May 1989 by Mike Haertel.
5
6This library is free software; you can redistribute it and/or
423a1f3c 7modify it under the terms of the GNU General Public License as
74ad5c7f
KH
8published by the Free Software Foundation; either version 2 of the
9License, or (at your option) any later version.
10
11This library is distributed in the hope that it will be useful,
12but WITHOUT ANY WARRANTY; without even the implied warranty of
13MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 14General Public License for more details.
74ad5c7f 15
423a1f3c 16You should have received a copy of the GNU General Public
fee0bd5f 17License along with this library. If not, see <http://www.gnu.org/licenses/>.
74ad5c7f
KH
18
19 The author may be reached (Email) at the address mike@ai.mit.edu,
20 or (US mail) as Mike Haertel c/o Free Software Foundation. */
21
74ad5c7f 22#include <config.h>
74ad5c7f 23
ae9e757a 24#ifdef HAVE_PTHREAD
8d0d84d2
YM
25#define USE_PTHREAD
26#endif
27
74ad5c7f 28#include <string.h>
74ad5c7f 29#include <limits.h>
d0baac98 30#include <stdint.h>
74ad5c7f 31#include <unistd.h>
74ad5c7f 32
2f213514
YM
33#ifdef USE_PTHREAD
34#include <pthread.h>
35#endif
36
62aba0d4
FP
37#ifdef WINDOWSNT
38#include <w32heap.h> /* for sbrk */
39#endif
40
74ad5c7f
KH
41#ifdef __cplusplus
42extern "C"
43{
44#endif
45
74ad5c7f 46#include <stddef.h>
74ad5c7f
KH
47
48
49/* Allocate SIZE bytes of memory. */
d0baac98 50extern void *malloc (size_t size);
74ad5c7f 51/* Re-allocate the previously allocated block
d0baac98
PE
52 in ptr, making the new block SIZE bytes long. */
53extern void *realloc (void *ptr, size_t size);
74ad5c7f 54/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
d0baac98 55extern void *calloc (size_t nmemb, size_t size);
74ad5c7f 56/* Free a block allocated by `malloc', `realloc' or `calloc'. */
d0baac98 57extern void free (void *ptr);
74ad5c7f
KH
58
59/* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
d0baac98
PE
60#ifdef MSDOS
61extern void *memalign (size_t, size_t);
62extern int posix_memalign (void **, size_t, size_t);
74ad5c7f
KH
63#endif
64
3ceeb306
YM
65#ifdef USE_PTHREAD
66/* Set up mutexes and make malloc etc. thread-safe. */
d0baac98 67extern void malloc_enable_thread (void);
3ceeb306 68#endif
74ad5c7f 69
74ad5c7f
KH
70/* The allocator divides the heap into blocks of fixed size; large
71 requests receive one or more whole blocks, and small requests
72 receive a fragment of a block. Fragment sizes are powers of two,
73 and all fragments of a block are the same size. When all the
74 fragments in a block have been freed, the block itself is freed. */
5e617bc2 75#define INT_BIT (CHAR_BIT * sizeof (int))
74ad5c7f
KH
76#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
77#define BLOCKSIZE (1 << BLOCKLOG)
78#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
79
80/* Determine the amount of memory spanned by the initial heap table
81 (not an absolute limit). */
82#define HEAP (INT_BIT > 16 ? 4194304 : 65536)
83
84/* Number of contiguous free blocks allowed to build up at the end of
85 memory before they will be returned to the system. */
86#define FINAL_FREE_BLOCKS 8
87
88/* Data structure giving per-block information. */
89typedef union
90 {
91 /* Heap information for a busy block. */
92 struct
93 {
94 /* Zero for a large (multiblock) object, or positive giving the
95 logarithm to the base two of the fragment size. */
96 int type;
97 union
98 {
99 struct
100 {
d0baac98
PE
101 size_t nfree; /* Free frags in a fragmented block. */
102 size_t first; /* First free fragment of the block. */
74ad5c7f
KH
103 } frag;
104 /* For a large object, in its first block, this has the number
105 of blocks in the object. In the other blocks, this has a
106 negative number which says how far back the first block is. */
d0baac98 107 ptrdiff_t size;
74ad5c7f
KH
108 } info;
109 } busy;
110 /* Heap information for a free block
111 (that may be the first of a free cluster). */
112 struct
113 {
d0baac98
PE
114 size_t size; /* Size (in blocks) of a free cluster. */
115 size_t next; /* Index of next free cluster. */
116 size_t prev; /* Index of previous free cluster. */
74ad5c7f
KH
117 } free;
118 } malloc_info;
119
120/* Pointer to first block of the heap. */
121extern char *_heapbase;
122
123/* Table indexed by block number giving per-block information. */
124extern malloc_info *_heapinfo;
125
126/* Address to block number and vice versa. */
127#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
d0baac98 128#define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
74ad5c7f
KH
129
130/* Current search index for the heap table. */
d0baac98 131extern size_t _heapindex;
74ad5c7f
KH
132
133/* Limit of valid info table indices. */
d0baac98 134extern size_t _heaplimit;
74ad5c7f
KH
135
136/* Doubly linked lists of free fragments. */
137struct list
138 {
139 struct list *next;
140 struct list *prev;
141 };
142
143/* Free list headers for each fragment size. */
144extern struct list _fraghead[];
145
146/* List of blocks allocated with `memalign' (or `valloc'). */
147struct alignlist
148 {
149 struct alignlist *next;
d0baac98
PE
150 void *aligned; /* The address that memaligned returned. */
151 void *exact; /* The address that malloc returned. */
74ad5c7f
KH
152 };
153extern struct alignlist *_aligned_blocks;
154
155/* Instrumentation. */
d0baac98
PE
156extern size_t _chunks_used;
157extern size_t _bytes_used;
158extern size_t _chunks_free;
159extern size_t _bytes_free;
74ad5c7f
KH
160
161/* Internal versions of `malloc', `realloc', and `free'
162 used when these functions need to call each other.
163 They are the same but don't call the hooks. */
d0baac98
PE
164extern void *_malloc_internal (size_t);
165extern void *_realloc_internal (void *, size_t);
166extern void _free_internal (void *);
167extern void *_malloc_internal_nolock (size_t);
168extern void *_realloc_internal_nolock (void *, size_t);
169extern void _free_internal_nolock (void *);
74ad5c7f 170
2f213514 171#ifdef USE_PTHREAD
8d0d84d2 172extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
3ceeb306
YM
173extern int _malloc_thread_enabled_p;
174#define LOCK() \
175 do { \
176 if (_malloc_thread_enabled_p) \
177 pthread_mutex_lock (&_malloc_mutex); \
178 } while (0)
179#define UNLOCK() \
180 do { \
181 if (_malloc_thread_enabled_p) \
182 pthread_mutex_unlock (&_malloc_mutex); \
183 } while (0)
184#define LOCK_ALIGNED_BLOCKS() \
185 do { \
186 if (_malloc_thread_enabled_p) \
187 pthread_mutex_lock (&_aligned_blocks_mutex); \
188 } while (0)
189#define UNLOCK_ALIGNED_BLOCKS() \
190 do { \
191 if (_malloc_thread_enabled_p) \
192 pthread_mutex_unlock (&_aligned_blocks_mutex); \
193 } while (0)
2f213514
YM
194#else
195#define LOCK()
196#define UNLOCK()
8d0d84d2
YM
197#define LOCK_ALIGNED_BLOCKS()
198#define UNLOCK_ALIGNED_BLOCKS()
2f213514
YM
199#endif
200
74ad5c7f
KH
201/* Given an address in the middle of a malloc'd object,
202 return the address of the beginning of the object. */
d0baac98 203extern void *malloc_find_object_address (void *ptr);
74ad5c7f
KH
204
205/* Underlying allocation function; successive calls should
206 return contiguous pieces of memory. */
d0baac98 207extern void *(*__morecore) (ptrdiff_t size);
74ad5c7f
KH
208
209/* Default value of `__morecore'. */
d0baac98 210extern void *__default_morecore (ptrdiff_t size);
74ad5c7f
KH
211
212/* If not NULL, this function is called after each time
213 `__morecore' is called to increase the data size. */
d0baac98 214extern void (*__after_morecore_hook) (void);
74ad5c7f
KH
215
216/* Number of extra blocks to get each time we ask for more core.
217 This reduces the frequency of calling `(*__morecore)'. */
d0baac98 218extern size_t __malloc_extra_blocks;
74ad5c7f
KH
219
220/* Nonzero if `malloc' has been called and done its initialization. */
221extern int __malloc_initialized;
222/* Function called to initialize malloc data structures. */
d0baac98 223extern int __malloc_initialize (void);
74ad5c7f
KH
224
225/* Hooks for debugging versions. */
d0baac98
PE
226extern void (*__malloc_initialize_hook) (void);
227extern void (*__free_hook) (void *ptr);
228extern void *(*__malloc_hook) (size_t size);
229extern void *(*__realloc_hook) (void *ptr, size_t size);
230extern void *(*__memalign_hook) (size_t size, size_t alignment);
74ad5c7f
KH
231
232/* Return values for `mprobe': these are the kinds of inconsistencies that
233 `mcheck' enables detection of. */
234enum mcheck_status
235 {
236 MCHECK_DISABLED = -1, /* Consistency checking is not turned on. */
237 MCHECK_OK, /* Block is fine. */
238 MCHECK_FREE, /* Block freed twice. */
239 MCHECK_HEAD, /* Memory before the block was clobbered. */
240 MCHECK_TAIL /* Memory after the block was clobbered. */
241 };
242
243/* Activate a standard collection of debugging hooks. This must be called
244 before `malloc' is ever called. ABORTFUNC is called with an error code
245 (see enum above) when an inconsistency is detected. If ABORTFUNC is
246 null, the standard function prints on stderr and then calls `abort'. */
d0baac98 247extern int mcheck (void (*abortfunc) (enum mcheck_status));
74ad5c7f
KH
248
249/* Check for aberrations in a particular malloc'd block. You must have
250 called `mcheck' already. These are the same checks that `mcheck' does
251 when you free or reallocate a block. */
d0baac98 252extern enum mcheck_status mprobe (void *ptr);
74ad5c7f
KH
253
254/* Activate a standard collection of tracing hooks. */
d0baac98
PE
255extern void mtrace (void);
256extern void muntrace (void);
74ad5c7f
KH
257
258/* Statistics available to the user. */
259struct mstats
260 {
d0baac98
PE
261 size_t bytes_total; /* Total size of the heap. */
262 size_t chunks_used; /* Chunks allocated by the user. */
263 size_t bytes_used; /* Byte total of user-allocated chunks. */
264 size_t chunks_free; /* Chunks in the free list. */
265 size_t bytes_free; /* Byte total of chunks in the free list. */
74ad5c7f
KH
266 };
267
268/* Pick up the current statistics. */
d0baac98 269extern struct mstats mstats (void);
74ad5c7f
KH
270
271/* Call WARNFUN with a warning message when memory usage is high. */
d0baac98 272extern void memory_warnings (void *start, void (*warnfun) (const char *));
74ad5c7f
KH
273
274#ifdef __cplusplus
275}
276#endif
277
74ad5c7f
KH
278/* Memory allocator `malloc'.
279 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
280 Written May 1989 by Mike Haertel.
281
282This library is free software; you can redistribute it and/or
423a1f3c 283modify it under the terms of the GNU General Public License as
74ad5c7f
KH
284published by the Free Software Foundation; either version 2 of the
285License, or (at your option) any later version.
286
287This library is distributed in the hope that it will be useful,
288but WITHOUT ANY WARRANTY; without even the implied warranty of
289MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 290General Public License for more details.
74ad5c7f 291
423a1f3c 292You should have received a copy of the GNU General Public
fee0bd5f 293License along with this library. If not, see <http://www.gnu.org/licenses/>.
74ad5c7f
KH
294
295 The author may be reached (Email) at the address mike@ai.mit.edu,
296 or (US mail) as Mike Haertel c/o Free Software Foundation. */
297
74ad5c7f
KH
298#include <errno.h>
299
a4579d33
KB
300/* On Cygwin there are two heaps. temacs uses the static heap
301 (defined in sheap.c and managed with bss_sbrk), and the dumped
302 emacs uses the Cygwin heap (managed with sbrk). When emacs starts
303 on Cygwin, it reinitializes malloc, and we save the old info for
304 use by free and realloc if they're called with a pointer into the
db76dd85
KB
305 static heap.
306
307 Currently (2011-08-16) the Cygwin build doesn't use ralloc.c; if
308 this is changed in the future, we'll have to similarly deal with
309 reinitializing ralloc. */
a4579d33 310#ifdef CYGWIN
d0baac98 311extern void *bss_sbrk (ptrdiff_t size);
ef6d1039 312extern int bss_sbrk_did_unexec;
a4579d33
KB
313char *bss_sbrk_heapbase; /* _heapbase for static heap */
314malloc_info *bss_sbrk_heapinfo; /* _heapinfo for static heap */
ef6d1039 315#endif
d0baac98 316void *(*__morecore) (ptrdiff_t size) = __default_morecore;
74ad5c7f
KH
317
318/* Debugging hook for `malloc'. */
d0baac98 319void *(*__malloc_hook) (size_t size);
74ad5c7f
KH
320
321/* Pointer to the base of the first block. */
322char *_heapbase;
323
324/* Block information table. Allocated with align/__free (not malloc/free). */
325malloc_info *_heapinfo;
326
327/* Number of info entries. */
d0baac98 328static size_t heapsize;
74ad5c7f
KH
329
330/* Search index in the info table. */
d0baac98 331size_t _heapindex;
74ad5c7f
KH
332
333/* Limit of valid info table indices. */
d0baac98 334size_t _heaplimit;
74ad5c7f
KH
335
336/* Free lists for each fragment size. */
337struct list _fraghead[BLOCKLOG];
338
339/* Instrumentation. */
d0baac98
PE
340size_t _chunks_used;
341size_t _bytes_used;
342size_t _chunks_free;
343size_t _bytes_free;
74ad5c7f
KH
344
345/* Are you experienced? */
346int __malloc_initialized;
347
d0baac98 348size_t __malloc_extra_blocks;
74ad5c7f 349
d0baac98
PE
350void (*__malloc_initialize_hook) (void);
351void (*__after_morecore_hook) (void);
74ad5c7f 352
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

/* Nonzero while the info table pages are mprotect'ed read-only.  */
static int state_protected_p;
/* Size (in bytes) of the region last passed to mprotect.  */
static size_t last_state_size;
/* _heapinfo value at the time of the last protection change; used to
   detect relocation of the info table by morecore/realloc.  */
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  /* Remember the current table location/extent for the next call.  */
  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      /* Read-only when protected; any rogue write then faults here
	 rather than silently corrupting the heap bookkeeping.  */
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
398
74ad5c7f
KH
399
400/* Aligned allocation. */
d0baac98
PE
401static void *
402align (size_t size)
74ad5c7f 403{
d0baac98
PE
404 void *result;
405 ptrdiff_t adj;
74ad5c7f 406
ceeb3d7d 407 /* align accepts an unsigned argument, but __morecore accepts a
d0baac98
PE
408 signed one. This could lead to trouble if SIZE overflows the
409 ptrdiff_t type accepted by __morecore. We just punt in that
ceeb3d7d 410 case, since they are requesting a ludicrous amount anyway. */
d0baac98 411 if (PTRDIFF_MAX < size)
ceeb3d7d
EZ
412 result = 0;
413 else
414 result = (*__morecore) (size);
d0baac98 415 adj = (uintptr_t) result % BLOCKSIZE;
74ad5c7f
KH
416 if (adj != 0)
417 {
74ad5c7f 418 adj = BLOCKSIZE - adj;
d0baac98 419 (*__morecore) (adj);
74ad5c7f
KH
420 result = (char *) result + adj;
421 }
422
423 if (__after_morecore_hook)
424 (*__after_morecore_hook) ();
425
426 return result;
427}
428
429/* Get SIZE bytes, if we can get them starting at END.
430 Return the address of the space we got.
431 If we cannot get space at END, fail and return 0. */
d0baac98
PE
432static void *
433get_contiguous_space (ptrdiff_t size, void *position)
74ad5c7f 434{
d0baac98
PE
435 void *before;
436 void *after;
74ad5c7f
KH
437
438 before = (*__morecore) (0);
439 /* If we can tell in advance that the break is at the wrong place,
440 fail now. */
441 if (before != position)
442 return 0;
443
444 /* Allocate SIZE bytes and get the address of them. */
445 after = (*__morecore) (size);
446 if (!after)
447 return 0;
448
449 /* It was not contiguous--reject it. */
450 if (after != position)
451 {
452 (*__morecore) (- size);
453 return 0;
454 }
455
456 return after;
457}
458
459
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  /* First block spanned by the table, and how many blocks it covers.  */
  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.
     type == 0 marks it as a large (multiblock) object.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address: every non-first
     block stores the negated distance back to the first block.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
482
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

/* pthread_atfork handlers: take both malloc locks before fork so no
   thread holds them across the fork, and release them afterwards in
   both parent and child.  Lock order (malloc, then aligned-blocks)
   is the reverse of the unlock order, avoiding deadlock.  */

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  /* Only now do LOCK/UNLOCK actually touch the mutexes.  */
  _malloc_thread_enabled_p = 1;
}
#endif	/* USE_PTHREAD */
74ad5c7f 528
/* One-time malloc initialization: set up the initial info table and
   heap bookkeeping.  On success sets __malloc_initialized to 1; on
   failure (align returning NULL) returns leaving it 0.  */
static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

#ifdef CYGWIN
  if (bss_sbrk_did_unexec)
    /* we're reinitializing the dumped emacs: save the static-heap
       state so free/realloc can still service pointers into it.  */
    {
      bss_sbrk_heapbase = _heapbase;
      bss_sbrk_heapinfo = _heapinfo;
      memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
    }
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  /* Initial table covers HEAP bytes of heap (it can grow later).  */
  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  /* Entry 0 is the anchor of the circular free list; empty for now.  */
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  /* The heap starts at the info table itself.  */
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  /* Make the table describe (and account for) its own storage.  */
  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}
566
784c1472
JD
567/* Set everything up and remember that we have.
568 main will call malloc which calls this function. That is before any threads
569 or signal handlers has been set up, so we don't need thread protection. */
2f213514 570int
d0baac98 571__malloc_initialize (void)
2f213514 572{
2f213514
YM
573 if (__malloc_initialized)
574 return 0;
575
576 malloc_initialize_1 ();
2f213514
YM
577
578 return __malloc_initialized;
74ad5c7f
KH
579}
580
/* Nonzero while morecore_nolock is (indirectly) re-entering itself via
   _realloc_internal_nolock; used to cut the recursion short.  */
static int morecore_recursing;

/* Get neatly aligned memory from the system, initializing or
   growing the heap info table as necessary.  Returns NULL on failure
   or when called recursively.  Caller must hold the malloc lock.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      /* Undo the original SIZE-byte allocation too.  */
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
696
/* Allocate memory from the heap.  Core allocator: small requests
   (<= BLOCKSIZE/2) are served from power-of-two fragment lists; larger
   requests get whole blocks from the circular free list, extending the
   heap via morecore_nolock when needed.  Caller must hold the malloc
   lock.  Returns NULL on failure.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  /* Every chunk must be able to hold the free-list links once freed.  */
  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    /* `first' is the fragment index within the block, derived
	       from the offset of the next free fragment.  */
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.
	     After the loop, i == BLOCKSIZE >> log (fragment count).  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      /* Mark the block busy: type 0 means a large (multiblock) object.  */
      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
895
d0baac98
PE
/* Thread-safe entry to the core allocator: take the malloc mutex
   around _malloc_internal_nolock.  */
void *
_malloc_internal (size_t size)
{
  LOCK ();
  void *block = _malloc_internal_nolock (size);
  UNLOCK ();
  return block;
}
907
d0baac98
PE
908void *
909malloc (size_t size)
74ad5c7f 910{
d0baac98 911 void *(*hook) (size_t);
8d0d84d2 912
74ad5c7f
KH
913 if (!__malloc_initialized && !__malloc_initialize ())
914 return NULL;
915
8d0d84d2
YM
916 /* Copy the value of __malloc_hook to an automatic variable in case
917 __malloc_hook is modified in another thread between its
918 NULL-check and the use.
919
920 Note: Strictly speaking, this is not a right solution. We should
921 use mutexes to access non-read-only variables that are shared
922 among multiple threads. We just leave it for compatibility with
923 glibc malloc (i.e., assignments to __malloc_hook) for now. */
924 hook = __malloc_hook;
925 return (hook != NULL ? *hook : _malloc_internal) (size);
74ad5c7f
KH
926}
927\f
#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

/* Forward `_malloc' to this file's `malloc'.  */
void *
_malloc (size_t size)
{
  return malloc (size);
}

/* Forward `_free' to this file's `free'.  */
void
_free (void *ptr)
{
  free (ptr);
}

/* Forward `_realloc' to this file's `realloc'.  */
void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif
956/* Free a block of memory allocated by `malloc'.
957 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
958 Written May 1989 by Mike Haertel.
959
960This library is free software; you can redistribute it and/or
423a1f3c 961modify it under the terms of the GNU General Public License as
74ad5c7f
KH
962published by the Free Software Foundation; either version 2 of the
963License, or (at your option) any later version.
964
965This library is distributed in the hope that it will be useful,
966but WITHOUT ANY WARRANTY; without even the implied warranty of
967MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 968General Public License for more details.
74ad5c7f 969
423a1f3c 970You should have received a copy of the GNU General Public
fee0bd5f 971License along with this library. If not, see <http://www.gnu.org/licenses/>.
74ad5c7f
KH
972
973 The author may be reached (Email) at the address mike@ai.mit.edu,
974 or (US mail) as Mike Haertel c/o Free Software Foundation. */
975
74ad5c7f 976
74ad5c7f 977/* Debugging hook for free. */
d0baac98 978void (*__free_hook) (void *__ptr);
74ad5c7f
KH
979
980/* List of blocks allocated by memalign. */
981struct alignlist *_aligned_blocks = NULL;
982
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.

   PTR may be NULL (no-op), a pointer returned by malloc, or an
   aligned pointer recorded in `_aligned_blocks' by memalign (in which
   case it is first translated back to the exact malloc'd address).
   Large blocks (busy.type == 0) are linked back into the sorted free
   list, coalesced with adjacent free blocks, and — when a big enough
   run ends at the break — returned to the system via __morecore.
   Fragments (busy.type > 0, the log2 of the fragment size) are linked
   onto _fraghead[type], and a fully-free block of fragments is freed
   as a whole block.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* We're being asked to free something in the static heap. */
    return;
#endif

  PROTECT_MALLOC_STATE (0);

  /* If PTR is an aligned pointer handed out by memalign, recover the
     exact address malloc returned and release the bookkeeping slot.  */
  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;      /* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          size_t info_block = BLOCK (_heapinfo);
          size_t info_blocks = _heapinfo[info_block].busy.info.size;
          size_t prev_block = _heapinfo[block].free.prev;
          size_t prev_blocks = _heapinfo[prev_block].free.size;
          size_t next_block = _heapinfo[block].free.next;
          size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold)))
              )
            {
              malloc_info *newinfo;
              size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal_nolock (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              /* NOTE(review): the result of _malloc_internal_nolock is not
                 checked for NULL before memmove — presumably it cannot fail
                 here because the blocks just freed satisfy the request;
                 confirm.  */
              newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              /* NOTE(review): BYTES is unsigned, so `-bytes' wraps before
                 being converted for __morecore's ptrdiff_t parameter —
                 works on the usual two's-complement targets; verify.  */
              register size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
          _free_internal_nolock (ADDRESS (block));
#else
          free (ADDRESS (block));
#endif
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first =
            (uintptr_t) ptr % BLOCKSIZE >> type;
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
1234
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.
   Serializes against other threads by holding the malloc mutex
   around `_free_internal_nolock'.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}
1244
1245/* Return memory to the heap. */
ca9c0567 1246
4624371d 1247void
d0baac98 1248free (void *ptr)
74ad5c7f 1249{
d0baac98 1250 void (*hook) (void *) = __free_hook;
8d0d84d2
YM
1251
1252 if (hook != NULL)
1253 (*hook) (ptr);
74ad5c7f
KH
1254 else
1255 _free_internal (ptr);
1256}
1257
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
/* Historical alias; some old programs call `cfree' instead of `free'.  */
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
1268/* Change the size of a block allocated by `malloc'.
1269 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1270 Written May 1989 by Mike Haertel.
1271
1272This library is free software; you can redistribute it and/or
423a1f3c 1273modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1274published by the Free Software Foundation; either version 2 of the
1275License, or (at your option) any later version.
1276
1277This library is distributed in the hope that it will be useful,
1278but WITHOUT ANY WARRANTY; without even the implied warranty of
1279MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1280General Public License for more details.
74ad5c7f 1281
423a1f3c 1282You should have received a copy of the GNU General Public
fee0bd5f 1283License along with this library. If not, see <http://www.gnu.org/licenses/>.
74ad5c7f
KH
1284
1285 The author may be reached (Email) at the address mike@ai.mit.edu,
1286 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1287
#ifndef min
/* Smaller of A and B.  Beware: each argument may be evaluated twice.  */
#define min(A, B) ((A) < (B) ? (A) : (B))
#endif
74ad5c7f 1291
a4579d33
KB
/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
#ifdef CYGWIN
void *
special_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  /* Recover PTR's block index and original size from the static-heap
     bookkeeping tables dumped with the executable.  */
  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  oldsize =
    type == 0 ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;	/* Fragment: its size is 2**type bytes.  */
  result = _malloc_internal_nolock (size);
  if (result != NULL)
    memcpy (result, ptr, min (oldsize, size));
  return result;
}
#endif
1314
74ad5c7f 1315/* Debugging hook for realloc. */
d0baac98 1316void *(*__realloc_hook) (void *ptr, size_t size);
74ad5c7f
KH
1317
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.

   Like `_realloc_internal' but assumes the malloc mutex is already
   held.  SIZE == 0 behaves as free-then-malloc(0); PTR == NULL as a
   plain malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* ptr points into the static heap */
    return special_realloc (ptr, size);
#endif

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal_nolock (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal_nolock (ptr);
              goto out;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list.  */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal_nolock (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal_nolock (ptr);
          result = _malloc_internal_nolock (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
              else
                {
                  /* Re-allocate the gap before the old block, grab the
                     old block back, then release the gap again.  */
                  void *previous
                    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
                  _free_internal_nolock (previous);
                }
              goto out;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
          size <= (size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old.  */
          result = _malloc_internal_nolock (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (size_t) 1 << type));
          _free_internal_nolock (ptr);
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
1445
d0baac98
PE
1446void *
1447_realloc_internal (void *ptr, size_t size)
8d0d84d2 1448{
d0baac98 1449 void *result;
8d0d84d2 1450
5e617bc2 1451 LOCK ();
8d0d84d2 1452 result = _realloc_internal_nolock (ptr, size);
2f213514 1453 UNLOCK ();
8d0d84d2 1454
74ad5c7f
KH
1455 return result;
1456}
1457
d0baac98
PE
1458void *
1459realloc (void *ptr, size_t size)
74ad5c7f 1460{
d0baac98 1461 void *(*hook) (void *, size_t);
8d0d84d2 1462
74ad5c7f
KH
1463 if (!__malloc_initialized && !__malloc_initialize ())
1464 return NULL;
1465
8d0d84d2
YM
1466 hook = __realloc_hook;
1467 return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
74ad5c7f
KH
1468}
1469/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1470
1471This library is free software; you can redistribute it and/or
423a1f3c 1472modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1473published by the Free Software Foundation; either version 2 of the
1474License, or (at your option) any later version.
1475
1476This library is distributed in the hope that it will be useful,
1477but WITHOUT ANY WARRANTY; without even the implied warranty of
1478MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1479General Public License for more details.
74ad5c7f 1480
423a1f3c 1481You should have received a copy of the GNU General Public
fee0bd5f 1482License along with this library. If not, see <http://www.gnu.org/licenses/>.
74ad5c7f
KH
1483
1484 The author may be reached (Email) at the address mike@ai.mit.edu,
1485 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1486
74ad5c7f
KH
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  Returns NULL if the
   total byte count would overflow a size_t, or if malloc fails.  */
void *
calloc (register size_t nmemb, register size_t size)
{
  register void *result;
  size_t bytes;

  /* Guard against overflow in NMEMB * SIZE: the product used to be
     computed blindly, so a huge NMEMB could wrap around and yield a
     successful but far-too-small zeroed allocation.  */
  if (size != 0 && nmemb > SIZE_MAX / size)
    return NULL;

  bytes = nmemb * size;
  result = malloc (bytes);

  if (result != NULL)
    (void) memset (result, 0, bytes);

  return result;
}
1499/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1500This file is part of the GNU C Library.
1501
1502The GNU C Library is free software; you can redistribute it and/or modify
1503it under the terms of the GNU General Public License as published by
1504the Free Software Foundation; either version 2, or (at your option)
1505any later version.
1506
1507The GNU C Library is distributed in the hope that it will be useful,
1508but WITHOUT ANY WARRANTY; without even the implied warranty of
1509MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1510GNU General Public License for more details.
1511
1512You should have received a copy of the GNU General Public License
fee0bd5f 1513along with the GNU C Library. If not, see <http://www.gnu.org/licenses/>. */
74ad5c7f 1514
65f451d0
DN
1515/* uClibc defines __GNU_LIBRARY__, but it is not completely
1516 compatible. */
5e617bc2 1517#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
74ad5c7f 1518#define __sbrk sbrk
65f451d0 1519#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
74ad5c7f
KH
1520/* It is best not to declare this and cast its result on foreign operating
1521 systems with potentially hostile include files. */
1522
d0baac98 1523extern void *__sbrk (ptrdiff_t increment);
65f451d0 1524#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
74ad5c7f 1525
74ad5c7f
KH
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#if defined (CYGWIN)
  /* Before unexec has run, carve space out of the static bss sbrk
     area instead of the real break.  */
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    /* sbrk reports failure with (void *) -1, not NULL.  */
    return NULL;
  return result;
}
1544/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1545
1546This library is free software; you can redistribute it and/or
423a1f3c 1547modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1548published by the Free Software Foundation; either version 2 of the
1549License, or (at your option) any later version.
1550
1551This library is distributed in the hope that it will be useful,
1552but WITHOUT ANY WARRANTY; without even the implied warranty of
1553MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1554General Public License for more details.
74ad5c7f 1555
423a1f3c 1556You should have received a copy of the GNU General Public
fee0bd5f 1557License along with this library. If not, see <http://www.gnu.org/licenses/>. */
74ad5c7f 1558
d0baac98 1559void *(*__memalign_hook) (size_t size, size_t alignment);
74ad5c7f 1560
d0baac98
PE
/* Allocate SIZE bytes whose address is a multiple of ALIGNMENT.
   When padding is needed, the (exact, aligned) pointer pair is
   recorded in `_aligned_blocks' so that `free' can translate the
   aligned pointer back to the address malloc actually returned.
   NOTE(review): `size + alignment - 1' can wrap for huge arguments —
   presumably callers never pass such sizes; confirm.  */
void *
memalign (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (uintptr_t) result % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (uintptr_t) result % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      /* Reuse a vacant slot if one exists; otherwise grow the list.  */
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      /* Could not record the pair: report failure rather than hand out
	 a pointer `free' would not recognize.  */
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
1633
/* POSIX-style interface to `memalign': store the allocation in
   *MEMPTR and return 0, or return EINVAL/ENOMEM on failure.
   ALIGNMENT must be a nonzero power of two that is also a multiple
   of sizeof (void *).  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;
  int alignment_ok = (alignment != 0
		      && alignment % sizeof (void *) == 0
		      && (alignment & (alignment - 1)) == 0);

  if (!alignment_ok)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;
  return 0;
}
1652
74ad5c7f
KH
1653/* Allocate memory on a page boundary.
1654 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1655
1656This library is free software; you can redistribute it and/or
423a1f3c 1657modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1658published by the Free Software Foundation; either version 2 of the
1659License, or (at your option) any later version.
1660
1661This library is distributed in the hope that it will be useful,
1662but WITHOUT ANY WARRANTY; without even the implied warranty of
1663MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1664General Public License for more details.
74ad5c7f 1665
423a1f3c 1666You should have received a copy of the GNU General Public
fee0bd5f 1667License along with this library. If not, see <http://www.gnu.org/licenses/>.
74ad5c7f
KH
1668
1669 The author may be reached (Email) at the address mike@ai.mit.edu,
1670 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1671
d0baac98
PE
1672/* Allocate SIZE bytes on a page boundary. */
1673extern void *valloc (size_t);
74ad5c7f 1674
d0baac98
PE
1675#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
1676# include "getpagesize.h"
1677#elif !defined getpagesize
1678extern int getpagesize (void);
74ad5c7f
KH
1679#endif
1680
/* Cached page size; 0 until the first call to `valloc'.  */
static size_t pagesize;

/* Allocate SIZE bytes aligned on a page boundary.  */
void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return memalign (pagesize, size);
}
1691
a3ba27da
GM
1692#ifdef GC_MCHECK
1693
1694/* Standard debugging hooks for `malloc'.
1695 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1696 Written May 1989 by Mike Haertel.
1697
1698This library is free software; you can redistribute it and/or
423a1f3c 1699modify it under the terms of the GNU General Public License as
a3ba27da
GM
1700published by the Free Software Foundation; either version 2 of the
1701License, or (at your option) any later version.
1702
1703This library is distributed in the hope that it will be useful,
1704but WITHOUT ANY WARRANTY; without even the implied warranty of
1705MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1706General Public License for more details.
a3ba27da 1707
423a1f3c 1708You should have received a copy of the GNU General Public
fee0bd5f 1709License along with this library. If not, see <http://www.gnu.org/licenses/>.
a3ba27da
GM
1710
1711 The author may be reached (Email) at the address mike@ai.mit.edu,
1712 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1713
a3ba27da 1714#include <stdio.h>
a3ba27da
GM
1715
1716/* Old hook values. */
d0baac98
PE
1717static void (*old_free_hook) (void *ptr);
1718static void *(*old_malloc_hook) (size_t size);
1719static void *(*old_realloc_hook) (void *ptr, size_t size);
a3ba27da
GM
1720
1721/* Function to call when something awful happens. */
f57e2426 1722static void (*abortfunc) (enum mcheck_status);
a3ba27da
GM
1723
/* Arbitrary magical numbers.  */
#define MAGICWORD (SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)	/* Live block header.  */
#define MAGICFREE (SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)	/* Freed block header.  */
#define MAGICBYTE ((char) 0xd7)		/* Guard byte just past the user data.  */
#define MALLOCFLOOD ((char) 0x93)	/* Fill pattern for fresh allocations.  */
#define FREEFLOOD ((char) 0x95)		/* Fill pattern for freed memory.  */

/* Bookkeeping header placed immediately before each user block.  */
struct hdr
  {
    size_t size;		/* Exact size requested by user.  */
    size_t magic;		/* Magic number to check header integrity.  */
  };
1736
a3ba27da 1737static enum mcheck_status
d0baac98 1738checkhdr (const struct hdr *hdr)
a3ba27da
GM
1739{
1740 enum mcheck_status status;
1741 switch (hdr->magic)
1742 {
1743 default:
1744 status = MCHECK_HEAD;
1745 break;
1746 case MAGICFREE:
1747 status = MCHECK_FREE;
1748 break;
1749 case MAGICWORD:
1750 if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
1751 status = MCHECK_TAIL;
1752 else
1753 status = MCHECK_OK;
1754 break;
1755 }
1756 if (status != MCHECK_OK)
1757 (*abortfunc) (status);
1758 return status;
1759}
1760
/* __free_hook replacement: validate PTR's header, mark it freed,
   flood the payload with FREEFLOOD, then free the underlying header
   block with the original hook temporarily restored.  */
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);			/* Calls abortfunc on corruption.  */
      hdr->magic = MAGICFREE;		/* So a second free is detected.  */
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  /* Swap the original hook back in so the nested free does not
     re-enter this function.  */
  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
1780
d0baac98
PE
/* __malloc_hook replacement: allocate room for a `struct hdr' plus one
   trailing guard byte, fill in the header, flood the payload with
   MALLOCFLOOD, and return the address just past the header.  */
static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  /* Swap the original hook back in so the nested malloc does not
     re-enter this function.  */
  __malloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;	/* Guard byte to catch overruns.  */
  memset (hdr + 1, MALLOCFLOOD, size);
  return hdr + 1;
}
1798
d0baac98
PE
/* __realloc_hook replacement: validate the old header, flood any
   shrunk-away tail with FREEFLOOD, resize the underlying header block
   with all three original hooks temporarily restored, then rebuild
   the header/guard byte and flood any newly grown tail with
   MALLOCFLOOD.  */
static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);			/* Calls abortfunc on corruption.  */
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  /* Restore all the original hooks: realloc may internally call
     malloc and free, and none of those must re-enter these hooks.  */
  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;	/* Re-plant the guard byte.  */
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}
1832
1833static void
d0baac98 1834mabort (enum mcheck_status status)
a3ba27da
GM
1835{
1836 const char *msg;
1837 switch (status)
1838 {
1839 case MCHECK_OK:
1840 msg = "memory is consistent, library is buggy";
1841 break;
1842 case MCHECK_HEAD:
1843 msg = "memory clobbered before allocated block";
1844 break;
1845 case MCHECK_TAIL:
1846 msg = "memory clobbered past end of allocated block";
1847 break;
1848 case MCHECK_FREE:
1849 msg = "block freed twice";
1850 break;
1851 default:
1852 msg = "bogus mcheck_status, library is buggy";
1853 break;
1854 }
1855#ifdef __GNU_LIBRARY__
1856 __libc_fatal (msg);
1857#else
1858 fprintf (stderr, "mcheck: %s\n", msg);
1859 fflush (stderr);
1860 abort ();
1861#endif
1862}
1863
/* Nonzero once the mcheck hooks have been successfully installed.  */
static int mcheck_used = 0;

/* Install the consistency-checking hooks, directing failures to FUNC
   (or to the default `mabort' when FUNC is NULL).  Must be called
   before malloc is first used.  Returns 0 on success, -1 if the
   hooks could not be installed.  */
int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      /* Save the current hooks so the replacements can chain to them.  */
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
1885
1886enum mcheck_status
d0baac98 1887mprobe (void *ptr)
a3ba27da
GM
1888{
1889 return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
1890}
1891
1892#endif /* GC_MCHECK */