/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <config.h>

#ifdef HAVE_PTHREAD
#define USE_PTHREAD
#endif

#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef WINDOWSNT
#include <w32heap.h>	/* for sbrk */
#endif

#ifdef __cplusplus
extern "C"
{
#endif

#include <stddef.h>

/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size);
/* Re-allocate the previously allocated block
   in PTR, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size);
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size);
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#ifdef MSDOS
extern void *aligned_alloc (size_t, size_t);
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread (void);
#endif

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof (int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

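/* For example, on a machine with 32-bit int, INT_BIT is 32, so
   BLOCKLOG is 12 and BLOCKSIZE is 4096.  A request for at most
   BLOCKSIZE / 2 bytes is served from a fragment list (100 bytes
   rounds up to a 128-byte fragment), while BLOCKIFY (5000) == 2, so
   a 5000-byte request receives two whole blocks.  */
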
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP		(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a large (multiblock) object, or positive giving the
           logarithm to the base two of the fragment size.  */
        int type;
        union
          {
            struct
              {
                size_t nfree;	/* Free frags in a fragmented block.  */
                size_t first;	/* First free fragment of the block.  */
              } frag;
            /* For a large object, in its first block, this has the number
               of blocks in the object.  In the other blocks, this has a
               negative number which says how far back the first block is.  */
            ptrdiff_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        size_t size;	/* Size (in blocks) of a free cluster.  */
        size_t next;	/* Index of next free cluster.  */
        size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((void *) (((B) - 1) * BLOCKSIZE + _heapbase))

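/* For example, with BLOCKSIZE == 4096 and _heapbase == 0x1000, the
   address 0x3000 lies in block (0x3000 - 0x1000) / 4096 + 1 == 3, and
   ADDRESS (3) maps back to 0x3000.  Block numbers start at 1, which
   leaves index 0 of _heapinfo free to anchor the free list.  */
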
/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;	/* The address that aligned_alloc returned.  */
    void *exact;	/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()						\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_malloc_mutex);		\
  } while (0)
#define UNLOCK()					\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_malloc_mutex);		\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern void *malloc_find_object_address (void *ptr);

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern void *(*__morecore) (ptrdiff_t size);

/* Default value of `__morecore'.  */
extern void *__default_morecore (ptrdiff_t size);

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) (void);

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) (void);
extern void (*__free_hook) (void *ptr);
extern void *(*__malloc_hook) (size_t size);
extern void *(*__realloc_hook) (void *ptr, size_t size);
extern void *(*__memalign_hook) (size_t size, size_t alignment);

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

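/* Illustrative use, assuming no allocation has happened yet:

     mcheck (NULL);      -- must precede the very first malloc
     p = malloc (40);
     ...
     if (mprobe (p) != MCHECK_OK)
       abort ();  */
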
/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);

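/* Illustrative use of the statistics interface:

     struct mstats s = mstats ();
     fprintf (stderr, "%lu of %lu heap bytes in use\n",
              (unsigned long) s.bytes_used, (unsigned long) s.bytes_total);  */
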
/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings (void *start, void (*warnfun) (const char *));

#ifdef __cplusplus
}
#endif

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <errno.h>

/* On Cygwin there are two heaps.  temacs uses the static heap (defined
   in sheap.c and managed with bss_sbrk), and the dumped emacs uses the
   Cygwin heap (managed with sbrk).  When emacs starts on Cygwin, it
   reinitializes malloc, and we save the old info for use by free and
   realloc if they're called with a pointer into the static heap.

   Currently (2011-08-16) the Cygwin build doesn't use ralloc.c; if
   this is changed in the future, we'll have to similarly deal with
   reinitializing ralloc.  */
#ifdef CYGWIN
extern void *bss_sbrk (ptrdiff_t size);
extern int bss_sbrk_did_unexec;
char *bss_sbrk_heapbase;	/* _heapbase for static heap */
malloc_info *bss_sbrk_heapinfo;	/* _heapinfo for static heap */
#endif
void *(*__morecore) (ptrdiff_t size) = __default_morecore;

/* Debugging hook for `malloc'.  */
void *(*__malloc_hook) (size_t size);

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static size_t heapsize;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) (void);
void (*__after_morecore_hook) (void);

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
                    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
        abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif

/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}

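/* For instance, with BLOCKSIZE == 4096: if __morecore returns 0x1234,
   then adj == 0x234, so align requests 4096 - 0x234 further bytes
   from __morecore and returns 0x2000, the next block boundary.  */
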
/* Get SIZE bytes, if we can get them starting at POSITION.
   Return the address of the space we got.
   If we cannot get space at POSITION, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}

/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

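/* For example, if the info table occupies three blocks starting at
   block B, the loop above leaves _heapinfo[B].busy.info.size == 3 and
   stores -1 and -2 in blocks B + 1 and B + 2, so the first block of
   the object can be found from any of its blocks by adding the stored
   value to the block number.  */
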
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
                  malloc_atfork_handler_parent,
                  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif

static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

#ifdef CYGWIN
  if (bss_sbrk_did_unexec)
    /* We're reinitializing the dumped emacs.  */
    {
      bss_sbrk_heapbase = _heapbase;
      bss_sbrk_heapinfo = _heapinfo;
      memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
    }
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
         added blocks in the table itself, as we hope to place them in
         existing free space, which is already covered by part of the
         existing table.  */
      newsize = heapsize;
      do
        newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
         from realloc in the case of growing a large block, because the
         block being grown is momentarily marked as free.  In this case
         _heaplimit is zero so we know not to reuse space for internal
         allocation.  */
      if (_heaplimit != 0)
        {
          /* First try to allocate the new info table in core we already
             have, in the usual way using realloc.  If realloc cannot
             extend it in place or relocate it to existing sufficient core,
             we will get called again, and the code above will notice the
             `morecore_recursing' flag and return null.  */
          int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
          newinfo = _realloc_internal_nolock (_heapinfo,
                                              newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
            errno = save;
          else
            {
              /* We found some space in core, and realloc has put the old
                 table's blocks on the free list.  Now zero the new part
                 of the table and install the new table location.  */
              memset (&newinfo[heapsize], 0,
                      (newsize - heapsize) * sizeof (malloc_info));
              _heapinfo = newinfo;
              heapsize = newsize;
              goto got_heap;
            }
        }

      /* Allocate new space for the malloc info table.  */
      while (1)
        {
          newinfo = align (newsize * sizeof (malloc_info));

          /* Did it fail?  */
          if (newinfo == NULL)
            {
              (*__morecore) (-size);
              return NULL;
            }

          /* Is it big enough to record status for its own space?
             If so, we win.  */
          if ((size_t) BLOCK ((char *) newinfo
                              + newsize * sizeof (malloc_info))
              < newsize)
            break;

          /* Must try again.  First give back most of what we just got.  */
          (*__morecore) (- newsize * sizeof (malloc_info));
          newsize *= 2;
        }

      /* Copy the old table to the beginning of the new,
         and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

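/* For example, with 32-bit int the initial table has
   HEAP / BLOCKSIZE == 1024 entries; when the heap grows past the span
   it describes, the loop above doubles newsize (2048, 4096, ...)
   until the table covers the new break, and the old table's blocks
   are given back to the heap via _free_internal_nolock.  */
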
/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;

      /* Look in the fragment lists for a
         free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters.  */
          result = next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first =
              (uintptr_t) next->next % BLOCKSIZE >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
          result = _malloc_internal_nolock (BLOCKSIZE);
          PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
          result = _malloc_internal_nolock (BLOCKSIZE);
#else
          result = malloc (BLOCKSIZE);
#endif
          if (result == NULL)
            {
              PROTECT_MALLOC_STATE (1);
              goto out;
            }

          /* Link all fragments but the first into the free list.  */
          next = (struct list *) ((char *) result + (1 << log));
          next->next = NULL;
          next->prev = &_fraghead[log];
          _fraghead[log].next = next;

          for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Get a little extra.  */
              size_t wantblocks = blocks + __malloc_extra_blocks;
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              /* Check to see if the new core will be contiguous with the
                 final free block; if so we don't need to get as much.  */
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space.  */
                  block + wantblocks <= heapsize &&
                  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
                                        ADDRESS (block + lastblocks)))
                {
                  /* We got it contiguously.  Which block we are extending
                     (the `final free block' referred to above) might have
                     changed, if it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (wantblocks - lastblocks);
                  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
              result = morecore_nolock (wantblocks * BLOCKSIZE);
              if (result == NULL)
                goto out;
              block = BLOCK (result);
              /* Put the new block at the end of the free list.  */
              _heapinfo[block].free.size = wantblocks;
              _heapinfo[block].free.prev = _heapinfo[0].free.prev;
              _heapinfo[block].free.next = 0;
              _heapinfo[0].free.prev = block;
              _heapinfo[_heapinfo[block].free.prev].free.next = block;
              ++_chunks_free;
              /* Now loop to use some of that block for this allocation.  */
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list.  */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list.  */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment.  */
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

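/* Illustration, assuming BLOCKSIZE == 4096: a request for 100 bytes
   takes the small path with log == 7, popping a 128-byte fragment
   from _fraghead[7] (splitting a fresh block into 4096 >> 7 == 32
   fragments first if that list is empty), while a request for 5000
   bytes takes the large path and carves BLOCKIFY (5000) == 2 blocks
   out of the circular free list.  */
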
void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif

/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Debugging hook for free.  */
void (*__free_hook) (void *__ptr);

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* We're being asked to free something in the static heap.  */
    return;
#endif

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;	/* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          size_t info_block = BLOCK (_heapinfo);
          size_t info_blocks = _heapinfo[info_block].busy.info.size;
          size_t prev_block = _heapinfo[block].free.prev;
          size_t prev_blocks = _heapinfo[prev_block].free.size;
          size_t next_block = _heapinfo[block].free.next;
          size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold)))
              )
            {
              malloc_info *newinfo;
              size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal_nolock (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
          _free_internal_nolock (ADDRESS (block));
#else
          free (ADDRESS (block));
#endif
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first =
            (uintptr_t) ptr % BLOCKSIZE >> type;
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

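/* For example, freeing a two-block object that sits directly before a
   three-block free cluster produces a single five-block cluster; if
   that cluster ends at _heaplimit and spans at least
   lesscore_threshold blocks, its bytes are handed back to the system
   through a negative __morecore call.  */
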
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (void *ptr)
{
  void (*hook) (void *) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif

/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef min
#define min(A, B) ((A) < (B) ? (A) : (B))
#endif

/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
#ifdef CYGWIN
void *
special_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  oldsize =
    type == 0 ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
  result = _malloc_internal_nolock (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}
#endif

/* Debugging hook for realloc.  */
void *(*__realloc_hook) (void *ptr, size_t size);

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* ptr points into the static heap.  */
    return special_realloc (ptr, size);
#endif

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal_nolock (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal_nolock (ptr);
              goto out;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list.  */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal_nolock (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal_nolock (ptr);
          result = _malloc_internal_nolock (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
              else
                {
                  void *previous
                    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
                  _free_internal_nolock (previous);
                }
              goto out;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
          size <= (size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old.  */
          result = _malloc_internal_nolock (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (size_t) 1 << type));
          _free_internal_nolock (ptr);
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}

/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}

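/* The overflow test above works because when NMEMB * SIZE wraps
   around, the truncated product divided by SIZE can no longer equal
   NMEMB.  For instance, with 32-bit size_t, calloc (0x10000, 0x10001)
   wraps to bytes == 0x10000, and 0x10000 / 0x10001 == 0 != 0x10000,
   so the call fails with ENOMEM instead of returning a too-small
   block.  */
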
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#if defined (CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}

/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.  */

d0baac98 1567void *(*__memalign_hook) (size_t size, size_t alignment);
74ad5c7f 1568
d0baac98 1569void *
aea07e2c 1570aligned_alloc (size_t alignment, size_t size)
74ad5c7f 1571{
d0baac98
PE
1572 void *result;
1573 size_t adj, lastadj;
1574 void *(*hook) (size_t, size_t) = __memalign_hook;
74ad5c7f 1575
8d0d84d2
YM
1576 if (hook)
1577 return (*hook) (alignment, size);
74ad5c7f
KH
1578
1579 /* Allocate a block with enough extra space to pad the block with up to
1580 (ALIGNMENT - 1) bytes if necessary. */
aea07e2c
PE
1581 if (- size < alignment)
1582 {
1583 errno = ENOMEM;
1584 return NULL;
1585 }
74ad5c7f
KH
1586 result = malloc (size + alignment - 1);
1587 if (result == NULL)
1588 return NULL;
1589
1590 /* Figure out how much we will need to pad this particular block
1591 to achieve the required alignment. */
d0baac98 1592 adj = (uintptr_t) result % alignment;
74ad5c7f
KH
1593
1594 do
1595 {
1596 /* Reallocate the block with only as much excess as it needs. */
1597 free (result);
1598 result = malloc (adj + size);
1599 if (result == NULL) /* Impossible unless interrupted. */
1600 return NULL;
1601
1602 lastadj = adj;
d0baac98 1603 adj = (uintptr_t) result % alignment;
74ad5c7f
KH
1604 /* It's conceivable we might have been so unlucky as to get a
1605 different block with weaker alignment. If so, this block is too
1606 short to contain SIZE after alignment correction. So we must
1607 try again and get another block, slightly larger. */
1608 } while (adj > lastadj);
1609
1610 if (adj != 0)
1611 {
1612 /* Record this block in the list of aligned blocks, so that `free'
1613 can identify the pointer it is passed, which will be in the middle
1614 of an allocated block. */
1615
1616 struct alignlist *l;
8d0d84d2 1617 LOCK_ALIGNED_BLOCKS ();
74ad5c7f
KH
1618 for (l = _aligned_blocks; l != NULL; l = l->next)
1619 if (l->aligned == NULL)
1620 /* This slot is free. Use it. */
1621 break;
1622 if (l == NULL)
1623 {
38182d90 1624 l = malloc (sizeof *l);
8d0d84d2 1625 if (l != NULL)
74ad5c7f 1626 {
8d0d84d2
YM
1627 l->next = _aligned_blocks;
1628 _aligned_blocks = l;
74ad5c7f 1629 }
74ad5c7f 1630 }
8d0d84d2
YM
1631 if (l != NULL)
1632 {
1633 l->exact = result;
1634 result = l->aligned = (char *) result + alignment - adj;
1635 }
1636 UNLOCK_ALIGNED_BLOCKS ();
1637 if (l == NULL)
1638 {
1639 free (result);
1640 result = NULL;
1641 }
74ad5c7f
KH
1642 }
1643
1644 return result;
1645}
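
/* Worked example (hypothetical addresses, for illustration only): with
   ALIGNMENT == 16, suppose the retry loop settles on a block at address
   0x1003.  Then ADJ == 0x1003 % 16 == 3, the caller receives
   0x1003 + (16 - 3) == 0x1010, which is 16-byte aligned, and the pair
   (exact == 0x1003, aligned == 0x1010) is recorded in _aligned_blocks
   so that `free' can map the aligned pointer back to the block start:

     char *p = aligned_alloc (16, 100);
     free (p);
*/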

/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */

void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}

int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
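
/* Usage sketch: unlike `aligned_alloc', the result comes back through
   MEMPTR and the return value is an error code rather than errno:

     void *buf;
     int err = posix_memalign (&buf, 64, 1000);
     if (err == 0)
       free (buf);

   EINVAL is returned when ALIGNMENT is zero, not a power of two, or not
   a multiple of sizeof (void *); ENOMEM when memory is exhausted.  */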

/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);

#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
# include "getpagesize.h"
#elif !defined getpagesize
extern int getpagesize (void);
#endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
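
/* Usage sketch: the first call latches the system page size into
   `pagesize', so later calls skip the query.  On a system with
   4096-byte pages (a hypothetical value), every returned block starts
   on a 4096-byte boundary:

     char *page = valloc (10);
     if (page != NULL)
       free (page);
*/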

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <stdio.h>

/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD (SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE (SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE ((char) 0xd7)
#define MALLOCFLOOD ((char) 0x93)
#define FREEFLOOD ((char) 0x95)

struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* Magic number to check header integrity.  */
  };
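
/* The hooks below lay out each allocation as follows: a `struct hdr'
   immediately precedes the pointer handed to the user, and one extra
   guard byte holding MAGICBYTE follows the user's SIZE bytes:

     | size | magic | SIZE bytes of user data ... | MAGICBYTE |
     ^ hdr          ^ hdr + 1 (returned to user)  ^ ((char *) &hdr[1])[size]

   `checkhdr' classifies a block by this layout: a wrong magic word in
   the header yields MCHECK_HEAD, a clobbered guard byte MCHECK_TAIL,
   and MAGICFREE marks a block that has already been freed.  */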

static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
1782
a3ba27da 1783static void
d0baac98 1784freehook (void *ptr)
a3ba27da
GM
1785{
1786 struct hdr *hdr;
177c0ea7 1787
a3ba27da
GM
1788 if (ptr)
1789 {
1790 hdr = ((struct hdr *) ptr) - 1;
1791 checkhdr (hdr);
1792 hdr->magic = MAGICFREE;
0e926e56 1793 memset (ptr, FREEFLOOD, hdr->size);
a3ba27da
GM
1794 }
1795 else
1796 hdr = NULL;
177c0ea7 1797
a3ba27da
GM
1798 __free_hook = old_free_hook;
1799 free (hdr);
1800 __free_hook = freehook;
1801}
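
/* Each hook temporarily restores the saved hook around the call into
   the real allocator; otherwise the free/malloc/realloc call inside the
   hook would re-enter the hook and recurse.  `mallochook' and
   `reallochook' below follow the same save-call-restore pattern.  */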

static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}

static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}

static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (void *ptr)
{
  /* PTR is the pointer that was handed to the user; the header that
     checkhdr examines sits immediately before it (see mallochook
     above), so step back over it rather than passing PTR directly.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}
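
/* Usage sketch (GC_MCHECK builds only): install the hooks before the
   first allocation, then consistency-check any live block on demand.
   Passing NULL to mcheck selects the default handler `mabort':

     if (mcheck (NULL) == 0)
       {
         char *p = malloc (10);
         p[10] = '!';
         mprobe (p);
       }

   Here p[10] overwrites the trailing guard byte, so mprobe reports
   MCHECK_TAIL through abortfunc.  */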

#endif /* GC_MCHECK */