remove `declare' macro
[bpt/emacs.git] / src / gmalloc.c
CommitLineData
74ad5c7f 1/* Declarations for `malloc' and friends.
ba318903 2 Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2014 Free
ab422c4d 3 Software Foundation, Inc.
74ad5c7f
KH
4 Written May 1989 by Mike Haertel.
5
6This library is free software; you can redistribute it and/or
423a1f3c 7modify it under the terms of the GNU General Public License as
74ad5c7f
KH
8published by the Free Software Foundation; either version 2 of the
9License, or (at your option) any later version.
10
11This library is distributed in the hope that it will be useful,
12but WITHOUT ANY WARRANTY; without even the implied warranty of
13MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 14General Public License for more details.
74ad5c7f 15
423a1f3c 16You should have received a copy of the GNU General Public
fee0bd5f 17License along with this library. If not, see <http://www.gnu.org/licenses/>.
74ad5c7f
KH
18
19 The author may be reached (Email) at the address mike@ai.mit.edu,
20 or (US mail) as Mike Haertel c/o Free Software Foundation. */
21
74ad5c7f 22#include <config.h>
74ad5c7f 23
ae9e757a 24#ifdef HAVE_PTHREAD
8d0d84d2
YM
25#define USE_PTHREAD
26#endif
27
74ad5c7f 28#include <string.h>
74ad5c7f 29#include <limits.h>
d0baac98 30#include <stdint.h>
74ad5c7f 31#include <unistd.h>
74ad5c7f 32
2f213514
YM
33#ifdef USE_PTHREAD
34#include <pthread.h>
35#endif
36
62aba0d4
FP
37#ifdef WINDOWSNT
38#include <w32heap.h> /* for sbrk */
39#endif
40
201572ec
EZ
41#ifdef emacs
42extern void emacs_abort (void);
43#endif
44
74ad5c7f
KH
45#ifdef __cplusplus
46extern "C"
47{
48#endif
49
74ad5c7f 50#include <stddef.h>
74ad5c7f
KH
51
52
53/* Allocate SIZE bytes of memory. */
74fde0f4 54extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
74ad5c7f 55/* Re-allocate the previously allocated block
d0baac98 56 in ptr, making the new block SIZE bytes long. */
74fde0f4 57extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
74ad5c7f 58/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
74fde0f4 59extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
74ad5c7f 60/* Free a block allocated by `malloc', `realloc' or `calloc'. */
d0baac98 61extern void free (void *ptr);
74ad5c7f
KH
62
63/* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
d0baac98 64#ifdef MSDOS
aea07e2c 65extern void *aligned_alloc (size_t, size_t);
d0baac98
PE
66extern void *memalign (size_t, size_t);
67extern int posix_memalign (void **, size_t, size_t);
74ad5c7f
KH
68#endif
69
3ceeb306
YM
70#ifdef USE_PTHREAD
71/* Set up mutexes and make malloc etc. thread-safe. */
d0baac98 72extern void malloc_enable_thread (void);
3ceeb306 73#endif
74ad5c7f 74
bd650c24
EZ
75#ifdef emacs
76extern void emacs_abort (void);
77#endif
78
74ad5c7f
KH
79/* The allocator divides the heap into blocks of fixed size; large
80 requests receive one or more whole blocks, and small requests
81 receive a fragment of a block. Fragment sizes are powers of two,
82 and all fragments of a block are the same size. When all the
83 fragments in a block have been freed, the block itself is freed. */
5e617bc2 84#define INT_BIT (CHAR_BIT * sizeof (int))
74ad5c7f
KH
85#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
86#define BLOCKSIZE (1 << BLOCKLOG)
87#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
88
89/* Determine the amount of memory spanned by the initial heap table
90 (not an absolute limit). */
91#define HEAP (INT_BIT > 16 ? 4194304 : 65536)
92
93/* Number of contiguous free blocks allowed to build up at the end of
94 memory before they will be returned to the system. */
95#define FINAL_FREE_BLOCKS 8
96
97/* Data structure giving per-block information. */
98typedef union
99 {
100 /* Heap information for a busy block. */
101 struct
102 {
103 /* Zero for a large (multiblock) object, or positive giving the
104 logarithm to the base two of the fragment size. */
105 int type;
106 union
107 {
108 struct
109 {
d0baac98
PE
110 size_t nfree; /* Free frags in a fragmented block. */
111 size_t first; /* First free fragment of the block. */
74ad5c7f
KH
112 } frag;
113 /* For a large object, in its first block, this has the number
114 of blocks in the object. In the other blocks, this has a
115 negative number which says how far back the first block is. */
d0baac98 116 ptrdiff_t size;
74ad5c7f
KH
117 } info;
118 } busy;
119 /* Heap information for a free block
120 (that may be the first of a free cluster). */
121 struct
122 {
d0baac98
PE
123 size_t size; /* Size (in blocks) of a free cluster. */
124 size_t next; /* Index of next free cluster. */
125 size_t prev; /* Index of previous free cluster. */
74ad5c7f
KH
126 } free;
127 } malloc_info;
128
129/* Pointer to first block of the heap. */
130extern char *_heapbase;
131
132/* Table indexed by block number giving per-block information. */
133extern malloc_info *_heapinfo;
134
135/* Address to block number and vice versa. */
136#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
d0baac98 137#define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
74ad5c7f
KH
138
139/* Current search index for the heap table. */
d0baac98 140extern size_t _heapindex;
74ad5c7f
KH
141
142/* Limit of valid info table indices. */
d0baac98 143extern size_t _heaplimit;
74ad5c7f
KH
144
145/* Doubly linked lists of free fragments. */
146struct list
147 {
148 struct list *next;
149 struct list *prev;
150 };
151
152/* Free list headers for each fragment size. */
153extern struct list _fraghead[];
154
aea07e2c 155/* List of blocks allocated with aligned_alloc and friends. */
74ad5c7f
KH
156struct alignlist
157 {
158 struct alignlist *next;
aea07e2c 159 void *aligned; /* The address that aligned_alloc returned. */
d0baac98 160 void *exact; /* The address that malloc returned. */
74ad5c7f
KH
161 };
162extern struct alignlist *_aligned_blocks;
163
164/* Instrumentation. */
d0baac98
PE
165extern size_t _chunks_used;
166extern size_t _bytes_used;
167extern size_t _chunks_free;
168extern size_t _bytes_free;
74ad5c7f
KH
169
170/* Internal versions of `malloc', `realloc', and `free'
171 used when these functions need to call each other.
172 They are the same but don't call the hooks. */
d0baac98
PE
173extern void *_malloc_internal (size_t);
174extern void *_realloc_internal (void *, size_t);
175extern void _free_internal (void *);
176extern void *_malloc_internal_nolock (size_t);
177extern void *_realloc_internal_nolock (void *, size_t);
178extern void _free_internal_nolock (void *);
74ad5c7f 179
2f213514 180#ifdef USE_PTHREAD
8d0d84d2 181extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
3ceeb306
YM
182extern int _malloc_thread_enabled_p;
183#define LOCK() \
184 do { \
185 if (_malloc_thread_enabled_p) \
186 pthread_mutex_lock (&_malloc_mutex); \
187 } while (0)
188#define UNLOCK() \
189 do { \
190 if (_malloc_thread_enabled_p) \
191 pthread_mutex_unlock (&_malloc_mutex); \
192 } while (0)
193#define LOCK_ALIGNED_BLOCKS() \
194 do { \
195 if (_malloc_thread_enabled_p) \
196 pthread_mutex_lock (&_aligned_blocks_mutex); \
197 } while (0)
198#define UNLOCK_ALIGNED_BLOCKS() \
199 do { \
200 if (_malloc_thread_enabled_p) \
201 pthread_mutex_unlock (&_aligned_blocks_mutex); \
202 } while (0)
2f213514
YM
203#else
204#define LOCK()
205#define UNLOCK()
8d0d84d2
YM
206#define LOCK_ALIGNED_BLOCKS()
207#define UNLOCK_ALIGNED_BLOCKS()
2f213514
YM
208#endif
209
74ad5c7f
KH
210/* Given an address in the middle of a malloc'd object,
211 return the address of the beginning of the object. */
d0baac98 212extern void *malloc_find_object_address (void *ptr);
74ad5c7f
KH
213
214/* Underlying allocation function; successive calls should
215 return contiguous pieces of memory. */
d0baac98 216extern void *(*__morecore) (ptrdiff_t size);
74ad5c7f
KH
217
218/* Default value of `__morecore'. */
d0baac98 219extern void *__default_morecore (ptrdiff_t size);
74ad5c7f
KH
220
221/* If not NULL, this function is called after each time
222 `__morecore' is called to increase the data size. */
d0baac98 223extern void (*__after_morecore_hook) (void);
74ad5c7f
KH
224
225/* Number of extra blocks to get each time we ask for more core.
226 This reduces the frequency of calling `(*__morecore)'. */
d0baac98 227extern size_t __malloc_extra_blocks;
74ad5c7f
KH
228
229/* Nonzero if `malloc' has been called and done its initialization. */
230extern int __malloc_initialized;
231/* Function called to initialize malloc data structures. */
d0baac98 232extern int __malloc_initialize (void);
74ad5c7f
KH
233
234/* Hooks for debugging versions. */
d0baac98
PE
235extern void (*__malloc_initialize_hook) (void);
236extern void (*__free_hook) (void *ptr);
237extern void *(*__malloc_hook) (size_t size);
238extern void *(*__realloc_hook) (void *ptr, size_t size);
239extern void *(*__memalign_hook) (size_t size, size_t alignment);
74ad5c7f
KH
240
241/* Return values for `mprobe': these are the kinds of inconsistencies that
242 `mcheck' enables detection of. */
243enum mcheck_status
244 {
245 MCHECK_DISABLED = -1, /* Consistency checking is not turned on. */
246 MCHECK_OK, /* Block is fine. */
247 MCHECK_FREE, /* Block freed twice. */
248 MCHECK_HEAD, /* Memory before the block was clobbered. */
249 MCHECK_TAIL /* Memory after the block was clobbered. */
250 };
251
252/* Activate a standard collection of debugging hooks. This must be called
253 before `malloc' is ever called. ABORTFUNC is called with an error code
254 (see enum above) when an inconsistency is detected. If ABORTFUNC is
255 null, the standard function prints on stderr and then calls `abort'. */
d0baac98 256extern int mcheck (void (*abortfunc) (enum mcheck_status));
74ad5c7f
KH
257
258/* Check for aberrations in a particular malloc'd block. You must have
259 called `mcheck' already. These are the same checks that `mcheck' does
260 when you free or reallocate a block. */
d0baac98 261extern enum mcheck_status mprobe (void *ptr);
74ad5c7f
KH
262
263/* Activate a standard collection of tracing hooks. */
d0baac98
PE
264extern void mtrace (void);
265extern void muntrace (void);
74ad5c7f
KH
266
267/* Statistics available to the user. */
268struct mstats
269 {
d0baac98
PE
270 size_t bytes_total; /* Total size of the heap. */
271 size_t chunks_used; /* Chunks allocated by the user. */
272 size_t bytes_used; /* Byte total of user-allocated chunks. */
273 size_t chunks_free; /* Chunks in the free list. */
274 size_t bytes_free; /* Byte total of chunks in the free list. */
74ad5c7f
KH
275 };
276
277/* Pick up the current statistics. */
d0baac98 278extern struct mstats mstats (void);
74ad5c7f
KH
279
280/* Call WARNFUN with a warning message when memory usage is high. */
d0baac98 281extern void memory_warnings (void *start, void (*warnfun) (const char *));
74ad5c7f
KH
282
283#ifdef __cplusplus
284}
285#endif
286
74ad5c7f
KH
287/* Memory allocator `malloc'.
288 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
289 Written May 1989 by Mike Haertel.
290
291This library is free software; you can redistribute it and/or
423a1f3c 292modify it under the terms of the GNU General Public License as
74ad5c7f
KH
293published by the Free Software Foundation; either version 2 of the
294License, or (at your option) any later version.
295
296This library is distributed in the hope that it will be useful,
297but WITHOUT ANY WARRANTY; without even the implied warranty of
298MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 299General Public License for more details.
74ad5c7f 300
423a1f3c 301You should have received a copy of the GNU General Public
fee0bd5f 302License along with this library. If not, see <http://www.gnu.org/licenses/>.
74ad5c7f
KH
303
304 The author may be reached (Email) at the address mike@ai.mit.edu,
305 or (US mail) as Mike Haertel c/o Free Software Foundation. */
306
74ad5c7f
KH
307#include <errno.h>
308
a4579d33
KB
309/* On Cygwin there are two heaps. temacs uses the static heap
310 (defined in sheap.c and managed with bss_sbrk), and the dumped
311 emacs uses the Cygwin heap (managed with sbrk). When emacs starts
312 on Cygwin, it reinitializes malloc, and we save the old info for
313 use by free and realloc if they're called with a pointer into the
db76dd85
KB
314 static heap.
315
316 Currently (2011-08-16) the Cygwin build doesn't use ralloc.c; if
317 this is changed in the future, we'll have to similarly deal with
318 reinitializing ralloc. */
a4579d33 319#ifdef CYGWIN
d0baac98 320extern void *bss_sbrk (ptrdiff_t size);
ef6d1039 321extern int bss_sbrk_did_unexec;
a4579d33
KB
322char *bss_sbrk_heapbase; /* _heapbase for static heap */
323malloc_info *bss_sbrk_heapinfo; /* _heapinfo for static heap */
ef6d1039 324#endif
d0baac98 325void *(*__morecore) (ptrdiff_t size) = __default_morecore;
74ad5c7f
KH
326
327/* Debugging hook for `malloc'. */
d0baac98 328void *(*__malloc_hook) (size_t size);
74ad5c7f
KH
329
330/* Pointer to the base of the first block. */
331char *_heapbase;
332
333/* Block information table. Allocated with align/__free (not malloc/free). */
334malloc_info *_heapinfo;
335
336/* Number of info entries. */
d0baac98 337static size_t heapsize;
74ad5c7f
KH
338
339/* Search index in the info table. */
d0baac98 340size_t _heapindex;
74ad5c7f
KH
341
342/* Limit of valid info table indices. */
d0baac98 343size_t _heaplimit;
74ad5c7f
KH
344
345/* Free lists for each fragment size. */
346struct list _fraghead[BLOCKLOG];
347
348/* Instrumentation. */
d0baac98
PE
349size_t _chunks_used;
350size_t _bytes_used;
351size_t _chunks_free;
352size_t _bytes_free;
74ad5c7f
KH
353
354/* Are you experienced? */
355int __malloc_initialized;
356
d0baac98 357size_t __malloc_extra_blocks;
74ad5c7f 358
d0baac98
PE
359void (*__malloc_initialize_hook) (void);
360void (*__after_morecore_hook) (void);
74ad5c7f 361
5dcab13e
GM
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;		/* Is the table currently read-only?  */
static size_t last_state_size;		/* Byte size covered last time.  */
static malloc_info *last_heapinfo;	/* Table address last time.  */

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo && last_heapinfo && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
407
74ad5c7f
KH
408
409/* Aligned allocation. */
d0baac98
PE
410static void *
411align (size_t size)
74ad5c7f 412{
d0baac98
PE
413 void *result;
414 ptrdiff_t adj;
74ad5c7f 415
ceeb3d7d 416 /* align accepts an unsigned argument, but __morecore accepts a
d0baac98
PE
417 signed one. This could lead to trouble if SIZE overflows the
418 ptrdiff_t type accepted by __morecore. We just punt in that
ceeb3d7d 419 case, since they are requesting a ludicrous amount anyway. */
d0baac98 420 if (PTRDIFF_MAX < size)
ceeb3d7d
EZ
421 result = 0;
422 else
423 result = (*__morecore) (size);
d0baac98 424 adj = (uintptr_t) result % BLOCKSIZE;
74ad5c7f
KH
425 if (adj != 0)
426 {
74ad5c7f 427 adj = BLOCKSIZE - adj;
d0baac98 428 (*__morecore) (adj);
74ad5c7f
KH
429 result = (char *) result + adj;
430 }
431
432 if (__after_morecore_hook)
433 (*__after_morecore_hook) ();
434
435 return result;
436}
437
438/* Get SIZE bytes, if we can get them starting at END.
439 Return the address of the space we got.
440 If we cannot get space at END, fail and return 0. */
d0baac98
PE
441static void *
442get_contiguous_space (ptrdiff_t size, void *position)
74ad5c7f 443{
d0baac98
PE
444 void *before;
445 void *after;
74ad5c7f
KH
446
447 before = (*__morecore) (0);
448 /* If we can tell in advance that the break is at the wrong place,
449 fail now. */
450 if (before != position)
451 return 0;
452
453 /* Allocate SIZE bytes and get the address of them. */
454 after = (*__morecore) (size);
455 if (!after)
456 return 0;
457
458 /* It was not contiguous--reject it. */
459 if (after != position)
460 {
461 (*__morecore) (- size);
462 return 0;
463 }
464
465 return after;
466}
467
468
469/* This is called when `_heapinfo' and `heapsize' have just
470 been set to describe a new info table. Set up the table
471 to describe itself and account for it in the statistics. */
b0ab8123 472static void
55d4c1b2 473register_heapinfo (void)
74ad5c7f 474{
d0baac98 475 size_t block, blocks;
74ad5c7f
KH
476
477 block = BLOCK (_heapinfo);
478 blocks = BLOCKIFY (heapsize * sizeof (malloc_info));
479
480 /* Account for the _heapinfo block itself in the statistics. */
481 _bytes_used += blocks * BLOCKSIZE;
482 ++_chunks_used;
483
484 /* Describe the heapinfo block itself in the heapinfo. */
485 _heapinfo[block].busy.type = 0;
486 _heapinfo[block].busy.info.size = blocks;
487 /* Leave back-pointers for malloc_find_address. */
488 while (--blocks > 0)
489 _heapinfo[block + blocks].busy.info.size = -blocks;
490}
491
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

/* Fork handlers (registered with pthread_atfork below): take both
   allocator locks before fork so no thread holds them across the
   fork, then release them again in parent and child.  */

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif	/* USE_PTHREAD */
74ad5c7f 537
2f213514 538static void
d0baac98 539malloc_initialize_1 (void)
2f213514 540{
a3ba27da
GM
541#ifdef GC_MCHECK
542 mcheck (NULL);
543#endif
544
a4579d33
KB
545#ifdef CYGWIN
546 if (bss_sbrk_did_unexec)
547 /* we're reinitializing the dumped emacs */
548 {
549 bss_sbrk_heapbase = _heapbase;
550 bss_sbrk_heapinfo = _heapinfo;
551 memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
552 }
553#endif
554
74ad5c7f
KH
555 if (__malloc_initialize_hook)
556 (*__malloc_initialize_hook) ();
557
558 heapsize = HEAP / BLOCKSIZE;
d0baac98 559 _heapinfo = align (heapsize * sizeof (malloc_info));
74ad5c7f 560 if (_heapinfo == NULL)
2f213514 561 return;
74ad5c7f
KH
562 memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
563 _heapinfo[0].free.size = 0;
564 _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
565 _heapindex = 0;
566 _heapbase = (char *) _heapinfo;
567 _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));
568
569 register_heapinfo ();
570
571 __malloc_initialized = 1;
5dcab13e 572 PROTECT_MALLOC_STATE (1);
2f213514
YM
573 return;
574}
575
784c1472
JD
576/* Set everything up and remember that we have.
577 main will call malloc which calls this function. That is before any threads
578 or signal handlers has been set up, so we don't need thread protection. */
2f213514 579int
d0baac98 580__malloc_initialize (void)
2f213514 581{
2f213514
YM
582 if (__malloc_initialized)
583 return 0;
584
585 malloc_initialize_1 ();
2f213514
YM
586
587 return __malloc_initialized;
74ad5c7f
KH
588}
589
590static int morecore_recursing;
591
592/* Get neatly aligned memory, initializing or
593 growing the heap info table as necessary. */
d0baac98
PE
594static void *
595morecore_nolock (size_t size)
74ad5c7f 596{
d0baac98 597 void *result;
74ad5c7f 598 malloc_info *newinfo, *oldinfo;
d0baac98 599 size_t newsize;
74ad5c7f
KH
600
601 if (morecore_recursing)
602 /* Avoid recursion. The caller will know how to handle a null return. */
603 return NULL;
604
605 result = align (size);
606 if (result == NULL)
607 return NULL;
608
5dcab13e
GM
609 PROTECT_MALLOC_STATE (0);
610
74ad5c7f 611 /* Check if we need to grow the info table. */
d0baac98 612 if ((size_t) BLOCK ((char *) result + size) > heapsize)
74ad5c7f
KH
613 {
614 /* Calculate the new _heapinfo table size. We do not account for the
615 added blocks in the table itself, as we hope to place them in
616 existing free space, which is already covered by part of the
617 existing table. */
618 newsize = heapsize;
619 do
620 newsize *= 2;
d0baac98 621 while ((size_t) BLOCK ((char *) result + size) > newsize);
74ad5c7f
KH
622
623 /* We must not reuse existing core for the new info table when called
624 from realloc in the case of growing a large block, because the
625 block being grown is momentarily marked as free. In this case
626 _heaplimit is zero so we know not to reuse space for internal
627 allocation. */
628 if (_heaplimit != 0)
629 {
630 /* First try to allocate the new info table in core we already
631 have, in the usual way using realloc. If realloc cannot
632 extend it in place or relocate it to existing sufficient core,
633 we will get called again, and the code above will notice the
634 `morecore_recursing' flag and return null. */
635 int save = errno; /* Don't want to clobber errno with ENOMEM. */
636 morecore_recursing = 1;
d0baac98
PE
637 newinfo = _realloc_internal_nolock (_heapinfo,
638 newsize * sizeof (malloc_info));
74ad5c7f
KH
639 morecore_recursing = 0;
640 if (newinfo == NULL)
641 errno = save;
642 else
643 {
644 /* We found some space in core, and realloc has put the old
645 table's blocks on the free list. Now zero the new part
646 of the table and install the new table location. */
647 memset (&newinfo[heapsize], 0,
648 (newsize - heapsize) * sizeof (malloc_info));
649 _heapinfo = newinfo;
650 heapsize = newsize;
651 goto got_heap;
652 }
653 }
654
655 /* Allocate new space for the malloc info table. */
656 while (1)
657 {
d0baac98 658 newinfo = align (newsize * sizeof (malloc_info));
74ad5c7f
KH
659
660 /* Did it fail? */
661 if (newinfo == NULL)
662 {
663 (*__morecore) (-size);
664 return NULL;
665 }
666
667 /* Is it big enough to record status for its own space?
668 If so, we win. */
d0baac98
PE
669 if ((size_t) BLOCK ((char *) newinfo
670 + newsize * sizeof (malloc_info))
74ad5c7f
KH
671 < newsize)
672 break;
673
674 /* Must try again. First give back most of what we just got. */
675 (*__morecore) (- newsize * sizeof (malloc_info));
676 newsize *= 2;
677 }
678
679 /* Copy the old table to the beginning of the new,
680 and zero the rest of the new table. */
681 memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
682 memset (&newinfo[heapsize], 0,
683 (newsize - heapsize) * sizeof (malloc_info));
684 oldinfo = _heapinfo;
685 _heapinfo = newinfo;
686 heapsize = newsize;
687
688 register_heapinfo ();
689
690 /* Reset _heaplimit so _free_internal never decides
691 it can relocate or resize the info table. */
692 _heaplimit = 0;
8d0d84d2 693 _free_internal_nolock (oldinfo);
5dcab13e 694 PROTECT_MALLOC_STATE (0);
74ad5c7f
KH
695
696 /* The new heap limit includes the new table just allocated. */
697 _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
698 return result;
699 }
700
701 got_heap:
702 _heaplimit = BLOCK ((char *) result + size);
703 return result;
704}
705
706/* Allocate memory from the heap. */
d0baac98
PE
707void *
708_malloc_internal_nolock (size_t size)
74ad5c7f 709{
d0baac98
PE
710 void *result;
711 size_t block, blocks, lastblocks, start;
712 register size_t i;
74ad5c7f
KH
713 struct list *next;
714
715 /* ANSI C allows `malloc (0)' to either return NULL, or to return a
716 valid address you can realloc and free (though not dereference).
717
718 It turns out that some extant code (sunrpc, at least Ultrix's version)
719 expects `malloc (0)' to return non-NULL and breaks otherwise.
720 Be compatible. */
721
722#if 0
723 if (size == 0)
724 return NULL;
725#endif
726
5dcab13e
GM
727 PROTECT_MALLOC_STATE (0);
728
74ad5c7f
KH
729 if (size < sizeof (struct list))
730 size = sizeof (struct list);
731
74ad5c7f
KH
732 /* Determine the allocation policy based on the request size. */
733 if (size <= BLOCKSIZE / 2)
734 {
735 /* Small allocation to receive a fragment of a block.
736 Determine the logarithm to base two of the fragment size. */
d0baac98 737 register size_t log = 1;
74ad5c7f
KH
738 --size;
739 while ((size /= 2) != 0)
740 ++log;
741
742 /* Look in the fragment lists for a
743 free fragment of the desired size. */
744 next = _fraghead[log].next;
745 if (next != NULL)
746 {
747 /* There are free fragments of this size.
748 Pop a fragment out of the fragment list and return it.
749 Update the block's nfree and first counters. */
d0baac98 750 result = next;
74ad5c7f
KH
751 next->prev->next = next->next;
752 if (next->next != NULL)
753 next->next->prev = next->prev;
754 block = BLOCK (result);
755 if (--_heapinfo[block].busy.info.frag.nfree != 0)
d0baac98
PE
756 _heapinfo[block].busy.info.frag.first =
757 (uintptr_t) next->next % BLOCKSIZE >> log;
74ad5c7f
KH
758
759 /* Update the statistics. */
760 ++_chunks_used;
761 _bytes_used += 1 << log;
762 --_chunks_free;
763 _bytes_free -= 1 << log;
764 }
765 else
766 {
767 /* No free fragments of the desired size, so get a new block
768 and break it into fragments, returning the first. */
8094989b 769#ifdef GC_MALLOC_CHECK
8d0d84d2 770 result = _malloc_internal_nolock (BLOCKSIZE);
5dcab13e 771 PROTECT_MALLOC_STATE (0);
8d0d84d2
YM
772#elif defined (USE_PTHREAD)
773 result = _malloc_internal_nolock (BLOCKSIZE);
8094989b 774#else
74ad5c7f 775 result = malloc (BLOCKSIZE);
8094989b 776#endif
74ad5c7f 777 if (result == NULL)
5dcab13e
GM
778 {
779 PROTECT_MALLOC_STATE (1);
2f213514 780 goto out;
5dcab13e 781 }
74ad5c7f
KH
782
783 /* Link all fragments but the first into the free list. */
784 next = (struct list *) ((char *) result + (1 << log));
785 next->next = NULL;
786 next->prev = &_fraghead[log];
787 _fraghead[log].next = next;
788
d0baac98 789 for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
74ad5c7f
KH
790 {
791 next = (struct list *) ((char *) result + (i << log));
792 next->next = _fraghead[log].next;
793 next->prev = &_fraghead[log];
794 next->prev->next = next;
795 next->next->prev = next;
796 }
797
798 /* Initialize the nfree and first counters for this block. */
799 block = BLOCK (result);
800 _heapinfo[block].busy.type = log;
801 _heapinfo[block].busy.info.frag.nfree = i - 1;
802 _heapinfo[block].busy.info.frag.first = i - 1;
803
804 _chunks_free += (BLOCKSIZE >> log) - 1;
805 _bytes_free += BLOCKSIZE - (1 << log);
806 _bytes_used -= BLOCKSIZE - (1 << log);
807 }
808 }
809 else
810 {
811 /* Large allocation to receive one or more blocks.
812 Search the free list in a circle starting at the last place visited.
813 If we loop completely around without finding a large enough
814 space we will have to get more memory from the system. */
815 blocks = BLOCKIFY (size);
816 start = block = _heapindex;
817 while (_heapinfo[block].free.size < blocks)
818 {
819 block = _heapinfo[block].free.next;
820 if (block == start)
821 {
822 /* Need to get more from the system. Get a little extra. */
d0baac98 823 size_t wantblocks = blocks + __malloc_extra_blocks;
74ad5c7f
KH
824 block = _heapinfo[0].free.prev;
825 lastblocks = _heapinfo[block].free.size;
826 /* Check to see if the new core will be contiguous with the
827 final free block; if so we don't need to get as much. */
828 if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
829 /* We can't do this if we will have to make the heap info
cc4a96c6 830 table bigger to accommodate the new space. */
74ad5c7f
KH
831 block + wantblocks <= heapsize &&
832 get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
833 ADDRESS (block + lastblocks)))
834 {
835 /* We got it contiguously. Which block we are extending
836 (the `final free block' referred to above) might have
837 changed, if it got combined with a freed info table. */
838 block = _heapinfo[0].free.prev;
839 _heapinfo[block].free.size += (wantblocks - lastblocks);
840 _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
841 _heaplimit += wantblocks - lastblocks;
842 continue;
843 }
8d0d84d2 844 result = morecore_nolock (wantblocks * BLOCKSIZE);
74ad5c7f 845 if (result == NULL)
2f213514 846 goto out;
74ad5c7f
KH
847 block = BLOCK (result);
848 /* Put the new block at the end of the free list. */
849 _heapinfo[block].free.size = wantblocks;
850 _heapinfo[block].free.prev = _heapinfo[0].free.prev;
851 _heapinfo[block].free.next = 0;
852 _heapinfo[0].free.prev = block;
853 _heapinfo[_heapinfo[block].free.prev].free.next = block;
854 ++_chunks_free;
855 /* Now loop to use some of that block for this allocation. */
856 }
857 }
858
859 /* At this point we have found a suitable free list entry.
860 Figure out how to remove what we need from the list. */
861 result = ADDRESS (block);
862 if (_heapinfo[block].free.size > blocks)
863 {
864 /* The block we found has a bit left over,
865 so relink the tail end back into the free list. */
866 _heapinfo[block + blocks].free.size
867 = _heapinfo[block].free.size - blocks;
868 _heapinfo[block + blocks].free.next
869 = _heapinfo[block].free.next;
870 _heapinfo[block + blocks].free.prev
871 = _heapinfo[block].free.prev;
872 _heapinfo[_heapinfo[block].free.prev].free.next
873 = _heapinfo[_heapinfo[block].free.next].free.prev
874 = _heapindex = block + blocks;
875 }
876 else
877 {
878 /* The block exactly matches our requirements,
879 so just remove it from the list. */
880 _heapinfo[_heapinfo[block].free.next].free.prev
881 = _heapinfo[block].free.prev;
882 _heapinfo[_heapinfo[block].free.prev].free.next
883 = _heapindex = _heapinfo[block].free.next;
884 --_chunks_free;
885 }
886
887 _heapinfo[block].busy.type = 0;
888 _heapinfo[block].busy.info.size = blocks;
889 ++_chunks_used;
890 _bytes_used += blocks * BLOCKSIZE;
891 _bytes_free -= blocks * BLOCKSIZE;
892
893 /* Mark all the blocks of the object just allocated except for the
894 first with a negative number so you can find the first block by
895 adding that adjustment. */
896 while (--blocks > 0)
897 _heapinfo[block + blocks].busy.info.size = -blocks;
898 }
899
5dcab13e 900 PROTECT_MALLOC_STATE (1);
2f213514 901 out:
8d0d84d2
YM
902 return result;
903}
904
d0baac98
PE
/* Locked wrapper around _malloc_internal_nolock.  */
void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}
916
d0baac98
PE
917void *
918malloc (size_t size)
74ad5c7f 919{
d0baac98 920 void *(*hook) (size_t);
8d0d84d2 921
74ad5c7f
KH
922 if (!__malloc_initialized && !__malloc_initialize ())
923 return NULL;
924
8d0d84d2
YM
925 /* Copy the value of __malloc_hook to an automatic variable in case
926 __malloc_hook is modified in another thread between its
927 NULL-check and the use.
928
929 Note: Strictly speaking, this is not a right solution. We should
930 use mutexes to access non-read-only variables that are shared
931 among multiple threads. We just leave it for compatibility with
932 glibc malloc (i.e., assignments to __malloc_hook) for now. */
933 hook = __malloc_hook;
934 return (hook != NULL ? *hook : _malloc_internal) (size);
74ad5c7f
KH
935}
936\f
937#ifndef _LIBC
938
939/* On some ANSI C systems, some libc functions call _malloc, _free
940 and _realloc. Make them use the GNU functions. */
941
d0baac98
PE
942extern void *_malloc (size_t);
943extern void _free (void *);
944extern void *_realloc (void *, size_t);
945
/* Forward `_malloc' to the GNU `malloc' defined in this file, for
   libc functions that call the underscored entry points.  */
void *
_malloc (size_t size)
{
  return malloc (size);
}
951
/* Forward `_free' to the GNU `free' defined in this file, for
   libc functions that call the underscored entry points.  */
void
_free (void *ptr)
{
  free (ptr);
}
957
/* Forward `_realloc' to the GNU `realloc' defined in this file, for
   libc functions that call the underscored entry points.  */
void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}
963
964#endif
965/* Free a block of memory allocated by `malloc'.
966 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
967 Written May 1989 by Mike Haertel.
968
969This library is free software; you can redistribute it and/or
423a1f3c 970modify it under the terms of the GNU General Public License as
74ad5c7f
KH
971published by the Free Software Foundation; either version 2 of the
972License, or (at your option) any later version.
973
974This library is distributed in the hope that it will be useful,
975but WITHOUT ANY WARRANTY; without even the implied warranty of
976MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 977General Public License for more details.
74ad5c7f 978
423a1f3c 979You should have received a copy of the GNU General Public
fee0bd5f 980License along with this library. If not, see <http://www.gnu.org/licenses/>.
74ad5c7f
KH
981
982 The author may be reached (Email) at the address mike@ai.mit.edu,
983 or (US mail) as Mike Haertel c/o Free Software Foundation. */
984
74ad5c7f 985
74ad5c7f 986/* Debugging hook for free. */
d0baac98 987void (*__free_hook) (void *__ptr);
74ad5c7f 988
aea07e2c 989/* List of blocks allocated by aligned_alloc. */
74ad5c7f
KH
990struct alignlist *_aligned_blocks = NULL;
991
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.
   PTR may be NULL (no-op), a pointer returned by malloc/realloc, or an
   aligned pointer recorded by aligned_alloc in _aligned_blocks.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* We're being asked to free something in the static heap. */
    return;
#endif

  PROTECT_MALLOC_STATE (0);

  /* If PTR is an aligned pointer handed out by aligned_alloc, translate
     it back to the exact pointer the allocator returned.  */
  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  /* busy.type 0 marks a multi-block object measured in BLOCKSIZE units;
     any other value T marks a fragment of size 1 << T within a block.  */
  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      /* NOTE(review): newinfo is not checked for NULL before the
		 memmove; presumably the blocks just freed from the old
		 table always satisfy this request — confirm before
		 relying on it.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      /* Shrink core by BYTES and unlink the returned blocks
		 from the free list.  */
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  /* Reclassify the block as a single busy block so the
	     recursive free below takes the multi-block path.  */
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
1243
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (void *ptr)
{
  /* Serialize with other allocator calls around the unlocked worker.  */
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}
1253
1254/* Return memory to the heap. */
ca9c0567 1255
4624371d 1256void
d0baac98 1257free (void *ptr)
74ad5c7f 1258{
d0baac98 1259 void (*hook) (void *) = __free_hook;
8d0d84d2
YM
1260
1261 if (hook != NULL)
1262 (*hook) (ptr);
74ad5c7f
KH
1263 else
1264 _free_internal (ptr);
1265}
1266
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
/* When the C library supports weak aliases, make `cfree' a weak alias
   for `free' instead of a separate wrapper function.  */
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
1277/* Change the size of a block allocated by `malloc'.
1278 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1279 Written May 1989 by Mike Haertel.
1280
1281This library is free software; you can redistribute it and/or
423a1f3c 1282modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1283published by the Free Software Foundation; either version 2 of the
1284License, or (at your option) any later version.
1285
1286This library is distributed in the hope that it will be useful,
1287but WITHOUT ANY WARRANTY; without even the implied warranty of
1288MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1289General Public License for more details.
74ad5c7f 1290
423a1f3c 1291You should have received a copy of the GNU General Public
fee0bd5f 1292License along with this library. If not, see <http://www.gnu.org/licenses/>.
74ad5c7f
KH
1293
1294 The author may be reached (Email) at the address mike@ai.mit.edu,
1295 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1296
62aba0d4 1297#ifndef min
74ad5c7f 1298#define min(A, B) ((A) < (B) ? (A) : (B))
62aba0d4 1299#endif
74ad5c7f 1300
a4579d33
KB
/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
#ifdef CYGWIN
void *
special_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  /* Recover the block index and allocation type from the static
     heap's bookkeeping table.  */
  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  /* Type 0 is a multi-block object measured in BLOCKSIZE units;
     otherwise the object is a fragment of size 1 << type.  */
  oldsize =
    type == 0 ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
  /* Allocate in the dynamic heap and copy over the surviving bytes.  */
  result = _malloc_internal_nolock (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}
#endif
1323
74ad5c7f 1324/* Debugging hook for realloc. */
d0baac98 1325void *(*__realloc_hook) (void *ptr, size_t size);
74ad5c7f
KH
1326
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc. */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  /* realloc (ptr, 0) frees and returns a minimal allocation;
     realloc (NULL, size) is plain malloc.  */
  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* ptr points into the static heap */
    return special_realloc (ptr, size);
#endif

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  /* busy.type 0 is a multi-block object; any other value T is a
     fragment of size 1 << T.  */
  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place. */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list. */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it. */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving. */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system. */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors. */
	      if (_heapindex == block)
	        (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old. */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
1454
/* Resize the region at PTR to SIZE bytes, serializing against other
   threads via the allocator mutex.  Delegates to the unlocked worker.  */
void *
_realloc_internal (void *ptr, size_t size)
{
  void *value;

  LOCK ();
  value = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return value;
}
1466
d0baac98
PE
1467void *
1468realloc (void *ptr, size_t size)
74ad5c7f 1469{
d0baac98 1470 void *(*hook) (void *, size_t);
8d0d84d2 1471
74ad5c7f
KH
1472 if (!__malloc_initialized && !__malloc_initialize ())
1473 return NULL;
1474
8d0d84d2
YM
1475 hook = __realloc_hook;
1476 return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
74ad5c7f
KH
1477}
1478/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1479
1480This library is free software; you can redistribute it and/or
423a1f3c 1481modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1482published by the Free Software Foundation; either version 2 of the
1483License, or (at your option) any later version.
1484
1485This library is distributed in the hope that it will be useful,
1486but WITHOUT ANY WARRANTY; without even the implied warranty of
1487MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1488General Public License for more details.
74ad5c7f 1489
423a1f3c 1490You should have received a copy of the GNU General Public
fee0bd5f 1491License along with this library. If not, see <http://www.gnu.org/licenses/>.
74ad5c7f
KH
1492
1493 The author may be reached (Email) at the address mike@ai.mit.edu,
1494 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1495
74ad5c7f
KH
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  Fails with ENOMEM when
   the total byte count would overflow size_t.  */
void *
calloc (size_t nmemb, size_t size)
{
  size_t total = nmemb * size;

  /* Reject requests whose byte count wrapped around.  */
  if (size != 0 && total / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  void *mem = malloc (total);
  if (mem == NULL)
    return NULL;
  return memset (mem, 0, total);
}
1515/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1516This file is part of the GNU C Library.
1517
1518The GNU C Library is free software; you can redistribute it and/or modify
1519it under the terms of the GNU General Public License as published by
1520the Free Software Foundation; either version 2, or (at your option)
1521any later version.
1522
1523The GNU C Library is distributed in the hope that it will be useful,
1524but WITHOUT ANY WARRANTY; without even the implied warranty of
1525MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1526GNU General Public License for more details.
1527
1528You should have received a copy of the GNU General Public License
fee0bd5f 1529along with the GNU C Library. If not, see <http://www.gnu.org/licenses/>. */
74ad5c7f 1530
65f451d0
DN
1531/* uClibc defines __GNU_LIBRARY__, but it is not completely
1532 compatible. */
5e617bc2 1533#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
74ad5c7f 1534#define __sbrk sbrk
65f451d0 1535#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
74ad5c7f
KH
1536/* It is best not to declare this and cast its result on foreign operating
1537 systems with potentially hostile include files. */
1538
d0baac98 1539extern void *__sbrk (ptrdiff_t increment);
65f451d0 1540#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
74ad5c7f 1541
74ad5c7f
KH
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#if defined (CYGWIN)
  /* Before unexec has run, serve requests from the static bss sbrk
     area instead of the real break.  */
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  /* sbrk signals failure with (void *) -1; translate that to NULL.  */
  if (result == (void *) -1)
    return NULL;
  return result;
}
1560/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1561
1562This library is free software; you can redistribute it and/or
423a1f3c 1563modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1564published by the Free Software Foundation; either version 2 of the
1565License, or (at your option) any later version.
1566
1567This library is distributed in the hope that it will be useful,
1568but WITHOUT ANY WARRANTY; without even the implied warranty of
1569MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1570General Public License for more details.
74ad5c7f 1571
423a1f3c 1572You should have received a copy of the GNU General Public
fee0bd5f 1573License along with this library. If not, see <http://www.gnu.org/licenses/>. */
74ad5c7f 1574
d0baac98 1575void *(*__memalign_hook) (size_t size, size_t alignment);
74ad5c7f 1576
void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  /* `- size' wraps to SIZE_MAX - size + 1, so this rejects sizes for
     which `size + alignment - 1' below would overflow.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    {
      do
	{
	  /* Reallocate the block with only as much excess as it
	     needs.  */
	  free (result);
	  result = malloc (size + adj);
	  if (result == NULL)	/* Impossible unless interrupted.  */
	    return NULL;

	  lastadj = adj;
	  adj = alignment - (uintptr_t) result % alignment;
	  if (adj == alignment)
	    adj = 0;
	  /* It's conceivable we might have been so unlucky as to get
	     a different block with weaker alignment.  If so, this
	     block is too short to contain SIZE after alignment
	     correction.  So we must try again and get another block,
	     slightly larger.  */
	} while (adj > lastadj);
    }

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      /* Reuse a free slot in the list if one exists.  */
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      /* If no record could be allocated, give the block back rather
	 than return a pointer `free' could not recognize.  */
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
1663
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */

void *
memalign (size_t alignment, size_t size)
{
  /* Simply delegate; aligned_alloc implements the real logic.  */
  return aligned_alloc (alignment, size);
}
1672
/* POSIX-style aligned allocation: store a block of SIZE bytes aligned
   to ALIGNMENT in *MEMPTR.  Returns 0 on success, EINVAL for an
   invalid alignment, or ENOMEM when memory is exhausted.  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *block;

  /* POSIX requires the alignment to be a power-of-two multiple of
     sizeof (void *).  */
  if (alignment == 0)
    return EINVAL;
  if (alignment % sizeof (void *) != 0)
    return EINVAL;
  if ((alignment & (alignment - 1)) != 0)
    return EINVAL;

  block = aligned_alloc (alignment, size);
  if (block == NULL)
    return ENOMEM;

  *memptr = block;
  return 0;
}
1691
74ad5c7f
KH
1692/* Allocate memory on a page boundary.
1693 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1694
1695This library is free software; you can redistribute it and/or
423a1f3c 1696modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1697published by the Free Software Foundation; either version 2 of the
1698License, or (at your option) any later version.
1699
1700This library is distributed in the hope that it will be useful,
1701but WITHOUT ANY WARRANTY; without even the implied warranty of
1702MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1703General Public License for more details.
74ad5c7f 1704
423a1f3c 1705You should have received a copy of the GNU General Public
fee0bd5f 1706License along with this library. If not, see <http://www.gnu.org/licenses/>.
74ad5c7f
KH
1707
1708 The author may be reached (Email) at the address mike@ai.mit.edu,
1709 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1710
d0baac98
PE
1711/* Allocate SIZE bytes on a page boundary. */
1712extern void *valloc (size_t);
74ad5c7f 1713
d0baac98
PE
1714#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
1715# include "getpagesize.h"
1716#elif !defined getpagesize
1717extern int getpagesize (void);
74ad5c7f
KH
1718#endif
1719
d0baac98 1720static size_t pagesize;
74ad5c7f 1721
d0baac98
PE
1722void *
1723valloc (size_t size)
74ad5c7f
KH
1724{
1725 if (pagesize == 0)
d0baac98 1726 pagesize = getpagesize ();
74ad5c7f 1727
aea07e2c 1728 return aligned_alloc (pagesize, size);
74ad5c7f
KH
1729}
1730
a3ba27da
GM
1731#ifdef GC_MCHECK
1732
1733/* Standard debugging hooks for `malloc'.
1734 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1735 Written May 1989 by Mike Haertel.
1736
1737This library is free software; you can redistribute it and/or
423a1f3c 1738modify it under the terms of the GNU General Public License as
a3ba27da
GM
1739published by the Free Software Foundation; either version 2 of the
1740License, or (at your option) any later version.
1741
1742This library is distributed in the hope that it will be useful,
1743but WITHOUT ANY WARRANTY; without even the implied warranty of
1744MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1745General Public License for more details.
a3ba27da 1746
423a1f3c 1747You should have received a copy of the GNU General Public
fee0bd5f 1748License along with this library. If not, see <http://www.gnu.org/licenses/>.
a3ba27da
GM
1749
1750 The author may be reached (Email) at the address mike@ai.mit.edu,
1751 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1752
a3ba27da 1753#include <stdio.h>
a3ba27da
GM
1754
1755/* Old hook values. */
d0baac98
PE
1756static void (*old_free_hook) (void *ptr);
1757static void *(*old_malloc_hook) (size_t size);
1758static void *(*old_realloc_hook) (void *ptr, size_t size);
a3ba27da
GM
1759
1760/* Function to call when something awful happens. */
f57e2426 1761static void (*abortfunc) (enum mcheck_status);
a3ba27da
GM
1762
1763/* Arbitrary magical numbers. */
d0baac98
PE
1764#define MAGICWORD (SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
1765#define MAGICFREE (SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
a3ba27da
GM
1766#define MAGICBYTE ((char) 0xd7)
1767#define MALLOCFLOOD ((char) 0x93)
1768#define FREEFLOOD ((char) 0x95)
1769
/* Bookkeeping header placed immediately before each block handed out
   by the mcheck hooks; the user's data follows it, and one extra
   MAGICBYTE guard is written just past the user's SIZE bytes.  */
struct hdr
  {
    size_t size;		/* Exact size requested by user.  */
    size_t magic;		/* Magic number to check header integrity.  */
  };
1775
a3ba27da 1776static enum mcheck_status
d0baac98 1777checkhdr (const struct hdr *hdr)
a3ba27da
GM
1778{
1779 enum mcheck_status status;
1780 switch (hdr->magic)
1781 {
1782 default:
1783 status = MCHECK_HEAD;
1784 break;
1785 case MAGICFREE:
1786 status = MCHECK_FREE;
1787 break;
1788 case MAGICWORD:
1789 if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
1790 status = MCHECK_TAIL;
1791 else
1792 status = MCHECK_OK;
1793 break;
1794 }
1795 if (status != MCHECK_OK)
1796 (*abortfunc) (status);
1797 return status;
1798}
1799
/* Hooked variant of `free': verify the mcheck header, flood the freed
   region, and release the underlying block.  */
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
	 to free is recorded in _aligned_blocks; find that. */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == ptr)
	  {
	    l->aligned = NULL;	/* Mark the slot in the list as free. */
	    ptr = l->exact;
	    break;
	  }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      /* Step back to the header, verify it, then mark the block freed
	 and flood the user area so stale reads are recognizable.  */
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  /* Free the block (header included) with the hook temporarily
     restored so we don't recurse into ourselves.  */
  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
1835
d0baac98
PE
1836static void *
1837mallochook (size_t size)
a3ba27da
GM
1838{
1839 struct hdr *hdr;
1840
1841 __malloc_hook = old_malloc_hook;
38182d90 1842 hdr = malloc (sizeof *hdr + size + 1);
a3ba27da
GM
1843 __malloc_hook = mallochook;
1844 if (hdr == NULL)
1845 return NULL;
1846
1847 hdr->size = size;
1848 hdr->magic = MAGICWORD;
1849 ((char *) &hdr[1])[size] = MAGICBYTE;
220a304a 1850 return memset (hdr + 1, MALLOCFLOOD, size);
a3ba27da
GM
1851}
1852
d0baac98
PE
1853static void *
1854reallochook (void *ptr, size_t size)
a3ba27da
GM
1855{
1856 struct hdr *hdr = NULL;
d0baac98 1857 size_t osize = 0;
177c0ea7 1858
a3ba27da
GM
1859 if (ptr)
1860 {
1861 hdr = ((struct hdr *) ptr) - 1;
1862 osize = hdr->size;
1863
1864 checkhdr (hdr);
1865 if (size < osize)
0e926e56 1866 memset ((char *) ptr + size, FREEFLOOD, osize - size);
a3ba27da 1867 }
177c0ea7 1868
a3ba27da
GM
1869 __free_hook = old_free_hook;
1870 __malloc_hook = old_malloc_hook;
1871 __realloc_hook = old_realloc_hook;
38182d90 1872 hdr = realloc (hdr, sizeof *hdr + size + 1);
a3ba27da
GM
1873 __free_hook = freehook;
1874 __malloc_hook = mallochook;
1875 __realloc_hook = reallochook;
1876 if (hdr == NULL)
1877 return NULL;
1878
1879 hdr->size = size;
1880 hdr->magic = MAGICWORD;
1881 ((char *) &hdr[1])[size] = MAGICBYTE;
1882 if (size > osize)
0e926e56 1883 memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
d0baac98 1884 return hdr + 1;
a3ba27da
GM
1885}
1886
1887static void
d0baac98 1888mabort (enum mcheck_status status)
a3ba27da
GM
1889{
1890 const char *msg;
1891 switch (status)
1892 {
1893 case MCHECK_OK:
1894 msg = "memory is consistent, library is buggy";
1895 break;
1896 case MCHECK_HEAD:
1897 msg = "memory clobbered before allocated block";
1898 break;
1899 case MCHECK_TAIL:
1900 msg = "memory clobbered past end of allocated block";
1901 break;
1902 case MCHECK_FREE:
1903 msg = "block freed twice";
1904 break;
1905 default:
1906 msg = "bogus mcheck_status, library is buggy";
1907 break;
1908 }
1909#ifdef __GNU_LIBRARY__
1910 __libc_fatal (msg);
1911#else
1912 fprintf (stderr, "mcheck: %s\n", msg);
1913 fflush (stderr);
bd650c24
EZ
1914# ifdef emacs
1915 emacs_abort ();
1916# else
a3ba27da 1917 abort ();
bd650c24 1918# endif
a3ba27da
GM
1919#endif
1920}
1921
1922static int mcheck_used = 0;
1923
1924int
d0baac98 1925mcheck (void (*func) (enum mcheck_status))
a3ba27da
GM
1926{
1927 abortfunc = (func != NULL) ? func : &mabort;
1928
1929 /* These hooks may not be safely inserted if malloc is already in use. */
1930 if (!__malloc_initialized && !mcheck_used)
1931 {
1932 old_free_hook = __free_hook;
1933 __free_hook = freehook;
1934 old_malloc_hook = __malloc_hook;
1935 __malloc_hook = mallochook;
1936 old_realloc_hook = __realloc_hook;
1937 __realloc_hook = reallochook;
1938 mcheck_used = 1;
1939 }
1940
1941 return mcheck_used ? 0 : -1;
1942}
1943
1944enum mcheck_status
d0baac98 1945mprobe (void *ptr)
a3ba27da
GM
1946{
1947 return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
1948}
1949
1950#endif /* GC_MCHECK */