Get rid of compiler warnings on Cygwin.
[bpt/emacs.git] / src / gmalloc.c
CommitLineData
74ad5c7f 1/* Declarations for `malloc' and friends.
0b5538bd 2 Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
4e6835db 3 2005, 2006, 2007 Free Software Foundation, Inc.
74ad5c7f
KH
4 Written May 1989 by Mike Haertel.
5
6This library is free software; you can redistribute it and/or
423a1f3c 7modify it under the terms of the GNU General Public License as
74ad5c7f
KH
8published by the Free Software Foundation; either version 2 of the
9License, or (at your option) any later version.
10
11This library is distributed in the hope that it will be useful,
12but WITHOUT ANY WARRANTY; without even the implied warranty of
13MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 14General Public License for more details.
74ad5c7f 15
423a1f3c
JB
16You should have received a copy of the GNU General Public
17License along with this library; see the file COPYING. If
3ef97fb6
LK
18not, write to the Free Software Foundation, Inc., 51 Franklin Street,
19Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
20
21 The author may be reached (Email) at the address mike@ai.mit.edu,
22 or (US mail) as Mike Haertel c/o Free Software Foundation. */
23
74ad5c7f
KH
24#ifdef HAVE_CONFIG_H
25#include <config.h>
26#endif
27
ae9e757a 28#ifdef HAVE_PTHREAD
8d0d84d2
YM
29#define USE_PTHREAD
30#endif
31
74ad5c7f 32#include <string.h>
74ad5c7f 33#include <limits.h>
d0baac98 34#include <stdint.h>
74ad5c7f 35#include <unistd.h>
74ad5c7f 36
2f213514
YM
37#ifdef USE_PTHREAD
38#include <pthread.h>
39#endif
40
74ad5c7f
KH
41#ifdef __cplusplus
42extern "C"
43{
44#endif
45
74ad5c7f 46#include <stddef.h>
74ad5c7f
KH
47
48
/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size);
/* Resize the block previously returned in PTR to SIZE bytes,
   preserving its contents up to the smaller of the two sizes.  */
extern void *realloc (void *ptr, size_t size);
/* Allocate an array of NMEMB elements of SIZE bytes each,
   with every byte cleared to zero.  */
extern void *calloc (size_t nmemb, size_t size);
/* Release a block obtained from `malloc', `realloc' or `calloc'.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#ifdef MSDOS
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif

#ifdef USE_PTHREAD
/* Initialize the mutexes and switch malloc etc. into thread-safe mode.  */
extern void malloc_enable_thread (void);
#endif

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT (CHAR_BIT * sizeof (int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
/* Round SIZE up to a whole number of blocks.  */
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

/* Amount of memory spanned by the initial heap info table
   (a starting point, not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* How many contiguous free blocks may accumulate at the end of
   memory before they are handed back to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Per-block bookkeeping record, one per heap block.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
119
120/* Pointer to first block of the heap. */
121extern char *_heapbase;
122
123/* Table indexed by block number giving per-block information. */
124extern malloc_info *_heapinfo;
125
126/* Address to block number and vice versa. */
127#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
d0baac98 128#define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
74ad5c7f
KH
129
130/* Current search index for the heap table. */
d0baac98 131extern size_t _heapindex;
74ad5c7f
KH
132
133/* Limit of valid info table indices. */
d0baac98 134extern size_t _heaplimit;
74ad5c7f
KH
135
136/* Doubly linked lists of free fragments. */
137struct list
138 {
139 struct list *next;
140 struct list *prev;
141 };
142
143/* Free list headers for each fragment size. */
144extern struct list _fraghead[];
145
146/* List of blocks allocated with `memalign' (or `valloc'). */
147struct alignlist
148 {
149 struct alignlist *next;
d0baac98
PE
150 void *aligned; /* The address that memaligned returned. */
151 void *exact; /* The address that malloc returned. */
74ad5c7f
KH
152 };
153extern struct alignlist *_aligned_blocks;
154
155/* Instrumentation. */
d0baac98
PE
156extern size_t _chunks_used;
157extern size_t _bytes_used;
158extern size_t _chunks_free;
159extern size_t _bytes_free;
74ad5c7f
KH
160
161/* Internal versions of `malloc', `realloc', and `free'
162 used when these functions need to call each other.
163 They are the same but don't call the hooks. */
d0baac98
PE
164extern void *_malloc_internal (size_t);
165extern void *_realloc_internal (void *, size_t);
166extern void _free_internal (void *);
167extern void *_malloc_internal_nolock (size_t);
168extern void *_realloc_internal_nolock (void *, size_t);
169extern void _free_internal_nolock (void *);
74ad5c7f 170
2f213514 171#ifdef USE_PTHREAD
8d0d84d2 172extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
3ceeb306
YM
173extern int _malloc_thread_enabled_p;
174#define LOCK() \
175 do { \
176 if (_malloc_thread_enabled_p) \
177 pthread_mutex_lock (&_malloc_mutex); \
178 } while (0)
179#define UNLOCK() \
180 do { \
181 if (_malloc_thread_enabled_p) \
182 pthread_mutex_unlock (&_malloc_mutex); \
183 } while (0)
184#define LOCK_ALIGNED_BLOCKS() \
185 do { \
186 if (_malloc_thread_enabled_p) \
187 pthread_mutex_lock (&_aligned_blocks_mutex); \
188 } while (0)
189#define UNLOCK_ALIGNED_BLOCKS() \
190 do { \
191 if (_malloc_thread_enabled_p) \
192 pthread_mutex_unlock (&_aligned_blocks_mutex); \
193 } while (0)
2f213514
YM
194#else
195#define LOCK()
196#define UNLOCK()
8d0d84d2
YM
197#define LOCK_ALIGNED_BLOCKS()
198#define UNLOCK_ALIGNED_BLOCKS()
2f213514
YM
199#endif
200
74ad5c7f
KH
/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern void *malloc_find_object_address (void *ptr);

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern void *(*__morecore) (ptrdiff_t size);

/* Default value of `__morecore'.  */
extern void *__default_morecore (ptrdiff_t size);

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) (void);

/* Number of extra blocks to request each time more core is needed;
   this cuts down on the number of `(*__morecore)' calls.  */
extern size_t __malloc_extra_blocks;

/* Nonzero once `malloc' has run its one-time initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) (void);
extern void (*__free_hook) (void *ptr);
extern void *(*__malloc_hook) (size_t size);
extern void *(*__realloc_hook) (void *ptr, size_t size);
extern void *(*__memalign_hook) (size_t size, size_t alignment);

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings (void *start, void (*warnfun) (const char *));

#ifdef __cplusplus
}
#endif
277
74ad5c7f
KH
278/* Memory allocator `malloc'.
279 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
280 Written May 1989 by Mike Haertel.
281
282This library is free software; you can redistribute it and/or
423a1f3c 283modify it under the terms of the GNU General Public License as
74ad5c7f
KH
284published by the Free Software Foundation; either version 2 of the
285License, or (at your option) any later version.
286
287This library is distributed in the hope that it will be useful,
288but WITHOUT ANY WARRANTY; without even the implied warranty of
289MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 290General Public License for more details.
74ad5c7f 291
423a1f3c
JB
292You should have received a copy of the GNU General Public
293License along with this library; see the file COPYING. If
3ef97fb6
LK
294not, write to the Free Software Foundation, Inc., 51 Franklin Street,
295Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
296
297 The author may be reached (Email) at the address mike@ai.mit.edu,
298 or (US mail) as Mike Haertel c/o Free Software Foundation. */
299
74ad5c7f
KH
300#include <errno.h>
301
a4579d33
KB
302/* On Cygwin there are two heaps. temacs uses the static heap
303 (defined in sheap.c and managed with bss_sbrk), and the dumped
304 emacs uses the Cygwin heap (managed with sbrk). When emacs starts
305 on Cygwin, it reinitializes malloc, and we save the old info for
306 use by free and realloc if they're called with a pointer into the
db76dd85
KB
307 static heap.
308
309 Currently (2011-08-16) the Cygwin build doesn't use ralloc.c; if
310 this is changed in the future, we'll have to similarly deal with
311 reinitializing ralloc. */
a4579d33 312#ifdef CYGWIN
d0baac98 313extern void *bss_sbrk (ptrdiff_t size);
ef6d1039 314extern int bss_sbrk_did_unexec;
a4579d33
KB
315char *bss_sbrk_heapbase; /* _heapbase for static heap */
316malloc_info *bss_sbrk_heapinfo; /* _heapinfo for static heap */
ef6d1039 317#endif
d0baac98 318void *(*__morecore) (ptrdiff_t size) = __default_morecore;
74ad5c7f
KH
319
320/* Debugging hook for `malloc'. */
d0baac98 321void *(*__malloc_hook) (size_t size);
74ad5c7f
KH
322
323/* Pointer to the base of the first block. */
324char *_heapbase;
325
326/* Block information table. Allocated with align/__free (not malloc/free). */
327malloc_info *_heapinfo;
328
329/* Number of info entries. */
d0baac98 330static size_t heapsize;
74ad5c7f
KH
331
332/* Search index in the info table. */
d0baac98 333size_t _heapindex;
74ad5c7f
KH
334
335/* Limit of valid info table indices. */
d0baac98 336size_t _heaplimit;
74ad5c7f
KH
337
338/* Free lists for each fragment size. */
339struct list _fraghead[BLOCKLOG];
340
341/* Instrumentation. */
d0baac98
PE
342size_t _chunks_used;
343size_t _bytes_used;
344size_t _chunks_free;
345size_t _bytes_free;
74ad5c7f
KH
346
347/* Are you experienced? */
348int __malloc_initialized;
349
d0baac98 350size_t __malloc_extra_blocks;
74ad5c7f 351
d0baac98
PE
352void (*__malloc_initialize_hook) (void);
353void (*__after_morecore_hook) (void);
74ad5c7f 354
5dcab13e
GM
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;		/* Nonzero while _heapinfo is read-only.  */
static size_t last_state_size;		/* Byte length last passed to mprotect.  */
static malloc_info *last_heapinfo;	/* _heapinfo as of the previous call.  */

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
400
74ad5c7f
KH
401
402/* Aligned allocation. */
d0baac98
PE
403static void *
404align (size_t size)
74ad5c7f 405{
d0baac98
PE
406 void *result;
407 ptrdiff_t adj;
74ad5c7f 408
ceeb3d7d 409 /* align accepts an unsigned argument, but __morecore accepts a
d0baac98
PE
410 signed one. This could lead to trouble if SIZE overflows the
411 ptrdiff_t type accepted by __morecore. We just punt in that
ceeb3d7d 412 case, since they are requesting a ludicrous amount anyway. */
d0baac98 413 if (PTRDIFF_MAX < size)
ceeb3d7d
EZ
414 result = 0;
415 else
416 result = (*__morecore) (size);
d0baac98 417 adj = (uintptr_t) result % BLOCKSIZE;
74ad5c7f
KH
418 if (adj != 0)
419 {
74ad5c7f 420 adj = BLOCKSIZE - adj;
d0baac98 421 (*__morecore) (adj);
74ad5c7f
KH
422 result = (char *) result + adj;
423 }
424
425 if (__after_morecore_hook)
426 (*__after_morecore_hook) ();
427
428 return result;
429}
430
431/* Get SIZE bytes, if we can get them starting at END.
432 Return the address of the space we got.
433 If we cannot get space at END, fail and return 0. */
d0baac98
PE
434static void *
435get_contiguous_space (ptrdiff_t size, void *position)
74ad5c7f 436{
d0baac98
PE
437 void *before;
438 void *after;
74ad5c7f
KH
439
440 before = (*__morecore) (0);
441 /* If we can tell in advance that the break is at the wrong place,
442 fail now. */
443 if (before != position)
444 return 0;
445
446 /* Allocate SIZE bytes and get the address of them. */
447 after = (*__morecore) (size);
448 if (!after)
449 return 0;
450
451 /* It was not contiguous--reject it. */
452 if (after != position)
453 {
454 (*__morecore) (- size);
455 return 0;
456 }
457
458 return after;
459}
460
461
462/* This is called when `_heapinfo' and `heapsize' have just
463 been set to describe a new info table. Set up the table
464 to describe itself and account for it in the statistics. */
55d4c1b2
PE
465static inline void
466register_heapinfo (void)
74ad5c7f 467{
d0baac98 468 size_t block, blocks;
74ad5c7f
KH
469
470 block = BLOCK (_heapinfo);
471 blocks = BLOCKIFY (heapsize * sizeof (malloc_info));
472
473 /* Account for the _heapinfo block itself in the statistics. */
474 _bytes_used += blocks * BLOCKSIZE;
475 ++_chunks_used;
476
477 /* Describe the heapinfo block itself in the heapinfo. */
478 _heapinfo[block].busy.type = 0;
479 _heapinfo[block].busy.info.size = blocks;
480 /* Leave back-pointers for malloc_find_address. */
481 while (--blocks > 0)
482 _heapinfo[block + blocks].busy.info.size = -blocks;
483}
484
2f213514 485#ifdef USE_PTHREAD
8d0d84d2
YM
/* Mutex guarding the global heap structures.  */
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Mutex guarding the `_aligned_blocks' list.  */
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Nonzero once malloc_enable_thread has run; until then the LOCK
   macros do nothing.  */
int _malloc_thread_enabled_p;

/* pthread_atfork prepare handler: acquire both malloc locks so the
   child does not inherit them in a locked state.  */
static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

/* pthread_atfork parent handler: release the locks taken in prepare.  */
static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* pthread_atfork child handler: release the locks in the child too.  */
static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
2f213514 529#endif
74ad5c7f 530
2f213514 531static void
d0baac98 532malloc_initialize_1 (void)
2f213514 533{
a3ba27da
GM
534#ifdef GC_MCHECK
535 mcheck (NULL);
536#endif
537
a4579d33
KB
538#ifdef CYGWIN
539 if (bss_sbrk_did_unexec)
540 /* we're reinitializing the dumped emacs */
541 {
542 bss_sbrk_heapbase = _heapbase;
543 bss_sbrk_heapinfo = _heapinfo;
544 memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
545 }
546#endif
547
74ad5c7f
KH
548 if (__malloc_initialize_hook)
549 (*__malloc_initialize_hook) ();
550
551 heapsize = HEAP / BLOCKSIZE;
d0baac98 552 _heapinfo = align (heapsize * sizeof (malloc_info));
74ad5c7f 553 if (_heapinfo == NULL)
2f213514 554 return;
74ad5c7f
KH
555 memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
556 _heapinfo[0].free.size = 0;
557 _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
558 _heapindex = 0;
559 _heapbase = (char *) _heapinfo;
560 _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));
561
562 register_heapinfo ();
563
564 __malloc_initialized = 1;
5dcab13e 565 PROTECT_MALLOC_STATE (1);
2f213514
YM
566 return;
567}
568
784c1472
JD
569/* Set everything up and remember that we have.
570 main will call malloc which calls this function. That is before any threads
571 or signal handlers has been set up, so we don't need thread protection. */
2f213514 572int
d0baac98 573__malloc_initialize (void)
2f213514 574{
2f213514
YM
575 if (__malloc_initialized)
576 return 0;
577
578 malloc_initialize_1 ();
2f213514
YM
579
580 return __malloc_initialized;
74ad5c7f
KH
581}
582
583static int morecore_recursing;
584
585/* Get neatly aligned memory, initializing or
586 growing the heap info table as necessary. */
d0baac98
PE
587static void *
588morecore_nolock (size_t size)
74ad5c7f 589{
d0baac98 590 void *result;
74ad5c7f 591 malloc_info *newinfo, *oldinfo;
d0baac98 592 size_t newsize;
74ad5c7f
KH
593
594 if (morecore_recursing)
595 /* Avoid recursion. The caller will know how to handle a null return. */
596 return NULL;
597
598 result = align (size);
599 if (result == NULL)
600 return NULL;
601
5dcab13e
GM
602 PROTECT_MALLOC_STATE (0);
603
74ad5c7f 604 /* Check if we need to grow the info table. */
d0baac98 605 if ((size_t) BLOCK ((char *) result + size) > heapsize)
74ad5c7f
KH
606 {
607 /* Calculate the new _heapinfo table size. We do not account for the
608 added blocks in the table itself, as we hope to place them in
609 existing free space, which is already covered by part of the
610 existing table. */
611 newsize = heapsize;
612 do
613 newsize *= 2;
d0baac98 614 while ((size_t) BLOCK ((char *) result + size) > newsize);
74ad5c7f
KH
615
616 /* We must not reuse existing core for the new info table when called
617 from realloc in the case of growing a large block, because the
618 block being grown is momentarily marked as free. In this case
619 _heaplimit is zero so we know not to reuse space for internal
620 allocation. */
621 if (_heaplimit != 0)
622 {
623 /* First try to allocate the new info table in core we already
624 have, in the usual way using realloc. If realloc cannot
625 extend it in place or relocate it to existing sufficient core,
626 we will get called again, and the code above will notice the
627 `morecore_recursing' flag and return null. */
628 int save = errno; /* Don't want to clobber errno with ENOMEM. */
629 morecore_recursing = 1;
d0baac98
PE
630 newinfo = _realloc_internal_nolock (_heapinfo,
631 newsize * sizeof (malloc_info));
74ad5c7f
KH
632 morecore_recursing = 0;
633 if (newinfo == NULL)
634 errno = save;
635 else
636 {
637 /* We found some space in core, and realloc has put the old
638 table's blocks on the free list. Now zero the new part
639 of the table and install the new table location. */
640 memset (&newinfo[heapsize], 0,
641 (newsize - heapsize) * sizeof (malloc_info));
642 _heapinfo = newinfo;
643 heapsize = newsize;
644 goto got_heap;
645 }
646 }
647
648 /* Allocate new space for the malloc info table. */
649 while (1)
650 {
d0baac98 651 newinfo = align (newsize * sizeof (malloc_info));
74ad5c7f
KH
652
653 /* Did it fail? */
654 if (newinfo == NULL)
655 {
656 (*__morecore) (-size);
657 return NULL;
658 }
659
660 /* Is it big enough to record status for its own space?
661 If so, we win. */
d0baac98
PE
662 if ((size_t) BLOCK ((char *) newinfo
663 + newsize * sizeof (malloc_info))
74ad5c7f
KH
664 < newsize)
665 break;
666
667 /* Must try again. First give back most of what we just got. */
668 (*__morecore) (- newsize * sizeof (malloc_info));
669 newsize *= 2;
670 }
671
672 /* Copy the old table to the beginning of the new,
673 and zero the rest of the new table. */
674 memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
675 memset (&newinfo[heapsize], 0,
676 (newsize - heapsize) * sizeof (malloc_info));
677 oldinfo = _heapinfo;
678 _heapinfo = newinfo;
679 heapsize = newsize;
680
681 register_heapinfo ();
682
683 /* Reset _heaplimit so _free_internal never decides
684 it can relocate or resize the info table. */
685 _heaplimit = 0;
8d0d84d2 686 _free_internal_nolock (oldinfo);
5dcab13e 687 PROTECT_MALLOC_STATE (0);
74ad5c7f
KH
688
689 /* The new heap limit includes the new table just allocated. */
690 _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
691 return result;
692 }
693
694 got_heap:
695 _heaplimit = BLOCK ((char *) result + size);
696 return result;
697}
698
699/* Allocate memory from the heap. */
d0baac98
PE
700void *
701_malloc_internal_nolock (size_t size)
74ad5c7f 702{
d0baac98
PE
703 void *result;
704 size_t block, blocks, lastblocks, start;
705 register size_t i;
74ad5c7f
KH
706 struct list *next;
707
708 /* ANSI C allows `malloc (0)' to either return NULL, or to return a
709 valid address you can realloc and free (though not dereference).
710
711 It turns out that some extant code (sunrpc, at least Ultrix's version)
712 expects `malloc (0)' to return non-NULL and breaks otherwise.
713 Be compatible. */
714
715#if 0
716 if (size == 0)
717 return NULL;
718#endif
719
5dcab13e
GM
720 PROTECT_MALLOC_STATE (0);
721
74ad5c7f
KH
722 if (size < sizeof (struct list))
723 size = sizeof (struct list);
724
74ad5c7f
KH
725 /* Determine the allocation policy based on the request size. */
726 if (size <= BLOCKSIZE / 2)
727 {
728 /* Small allocation to receive a fragment of a block.
729 Determine the logarithm to base two of the fragment size. */
d0baac98 730 register size_t log = 1;
74ad5c7f
KH
731 --size;
732 while ((size /= 2) != 0)
733 ++log;
734
735 /* Look in the fragment lists for a
736 free fragment of the desired size. */
737 next = _fraghead[log].next;
738 if (next != NULL)
739 {
740 /* There are free fragments of this size.
741 Pop a fragment out of the fragment list and return it.
742 Update the block's nfree and first counters. */
d0baac98 743 result = next;
74ad5c7f
KH
744 next->prev->next = next->next;
745 if (next->next != NULL)
746 next->next->prev = next->prev;
747 block = BLOCK (result);
748 if (--_heapinfo[block].busy.info.frag.nfree != 0)
d0baac98
PE
749 _heapinfo[block].busy.info.frag.first =
750 (uintptr_t) next->next % BLOCKSIZE >> log;
74ad5c7f
KH
751
752 /* Update the statistics. */
753 ++_chunks_used;
754 _bytes_used += 1 << log;
755 --_chunks_free;
756 _bytes_free -= 1 << log;
757 }
758 else
759 {
760 /* No free fragments of the desired size, so get a new block
761 and break it into fragments, returning the first. */
8094989b 762#ifdef GC_MALLOC_CHECK
8d0d84d2 763 result = _malloc_internal_nolock (BLOCKSIZE);
5dcab13e 764 PROTECT_MALLOC_STATE (0);
8d0d84d2
YM
765#elif defined (USE_PTHREAD)
766 result = _malloc_internal_nolock (BLOCKSIZE);
8094989b 767#else
74ad5c7f 768 result = malloc (BLOCKSIZE);
8094989b 769#endif
74ad5c7f 770 if (result == NULL)
5dcab13e
GM
771 {
772 PROTECT_MALLOC_STATE (1);
2f213514 773 goto out;
5dcab13e 774 }
74ad5c7f
KH
775
776 /* Link all fragments but the first into the free list. */
777 next = (struct list *) ((char *) result + (1 << log));
778 next->next = NULL;
779 next->prev = &_fraghead[log];
780 _fraghead[log].next = next;
781
d0baac98 782 for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
74ad5c7f
KH
783 {
784 next = (struct list *) ((char *) result + (i << log));
785 next->next = _fraghead[log].next;
786 next->prev = &_fraghead[log];
787 next->prev->next = next;
788 next->next->prev = next;
789 }
790
791 /* Initialize the nfree and first counters for this block. */
792 block = BLOCK (result);
793 _heapinfo[block].busy.type = log;
794 _heapinfo[block].busy.info.frag.nfree = i - 1;
795 _heapinfo[block].busy.info.frag.first = i - 1;
796
797 _chunks_free += (BLOCKSIZE >> log) - 1;
798 _bytes_free += BLOCKSIZE - (1 << log);
799 _bytes_used -= BLOCKSIZE - (1 << log);
800 }
801 }
802 else
803 {
804 /* Large allocation to receive one or more blocks.
805 Search the free list in a circle starting at the last place visited.
806 If we loop completely around without finding a large enough
807 space we will have to get more memory from the system. */
808 blocks = BLOCKIFY (size);
809 start = block = _heapindex;
810 while (_heapinfo[block].free.size < blocks)
811 {
812 block = _heapinfo[block].free.next;
813 if (block == start)
814 {
815 /* Need to get more from the system. Get a little extra. */
d0baac98 816 size_t wantblocks = blocks + __malloc_extra_blocks;
74ad5c7f
KH
817 block = _heapinfo[0].free.prev;
818 lastblocks = _heapinfo[block].free.size;
819 /* Check to see if the new core will be contiguous with the
820 final free block; if so we don't need to get as much. */
821 if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
822 /* We can't do this if we will have to make the heap info
cc4a96c6 823 table bigger to accommodate the new space. */
74ad5c7f
KH
824 block + wantblocks <= heapsize &&
825 get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
826 ADDRESS (block + lastblocks)))
827 {
828 /* We got it contiguously. Which block we are extending
829 (the `final free block' referred to above) might have
830 changed, if it got combined with a freed info table. */
831 block = _heapinfo[0].free.prev;
832 _heapinfo[block].free.size += (wantblocks - lastblocks);
833 _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
834 _heaplimit += wantblocks - lastblocks;
835 continue;
836 }
8d0d84d2 837 result = morecore_nolock (wantblocks * BLOCKSIZE);
74ad5c7f 838 if (result == NULL)
2f213514 839 goto out;
74ad5c7f
KH
840 block = BLOCK (result);
841 /* Put the new block at the end of the free list. */
842 _heapinfo[block].free.size = wantblocks;
843 _heapinfo[block].free.prev = _heapinfo[0].free.prev;
844 _heapinfo[block].free.next = 0;
845 _heapinfo[0].free.prev = block;
846 _heapinfo[_heapinfo[block].free.prev].free.next = block;
847 ++_chunks_free;
848 /* Now loop to use some of that block for this allocation. */
849 }
850 }
851
852 /* At this point we have found a suitable free list entry.
853 Figure out how to remove what we need from the list. */
854 result = ADDRESS (block);
855 if (_heapinfo[block].free.size > blocks)
856 {
857 /* The block we found has a bit left over,
858 so relink the tail end back into the free list. */
859 _heapinfo[block + blocks].free.size
860 = _heapinfo[block].free.size - blocks;
861 _heapinfo[block + blocks].free.next
862 = _heapinfo[block].free.next;
863 _heapinfo[block + blocks].free.prev
864 = _heapinfo[block].free.prev;
865 _heapinfo[_heapinfo[block].free.prev].free.next
866 = _heapinfo[_heapinfo[block].free.next].free.prev
867 = _heapindex = block + blocks;
868 }
869 else
870 {
871 /* The block exactly matches our requirements,
872 so just remove it from the list. */
873 _heapinfo[_heapinfo[block].free.next].free.prev
874 = _heapinfo[block].free.prev;
875 _heapinfo[_heapinfo[block].free.prev].free.next
876 = _heapindex = _heapinfo[block].free.next;
877 --_chunks_free;
878 }
879
880 _heapinfo[block].busy.type = 0;
881 _heapinfo[block].busy.info.size = blocks;
882 ++_chunks_used;
883 _bytes_used += blocks * BLOCKSIZE;
884 _bytes_free -= blocks * BLOCKSIZE;
885
886 /* Mark all the blocks of the object just allocated except for the
887 first with a negative number so you can find the first block by
888 adding that adjustment. */
889 while (--blocks > 0)
890 _heapinfo[block + blocks].busy.info.size = -blocks;
891 }
892
5dcab13e 893 PROTECT_MALLOC_STATE (1);
2f213514 894 out:
8d0d84d2
YM
895 return result;
896}
897
d0baac98
PE
/* Locking wrapper around _malloc_internal_nolock: allocate SIZE bytes
   while holding the malloc mutex (a no-op until threads are enabled).  */
void *
_malloc_internal (size_t size)
{
  void *ptr;

  LOCK ();
  ptr = _malloc_internal_nolock (size);
  UNLOCK ();

  return ptr;
}
909
d0baac98
PE
910void *
911malloc (size_t size)
74ad5c7f 912{
d0baac98 913 void *(*hook) (size_t);
8d0d84d2 914
74ad5c7f
KH
915 if (!__malloc_initialized && !__malloc_initialize ())
916 return NULL;
917
8d0d84d2
YM
918 /* Copy the value of __malloc_hook to an automatic variable in case
919 __malloc_hook is modified in another thread between its
920 NULL-check and the use.
921
922 Note: Strictly speaking, this is not a right solution. We should
923 use mutexes to access non-read-only variables that are shared
924 among multiple threads. We just leave it for compatibility with
925 glibc malloc (i.e., assignments to __malloc_hook) for now. */
926 hook = __malloc_hook;
927 return (hook != NULL ? *hook : _malloc_internal) (size);
74ad5c7f
KH
928}
929\f
#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

/* Underscored entry point: forward to the GNU `malloc'.  */
void *
_malloc (size_t size)
{
  return malloc (size);
}

/* Underscored entry point: forward to the GNU `free'.  */
void
_free (void *ptr)
{
  free (ptr);
}

/* Underscored entry point: forward to the GNU `realloc'.  */
void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
74ad5c7f 980
74ad5c7f 981/* Debugging hook for free. */
d0baac98 982void (*__free_hook) (void *__ptr);
74ad5c7f
KH
983
984/* List of blocks allocated by memalign. */
985struct alignlist *_aligned_blocks = NULL;
986
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex; the caller is expected
   to hold it already (see `_free_internal').  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* We're being asked to free something in the static heap. */
    return;
#endif

  PROTECT_MALLOC_STATE (0);

  /* If PTR was produced by `memalign', translate it back to the exact
     address the underlying malloc returned (recorded in l->exact).  */
  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  /* type 0 means a whole multi-block object; type > 0 means a fragment
     of size 1 << type within a shared block.  */
  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

 	  size_t info_block = BLOCK (_heapinfo);
 	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
 	  size_t prev_block = _heapinfo[block].free.prev;
 	  size_t prev_blocks = _heapinfo[prev_block].free.size;
 	  size_t next_block = _heapinfo[block].free.next;
 	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
 	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      /* A negative argument shrinks the data space.  */
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

	  /* With pthreads (or GC checking) we already hold the lock, so
	     re-enter the nolock path directly instead of `free'.  */
#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
1238
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.
   Takes the malloc mutex around the real work.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}
1248
1249/* Return memory to the heap. */
ca9c0567 1250
4624371d 1251void
d0baac98 1252free (void *ptr)
74ad5c7f 1253{
d0baac98 1254 void (*hook) (void *) = __free_hook;
8d0d84d2
YM
1255
1256 if (hook != NULL)
1257 (*hook) (ptr);
74ad5c7f
KH
1258 else
1259 _free_internal (ptr);
1260}
1261
/* Define the `cfree' alias for `free'.  Use a true alias where the
   toolchain supports weak symbols; otherwise fall back to a trivial
   forwarding function.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
1272/* Change the size of a block allocated by `malloc'.
1273 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1274 Written May 1989 by Mike Haertel.
1275
1276This library is free software; you can redistribute it and/or
423a1f3c 1277modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1278published by the Free Software Foundation; either version 2 of the
1279License, or (at your option) any later version.
1280
1281This library is distributed in the hope that it will be useful,
1282but WITHOUT ANY WARRANTY; without even the implied warranty of
1283MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1284General Public License for more details.
74ad5c7f 1285
423a1f3c
JB
1286You should have received a copy of the GNU General Public
1287License along with this library; see the file COPYING. If
3ef97fb6
LK
1288not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1289Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
1290
1291 The author may be reached (Email) at the address mike@ai.mit.edu,
1292 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1293
74ad5c7f
KH
1294#define min(A, B) ((A) < (B) ? (A) : (B))
1295
/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
#ifdef CYGWIN
void *
special_realloc (void *ptr, size_t size)
{
  void *copy;
  int type;
  size_t block, oldsize;

  /* Recover the block index and original size from the static-heap
     bookkeeping tables.  */
  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  oldsize = (type == 0
	     ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
	     : (size_t) 1 << type);

  /* Allocate from the live heap and copy over whatever fits.  The old
     static-heap storage is never freed.  */
  copy = _malloc_internal_nolock (size);
  if (copy != NULL)
    memcpy (copy, ptr, min (oldsize, size));
  return copy;
}
#endif
1318
74ad5c7f 1319/* Debugging hook for realloc. */
d0baac98 1320void *(*__realloc_hook) (void *ptr, size_t size);
74ad5c7f
KH
1321
1322/* Resize the given region to the new size, returning a pointer
1323 to the (possibly moved) region. This is optimized for speed;
1324 some benchmarks seem to indicate that greater compactness is
1325 achieved by unconditionally allocating and copying to a
1326 new region. This module has incestuous knowledge of the
1327 internals of both free and malloc. */
d0baac98
PE
1328void *
1329_realloc_internal_nolock (void *ptr, size_t size)
74ad5c7f 1330{
d0baac98 1331 void *result;
74ad5c7f 1332 int type;
d0baac98 1333 size_t block, blocks, oldlimit;
74ad5c7f
KH
1334
1335 if (size == 0)
1336 {
8d0d84d2
YM
1337 _free_internal_nolock (ptr);
1338 return _malloc_internal_nolock (0);
74ad5c7f
KH
1339 }
1340 else if (ptr == NULL)
8d0d84d2 1341 return _malloc_internal_nolock (size);
74ad5c7f 1342
a4579d33 1343#ifdef CYGWIN
1b170bc6 1344 if ((char *) ptr < _heapbase)
a4579d33
KB
1345 /* ptr points into the static heap */
1346 return special_realloc (ptr, size);
1347#endif
1348
74ad5c7f
KH
1349 block = BLOCK (ptr);
1350
5dcab13e 1351 PROTECT_MALLOC_STATE (0);
177c0ea7 1352
74ad5c7f
KH
1353 type = _heapinfo[block].busy.type;
1354 switch (type)
1355 {
1356 case 0:
1357 /* Maybe reallocate a large block to a small fragment. */
1358 if (size <= BLOCKSIZE / 2)
1359 {
8d0d84d2 1360 result = _malloc_internal_nolock (size);
74ad5c7f
KH
1361 if (result != NULL)
1362 {
1363 memcpy (result, ptr, size);
8d0d84d2 1364 _free_internal_nolock (ptr);
2f213514 1365 goto out;
74ad5c7f
KH
1366 }
1367 }
1368
1369 /* The new size is a large allocation as well;
1370 see if we can hold it in place. */
1371 blocks = BLOCKIFY (size);
1372 if (blocks < _heapinfo[block].busy.info.size)
1373 {
1374 /* The new size is smaller; return
1375 excess memory to the free list. */
1376 _heapinfo[block + blocks].busy.type = 0;
1377 _heapinfo[block + blocks].busy.info.size
1378 = _heapinfo[block].busy.info.size - blocks;
1379 _heapinfo[block].busy.info.size = blocks;
1380 /* We have just created a new chunk by splitting a chunk in two.
1381 Now we will free this chunk; increment the statistics counter
1382 so it doesn't become wrong when _free_internal decrements it. */
1383 ++_chunks_used;
8d0d84d2 1384 _free_internal_nolock (ADDRESS (block + blocks));
74ad5c7f
KH
1385 result = ptr;
1386 }
1387 else if (blocks == _heapinfo[block].busy.info.size)
1388 /* No size change necessary. */
1389 result = ptr;
1390 else
1391 {
1392 /* Won't fit, so allocate a new region that will.
1393 Free the old region first in case there is sufficient
1394 adjacent free space to grow without moving. */
1395 blocks = _heapinfo[block].busy.info.size;
1396 /* Prevent free from actually returning memory to the system. */
1397 oldlimit = _heaplimit;
1398 _heaplimit = 0;
8d0d84d2
YM
1399 _free_internal_nolock (ptr);
1400 result = _malloc_internal_nolock (size);
5dcab13e 1401 PROTECT_MALLOC_STATE (0);
74ad5c7f
KH
1402 if (_heaplimit == 0)
1403 _heaplimit = oldlimit;
1404 if (result == NULL)
1405 {
1406 /* Now we're really in trouble. We have to unfree
1407 the thing we just freed. Unfortunately it might
1408 have been coalesced with its neighbors. */
1409 if (_heapindex == block)
8d0d84d2 1410 (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
74ad5c7f
KH
1411 else
1412 {
d0baac98 1413 void *previous
8d0d84d2
YM
1414 = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
1415 (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
1416 _free_internal_nolock (previous);
74ad5c7f 1417 }
2f213514 1418 goto out;
74ad5c7f
KH
1419 }
1420 if (ptr != result)
1421 memmove (result, ptr, blocks * BLOCKSIZE);
1422 }
1423 break;
1424
1425 default:
1426 /* Old size is a fragment; type is logarithm
1427 to base two of the fragment size. */
d0baac98
PE
1428 if (size > (size_t) (1 << (type - 1)) &&
1429 size <= (size_t) (1 << type))
74ad5c7f
KH
1430 /* The new size is the same kind of fragment. */
1431 result = ptr;
1432 else
1433 {
1434 /* The new size is different; allocate a new space,
1435 and copy the lesser of the new size and the old. */
8d0d84d2 1436 result = _malloc_internal_nolock (size);
74ad5c7f 1437 if (result == NULL)
2f213514 1438 goto out;
d0baac98 1439 memcpy (result, ptr, min (size, (size_t) 1 << type));
8d0d84d2 1440 _free_internal_nolock (ptr);
74ad5c7f
KH
1441 }
1442 break;
1443 }
1444
5dcab13e 1445 PROTECT_MALLOC_STATE (1);
2f213514 1446 out:
8d0d84d2
YM
1447 return result;
1448}
1449
/* Resize PTR to SIZE bytes, holding the malloc mutex around the real
   work.  Thread-safe front end for `_realloc_internal_nolock'.  */
void *
_realloc_internal (void *ptr, size_t size)
{
  void *value;

  LOCK ();
  value = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return value;
}
1461
d0baac98
PE
1462void *
1463realloc (void *ptr, size_t size)
74ad5c7f 1464{
d0baac98 1465 void *(*hook) (void *, size_t);
8d0d84d2 1466
74ad5c7f
KH
1467 if (!__malloc_initialized && !__malloc_initialize ())
1468 return NULL;
1469
8d0d84d2
YM
1470 hook = __realloc_hook;
1471 return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
74ad5c7f
KH
1472}
1473/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1474
1475This library is free software; you can redistribute it and/or
423a1f3c 1476modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1477published by the Free Software Foundation; either version 2 of the
1478License, or (at your option) any later version.
1479
1480This library is distributed in the hope that it will be useful,
1481but WITHOUT ANY WARRANTY; without even the implied warranty of
1482MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1483General Public License for more details.
74ad5c7f 1484
423a1f3c
JB
1485You should have received a copy of the GNU General Public
1486License along with this library; see the file COPYING. If
3ef97fb6
LK
1487not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1488Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
1489
1490 The author may be reached (Email) at the address mike@ai.mit.edu,
1491 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1492
74ad5c7f
KH
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.
   Returns NULL if NMEMB * SIZE would overflow size_t, or if the
   underlying `malloc' fails.  */
void *
calloc (register size_t nmemb, register size_t size)
{
  register void *result;
  size_t bytes;

  /* Guard against overflow in NMEMB * SIZE; a wrapped product would
     make malloc hand back a block far smaller than the caller asked
     for (CERT MEM07-C).  */
  if (size != 0 && nmemb > SIZE_MAX / size)
    return NULL;

  bytes = nmemb * size;
  result = malloc (bytes);

  if (result != NULL)
    (void) memset (result, 0, bytes);

  return result;
}
1505/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1506This file is part of the GNU C Library.
1507
1508The GNU C Library is free software; you can redistribute it and/or modify
1509it under the terms of the GNU General Public License as published by
1510the Free Software Foundation; either version 2, or (at your option)
1511any later version.
1512
1513The GNU C Library is distributed in the hope that it will be useful,
1514but WITHOUT ANY WARRANTY; without even the implied warranty of
1515MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1516GNU General Public License for more details.
1517
1518You should have received a copy of the GNU General Public License
1519along with the GNU C Library; see the file COPYING. If not, write to
3ef97fb6
LK
1520the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
1521MA 02110-1301, USA. */
74ad5c7f 1522
65f451d0
DN
1523/* uClibc defines __GNU_LIBRARY__, but it is not completely
1524 compatible. */
5e617bc2 1525#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
74ad5c7f 1526#define __sbrk sbrk
65f451d0 1527#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
74ad5c7f
KH
1528/* It is best not to declare this and cast its result on foreign operating
1529 systems with potentially hostile include files. */
1530
d0baac98 1531extern void *__sbrk (ptrdiff_t increment);
65f451d0 1532#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
74ad5c7f
KH
1533
1534#ifndef NULL
1535#define NULL 0
1536#endif
1537
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *space;

#if defined (CYGWIN)
  /* Until unexec has run, serve requests from the static bss heap.  */
  if (!bss_sbrk_did_unexec)
    return bss_sbrk (increment);
#endif

  space = (void *) __sbrk (increment);
  return space == (void *) -1 ? NULL : space;
}
1556/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1557
1558This library is free software; you can redistribute it and/or
423a1f3c 1559modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1560published by the Free Software Foundation; either version 2 of the
1561License, or (at your option) any later version.
1562
1563This library is distributed in the hope that it will be useful,
1564but WITHOUT ANY WARRANTY; without even the implied warranty of
1565MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1566General Public License for more details.
74ad5c7f 1567
423a1f3c
JB
1568You should have received a copy of the GNU General Public
1569License along with this library; see the file COPYING. If
3ef97fb6
LK
1570not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1571Fifth Floor, Boston, MA 02110-1301, USA. */
74ad5c7f 1572
d0baac98 1573void *(*__memalign_hook) (size_t size, size_t alignment);
74ad5c7f 1574
/* Allocate SIZE bytes whose address is a multiple of ALIGNMENT.
   The returned pointer may lie inside a larger malloc'd block; such
   pointers are recorded in `_aligned_blocks' so `free' can map them
   back to the exact block address.  Returns NULL on failure.  */
void *
memalign (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (uintptr_t) result % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (uintptr_t) result % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      /* Reuse a free slot in the list if there is one.  */
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      /* No record could be made; give the block back rather than hand
	 out a pointer `free' would not recognize.  */
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
1647
72359c32
YM
1648#ifndef ENOMEM
1649#define ENOMEM 12
1650#endif
1651
1652#ifndef EINVAL
1653#define EINVAL 22
1654#endif
1655
/* POSIX-style aligned allocation: store an ALIGNMENT-aligned block of
   SIZE bytes in *MEMPTR.  Returns 0 on success, EINVAL if ALIGNMENT
   is zero, not a power of two, or not a multiple of sizeof (void *),
   and ENOMEM when the allocation fails.  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *block;

  /* Validate ALIGNMENT per POSIX before touching the heap.  */
  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  block = memalign (alignment, size);
  if (block == NULL)
    return ENOMEM;

  *memptr = block;
  return 0;
}
1674
74ad5c7f
KH
1675/* Allocate memory on a page boundary.
1676 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1677
1678This library is free software; you can redistribute it and/or
423a1f3c 1679modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1680published by the Free Software Foundation; either version 2 of the
1681License, or (at your option) any later version.
1682
1683This library is distributed in the hope that it will be useful,
1684but WITHOUT ANY WARRANTY; without even the implied warranty of
1685MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1686General Public License for more details.
74ad5c7f 1687
423a1f3c
JB
1688You should have received a copy of the GNU General Public
1689License along with this library; see the file COPYING. If
3ef97fb6
LK
1690not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1691Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
1692
1693 The author may be reached (Email) at the address mike@ai.mit.edu,
1694 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1695
74ad5c7f
KH
1696/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
1697 on MSDOS, where it conflicts with a system header file. */
1698
d0baac98 1699#ifndef GMALLOC_INHIBIT_VALLOC
74ad5c7f 1700
d0baac98
PE
1701/* Allocate SIZE bytes on a page boundary. */
1702extern void *valloc (size_t);
74ad5c7f 1703
d0baac98
PE
1704#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
1705# include "getpagesize.h"
1706#elif !defined getpagesize
1707extern int getpagesize (void);
74ad5c7f
KH
1708#endif
1709
/* System page size, looked up lazily on the first call to valloc.  */
static size_t pagesize;

/* Allocate SIZE bytes aligned on a page boundary.  */
void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();
  return memalign (pagesize, size);
}
1720
1721#endif /* Not ELIDE_VALLOC. */
a3ba27da
GM
1722
1723#ifdef GC_MCHECK
1724
1725/* Standard debugging hooks for `malloc'.
1726 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1727 Written May 1989 by Mike Haertel.
1728
1729This library is free software; you can redistribute it and/or
423a1f3c 1730modify it under the terms of the GNU General Public License as
a3ba27da
GM
1731published by the Free Software Foundation; either version 2 of the
1732License, or (at your option) any later version.
1733
1734This library is distributed in the hope that it will be useful,
1735but WITHOUT ANY WARRANTY; without even the implied warranty of
1736MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1737General Public License for more details.
a3ba27da 1738
423a1f3c
JB
1739You should have received a copy of the GNU General Public
1740License along with this library; see the file COPYING. If
3ef97fb6
LK
1741not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1742Fifth Floor, Boston, MA 02110-1301, USA.
a3ba27da
GM
1743
1744 The author may be reached (Email) at the address mike@ai.mit.edu,
1745 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1746
a3ba27da 1747#include <stdio.h>
a3ba27da
GM
1748
1749/* Old hook values. */
d0baac98
PE
1750static void (*old_free_hook) (void *ptr);
1751static void *(*old_malloc_hook) (size_t size);
1752static void *(*old_realloc_hook) (void *ptr, size_t size);
a3ba27da
GM
1753
1754/* Function to call when something awful happens. */
f57e2426 1755static void (*abortfunc) (enum mcheck_status);
a3ba27da
GM
1756
1757/* Arbitrary magical numbers. */
d0baac98
PE
1758#define MAGICWORD (SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
1759#define MAGICFREE (SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
a3ba27da
GM
1760#define MAGICBYTE ((char) 0xd7)
1761#define MALLOCFLOOD ((char) 0x93)
1762#define FREEFLOOD ((char) 0x95)
1763
/* Header prepended to every block handed out while mcheck is active.
   The pointer given to the user is hdr + 1; a MAGICBYTE is stored just
   past the user area to catch overruns (see checkhdr/mallochook).  */
struct hdr
  {
    size_t size;		/* Exact size requested by user.  */
    size_t magic;		/* Magic number to check header integrity.  */
  };
1769
a3ba27da 1770static enum mcheck_status
d0baac98 1771checkhdr (const struct hdr *hdr)
a3ba27da
GM
1772{
1773 enum mcheck_status status;
1774 switch (hdr->magic)
1775 {
1776 default:
1777 status = MCHECK_HEAD;
1778 break;
1779 case MAGICFREE:
1780 status = MCHECK_FREE;
1781 break;
1782 case MAGICWORD:
1783 if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
1784 status = MCHECK_TAIL;
1785 else
1786 status = MCHECK_OK;
1787 break;
1788 }
1789 if (status != MCHECK_OK)
1790 (*abortfunc) (status);
1791 return status;
1792}
1793
a3ba27da 1794static void
d0baac98 1795freehook (void *ptr)
a3ba27da
GM
1796{
1797 struct hdr *hdr;
177c0ea7 1798
a3ba27da
GM
1799 if (ptr)
1800 {
1801 hdr = ((struct hdr *) ptr) - 1;
1802 checkhdr (hdr);
1803 hdr->magic = MAGICFREE;
0e926e56 1804 memset (ptr, FREEFLOOD, hdr->size);
a3ba27da
GM
1805 }
1806 else
1807 hdr = NULL;
177c0ea7 1808
a3ba27da
GM
1809 __free_hook = old_free_hook;
1810 free (hdr);
1811 __free_hook = freehook;
1812}
1813
d0baac98
PE
1814static void *
1815mallochook (size_t size)
a3ba27da
GM
1816{
1817 struct hdr *hdr;
1818
1819 __malloc_hook = old_malloc_hook;
d0baac98 1820 hdr = malloc (sizeof (struct hdr) + size + 1);
a3ba27da
GM
1821 __malloc_hook = mallochook;
1822 if (hdr == NULL)
1823 return NULL;
1824
1825 hdr->size = size;
1826 hdr->magic = MAGICWORD;
1827 ((char *) &hdr[1])[size] = MAGICBYTE;
d0baac98
PE
1828 memset (hdr + 1, MALLOCFLOOD, size);
1829 return hdr + 1;
a3ba27da
GM
1830}
1831
/* mcheck replacement for `realloc': verify the old header, flood any
   shrunk-away tail, resize via the real `realloc' with all three
   hooks temporarily restored, then rebuild the guards.  */
static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      /* Flood the bytes that will be cut off before realloc can
	 recycle them.  */
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  /* Restore all the old hooks so the recursive realloc (and any
     malloc/free it performs internally) bypasses mcheck.  */
  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  /* Flood only the newly grown region; the old contents stay.  */
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}
1865
1866static void
d0baac98 1867mabort (enum mcheck_status status)
a3ba27da
GM
1868{
1869 const char *msg;
1870 switch (status)
1871 {
1872 case MCHECK_OK:
1873 msg = "memory is consistent, library is buggy";
1874 break;
1875 case MCHECK_HEAD:
1876 msg = "memory clobbered before allocated block";
1877 break;
1878 case MCHECK_TAIL:
1879 msg = "memory clobbered past end of allocated block";
1880 break;
1881 case MCHECK_FREE:
1882 msg = "block freed twice";
1883 break;
1884 default:
1885 msg = "bogus mcheck_status, library is buggy";
1886 break;
1887 }
1888#ifdef __GNU_LIBRARY__
1889 __libc_fatal (msg);
1890#else
1891 fprintf (stderr, "mcheck: %s\n", msg);
1892 fflush (stderr);
1893 abort ();
1894#endif
1895}
1896
1897static int mcheck_used = 0;
1898
1899int
d0baac98 1900mcheck (void (*func) (enum mcheck_status))
a3ba27da
GM
1901{
1902 abortfunc = (func != NULL) ? func : &mabort;
1903
1904 /* These hooks may not be safely inserted if malloc is already in use. */
1905 if (!__malloc_initialized && !mcheck_used)
1906 {
1907 old_free_hook = __free_hook;
1908 __free_hook = freehook;
1909 old_malloc_hook = __malloc_hook;
1910 __malloc_hook = mallochook;
1911 old_realloc_hook = __realloc_hook;
1912 __realloc_hook = reallochook;
1913 mcheck_used = 1;
1914 }
1915
1916 return mcheck_used ? 0 : -1;
1917}
1918
1919enum mcheck_status
d0baac98 1920mprobe (void *ptr)
a3ba27da
GM
1921{
1922 return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
1923}
1924
1925#endif /* GC_MCHECK */