Fix recently-introduced typos in Windows port.
[bpt/emacs.git] / src / gmalloc.c
/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#include <config.h>

#ifdef HAVE_PTHREAD
#define USE_PTHREAD
#endif

#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef __cplusplus
extern "C"
{
#endif

#include <stddef.h>

/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size);
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size);
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size);
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#ifdef MSDOS
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread (void);
#endif

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof (int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
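
/* Worked example (added for illustration; not part of the original
   source): on a machine with 32-bit int, INT_BIT is 32, so BLOCKLOG
   is 12 and BLOCKSIZE is 4096.  A request for 10000 bytes is "large"
   (> BLOCKSIZE / 2) and spans BLOCKIFY (10000) == 3 whole blocks; a
   request for 100 bytes is "small" and is served from a block split
   into 128-byte fragments, 128 being the smallest power of two that
   can hold the request.  */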

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP		(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
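
/* Note (added for illustration; not part of the original source):
   block numbers are 1-based, so BLOCK (_heapbase) == 1 and
   ADDRESS (1) == _heapbase; index 0 of _heapinfo serves as the anchor
   of the free-cluster list.  For any block-aligned address A,
   ADDRESS (BLOCK (A)) == A.  */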

/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;	/* The address that memalign returned.  */
    void *exact;	/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()						\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_malloc_mutex);		\
  } while (0)
#define UNLOCK()					\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_malloc_mutex);		\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
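
/* Usage sketch (added for illustration; not part of the original
   source): the entry points bracket their critical sections as in

	LOCK ();
	result = _malloc_internal_nolock (size);
	UNLOCK ();

   The macros expand to nothing unless USE_PTHREAD is defined, and
   even then they are no-ops until malloc_enable_thread has set
   _malloc_thread_enabled_p, so single-threaded startup pays no
   locking cost.  */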

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern void *malloc_find_object_address (void *ptr);

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern void *(*__morecore) (ptrdiff_t size);

/* Default value of `__morecore'.  */
extern void *__default_morecore (ptrdiff_t size);

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) (void);

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);
/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) (void);
extern void (*__free_hook) (void *ptr);
extern void *(*__malloc_hook) (size_t size);
extern void *(*__realloc_hook) (void *ptr, size_t size);
extern void *(*__memalign_hook) (size_t size, size_t alignment);

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);
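
/* Usage sketch (added for illustration; not part of the original
   source).  mcheck must precede the first allocation; mprobe then
   reports on an individual block.  */
#if 0
int
example_mcheck_use (void)
{
  mcheck (NULL);			/* Before the first malloc.  */
  char *p = malloc (16);
  return mprobe (p) == MCHECK_OK;	/* Nonzero if P is undamaged.  */
}
#endif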

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);
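
/* Usage sketch (added for illustration; not part of the original
   source; assumes <stdio.h> for fprintf).  */
#if 0
void
example_mstats_use (void)
{
  struct mstats s = mstats ();
  fprintf (stderr, "heap: %lu bytes total, %lu in use\n",
	   (unsigned long) s.bytes_total, (unsigned long) s.bytes_used);
}
#endif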

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings (void *start, void (*warnfun) (const char *));

#ifdef __cplusplus
}
#endif

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <errno.h>

/* On Cygwin there are two heaps.  temacs uses the static heap
   (defined in sheap.c and managed with bss_sbrk), and the dumped
   emacs uses the Cygwin heap (managed with sbrk).  When emacs starts
   on Cygwin, it reinitializes malloc, and we save the old info for
   use by free and realloc if they're called with a pointer into the
   static heap.

   Currently (2011-08-16) the Cygwin build doesn't use ralloc.c; if
   this is changed in the future, we'll have to similarly deal with
   reinitializing ralloc.  */
#ifdef CYGWIN
extern void *bss_sbrk (ptrdiff_t size);
extern int bss_sbrk_did_unexec;
char *bss_sbrk_heapbase;	/* _heapbase for static heap */
malloc_info *bss_sbrk_heapinfo;	/* _heapinfo for static heap */
#endif
void *(*__morecore) (ptrdiff_t size) = __default_morecore;

/* Debugging hook for `malloc'.  */
void *(*__malloc_hook) (size_t size);

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static size_t heapsize;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) (void);
void (*__after_morecore_hook) (void);

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif

/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
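
/* Worked example (added for illustration; not part of the original
   source): if __morecore returns 0x100100 and BLOCKSIZE is 4096
   (0x1000), then adj is 0x100, so the break is advanced by another
   0x1000 - 0x100 = 0xF00 bytes and the caller receives 0x101000, the
   next block boundary.  */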

/* Get SIZE bytes, if we can get them starting at POSITION.
   Return the address of the space we got.
   If we cannot get space at POSITION, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}

/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static inline void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
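
/* Illustration (added; not part of the original source): for a
   3-block object starting at block B, the table reads
   info[B].busy.info.size == 3, info[B+1] == -1, info[B+2] == -2, so
   the first block of any interior address is found by adding the
   (negative) stored size to the block number.  */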

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif

static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

#ifdef CYGWIN
  if (bss_sbrk_did_unexec)
    /* We're reinitializing the dumped emacs.  */
    {
      bss_sbrk_heapbase = _heapbase;
      bss_sbrk_heapinfo = _heapinfo;
      memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
    }
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc, which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not a right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
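
/* Hook usage sketch (added for illustration; not part of the original
   source).  A debugging build can interpose on every allocation by
   assigning to __malloc_hook before other threads start; the names
   below are hypothetical.  */
#if 0
static size_t n_allocs;		/* Hypothetical counter.  */

static void *
counting_malloc (size_t size)
{
  ++n_allocs;
  return _malloc_internal (size);
}

/* Somewhere early in startup: __malloc_hook = counting_malloc;  */
#endif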

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Debugging hook for free.  */
void (*__free_hook) (void *__ptr);

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* We're being asked to free something in the static heap.  */
    return;
#endif

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */
void
free (void *ptr)
{
  void (*hook) (void *) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#define min(A, B) ((A) < (B) ? (A) : (B))

/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
#ifdef CYGWIN
void *
special_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  oldsize =
    type == 0 ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
  result = _malloc_internal_nolock (size);
  if (result != NULL)
    memcpy (result, ptr, min (oldsize, size));
  return result;
}
#endif

/* Debugging hook for realloc.  */
void *(*__realloc_hook) (void *ptr, size_t size);

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

#ifdef CYGWIN
  if ((char *) ptr < _heapbase)
    /* ptr points into the static heap.  */
    return special_realloc (ptr, size);
#endif

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (register size_t nmemb, register size_t size)
{
  register void *result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.  */

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#if defined (CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.  */

void *(*__memalign_hook) (size_t size, size_t alignment);
74ad5c7f 1568
d0baac98
PE
1569void *
1570memalign (size_t alignment, size_t size)
74ad5c7f 1571{
d0baac98
PE
1572 void *result;
1573 size_t adj, lastadj;
1574 void *(*hook) (size_t, size_t) = __memalign_hook;
74ad5c7f 1575
8d0d84d2
YM
1576 if (hook)
1577 return (*hook) (alignment, size);
74ad5c7f
KH
1578
1579 /* Allocate a block with enough extra space to pad the block with up to
1580 (ALIGNMENT - 1) bytes if necessary. */
1581 result = malloc (size + alignment - 1);
1582 if (result == NULL)
1583 return NULL;
1584
1585 /* Figure out how much we will need to pad this particular block
1586 to achieve the required alignment. */
d0baac98 1587 adj = (uintptr_t) result % alignment;
74ad5c7f
KH
1588
1589 do
1590 {
1591 /* Reallocate the block with only as much excess as it needs. */
1592 free (result);
1593 result = malloc (adj + size);
1594 if (result == NULL) /* Impossible unless interrupted. */
1595 return NULL;
1596
1597 lastadj = adj;
d0baac98 1598 adj = (uintptr_t) result % alignment;
74ad5c7f
KH
1599 /* It's conceivable we might have been so unlucky as to get a
1600 different block with weaker alignment. If so, this block is too
1601 short to contain SIZE after alignment correction. So we must
1602 try again and get another block, slightly larger. */
1603 } while (adj > lastadj);
1604
1605 if (adj != 0)
1606 {
1607 /* Record this block in the list of aligned blocks, so that `free'
1608 can identify the pointer it is passed, which will be in the middle
1609 of an allocated block. */
1610
1611 struct alignlist *l;
8d0d84d2 1612 LOCK_ALIGNED_BLOCKS ();
74ad5c7f
KH
1613 for (l = _aligned_blocks; l != NULL; l = l->next)
1614 if (l->aligned == NULL)
1615 /* This slot is free. Use it. */
1616 break;
1617 if (l == NULL)
1618 {
38182d90 1619 l = malloc (sizeof *l);
8d0d84d2 1620 if (l != NULL)
74ad5c7f 1621 {
8d0d84d2
YM
1622 l->next = _aligned_blocks;
1623 _aligned_blocks = l;
74ad5c7f 1624 }
74ad5c7f 1625 }
8d0d84d2
YM
1626 if (l != NULL)
1627 {
1628 l->exact = result;
1629 result = l->aligned = (char *) result + alignment - adj;
1630 }
1631 UNLOCK_ALIGNED_BLOCKS ();
1632 if (l == NULL)
1633 {
1634 free (result);
1635 result = NULL;
1636 }
74ad5c7f
KH
1637 }
1638
1639 return result;
1640}
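
/* Illustrative only: how the memalign above behaves.  The function name
   example_memalign is hypothetical, and the "#if 0" guard keeps it out
   of the build.  */
#if 0
static void
example_memalign (void)
{
  /* Request 100 bytes on a 64-byte boundary.  */
  void *p = memalign (64, 100);
  if (p != NULL)
    {
      /* The result is 64-byte aligned: (uintptr_t) p % 64 == 0.  Because
	 the adjusted pointer was recorded in _aligned_blocks, free accepts
	 it even though it points into the middle of the block that malloc
	 actually returned.  */
      free (p);
    }
}
#endif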

#ifndef ENOMEM
#define ENOMEM 12
#endif

#ifndef EINVAL
#define EINVAL 22
#endif

int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
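
/* Illustrative only: posix_memalign reports failure through its return
   value rather than errno.  The function name example_posix_memalign is
   hypothetical; the "#if 0" guard keeps it out of the build.  */
#if 0
static void
example_posix_memalign (void)
{
  void *p;

  /* An alignment of 24 is a multiple of sizeof (void *) but not a power
     of two, so it is rejected with EINVAL.  */
  if (posix_memalign (&p, 24, 100) == EINVAL)
    ;

  /* A valid request returns 0 and stores the block through MEMPTR.  */
  if (posix_memalign (&p, 128, 100) == 0)
    free (p);
}
#endif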

/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);

#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
# include "getpagesize.h"
#elif !defined getpagesize
extern int getpagesize (void);
#endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return memalign (pagesize, size);
}
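
/* Illustrative only: valloc is simply memalign with the page size, which
   is looked up once and cached in `pagesize'.  The function name
   example_valloc is hypothetical; the "#if 0" guard keeps it out of the
   build.  */
#if 0
static void
example_valloc (void)
{
  /* The returned block starts exactly on a page boundary.  */
  void *page = valloc (4000);
  if (page != NULL)
    free (page);
}
#endif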

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <stdio.h>

/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	(SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE	(SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* Magic number to check header integrity.  */
  };
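
/* Picture of a live block while these hooks are installed (see mallochook
   below): the header sits just before the pointer handed to the user, and
   one MAGICBYTE sentinel sits just past the requested size, so both header
   and tail clobbering can be detected:

     | struct hdr | SIZE user bytes | MAGICBYTE |
		  ^ pointer returned to the user  */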

static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  memset (hdr + 1, MALLOCFLOOD, size);
  return hdr + 1;
}

static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}

static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (void *ptr)
{
  /* PTR is the pointer malloc handed to the user, so step back over the
     header before checking it, as freehook does; passing PTR itself
     would misread the user's data as the magic word.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}
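
/* Illustrative only: the intended use of mcheck and mprobe when GC_MCHECK
   is defined.  The function name example_mcheck is hypothetical; the
   "#if 0" guard keeps it out of the build.  */
#if 0
static void
example_mcheck (void)
{
  /* Install the checking hooks; this must happen before the first call
     to malloc, and returns 0 on success.  */
  if (mcheck (NULL) != 0)
    return;

  char *p = malloc (16);
  if (p != NULL && mprobe (p) == MCHECK_OK)
    {
      p[16] = 0;	/* Clobber the MAGICBYTE sentinel...  */
      mprobe (p);	/* ...so this reports MCHECK_TAIL via abortfunc.  */
    }
  free (p);
}
#endif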
1911
1912#endif /* GC_MCHECK */