/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
                  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_PTHREAD
#define USE_PTHREAD
#endif

#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *

#include <string.h>
#include <limits.h>
#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#endif /* _MALLOC_INTERNAL.  */


#ifdef __cplusplus
extern "C"
{
#endif

#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t


/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free PP ((__ptr_t __ptr));

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#if !defined (_MALLOC_INTERNAL) || defined (MSDOS) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
                             __malloc_size_t __size));
extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
                               __malloc_size_t size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#endif

#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof(int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP		(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
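
/* Illustrative sketch, not part of the original file: how a request
   size maps onto this scheme.  With INT_BIT > 16, BLOCKLOG is 12 and
   BLOCKSIZE is 4096; requests of at most BLOCKSIZE / 2 bytes get a
   power-of-two fragment, larger ones get BLOCKIFY (SIZE) whole blocks.
   Kept under `#if 0' like the other inactive code in this file.  */
#if 0
#include <stdio.h>

static void
show_policy (size)
     __malloc_size_t size;
{
  if (size <= BLOCKSIZE / 2)
    {
      /* The same loop malloc uses below to pick a fragment size: the
         base-two log of the smallest power of two >= SIZE.  */
      __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;
      printf ("fragment of %lu bytes\n", 1UL << log);
    }
  else
    printf ("%lu whole block(s)\n", (unsigned long) BLOCKIFY (size));
}
/* show_policy (100)  prints "fragment of 128 bytes";
   show_policy (5000) prints "2 whole block(s)" when BLOCKSIZE is 4096.  */
#endif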

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
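
/* Illustrative sketch, not part of the original file: BLOCK and
   ADDRESS invert each other, and BLOCK of an interior pointer yields
   the block containing it because the division truncates.  Following
   the negative back-pointers described above recovers the start of a
   large object, which is essentially what `malloc_find_object_address'
   must do.  */
#if 0
static __ptr_t
object_start (ptr)
     __ptr_t ptr;
{
  __malloc_size_t block = BLOCK (ptr);
  if (_heapinfo[block].busy.type == 0
      && _heapinfo[block].busy.info.size < 0)
    /* An interior block of a multiblock object stores minus the
       distance back to the object's first block.  */
    block += _heapinfo[block].busy.info.size;
  return ADDRESS (block);
}
#endif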

/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memaligned returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));
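
/* Illustrative sketch, not part of the original file: `__morecore' can
   be repointed at any sbrk-like function, as long as successive calls
   return contiguous memory.  A bump pointer over a static arena
   satisfies that contract (the arena size here is arbitrary).  */
#if 0
static char morecore_arena[4 * 1024 * 1024];
static __malloc_size_t morecore_used;

static __ptr_t
arena_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  char *oldbrk = morecore_arena + morecore_used;
  if (increment < 0
      ? morecore_used < (__malloc_size_t) -increment
      : morecore_used + increment > sizeof morecore_arena)
    return NULL;		/* out of arena space */
  morecore_used += increment;
  return oldbrk;		/* previous break, as sbrk returns */
}
/* Install before the first malloc:  __morecore = arena_morecore;  */
#endif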

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				       __malloc_size_t __alignment));

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
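
/* Illustrative sketch, not part of the original file: typical use of
   the checks declared above.  `mcheck' must be called before the first
   `malloc'; this file itself does `mcheck (NULL)' at initialization
   when compiled with GC_MCHECK (see malloc_initialize_1 below).  */
#if 0
#include <stdio.h>
#include <stdlib.h>

static void
check_failed (status)
     enum mcheck_status status;
{
  fprintf (stderr, "heap inconsistency: %d\n", (int) status);
  abort ();
}

int
main ()
{
  char *p;
  mcheck (check_failed);	/* install the hooks before any malloc */
  p = (char *) malloc (16);
  /* Writing to p[-1] or p[16] would clobber the guards.  */
  if (mprobe ((__ptr_t) p) != MCHECK_OK)	/* the check free would do */
    fprintf (stderr, "block damaged\n");
  free (p);
  return 0;
}
#endif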

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
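
/* Illustrative sketch, not part of the original file: reading the
   instrumentation through `mstats'.  The fields mirror the
   _chunks_used / _bytes_used / _chunks_free / _bytes_free counters
   declared earlier.  */
#if 0
#include <stdio.h>

static void
report_heap ()
{
  struct mstats s = mstats ();
  fprintf (stderr, "heap: %lu bytes total; %lu used in %lu chunks; %lu free\n",
	   (unsigned long) s.bytes_total,
	   (unsigned long) s.bytes_used,
	   (unsigned long) s.chunks_used,
	   (unsigned long) s.bytes_free);
}
#endif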

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
				 void (*__warnfun) PP ((const char *))));


/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
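
/* Illustrative sketch, not part of the original file: the relocating
   allocator is used through a handle because the block may move; the
   caller re-reads the handle rather than caching the pointer.  (The
   implementation lives in ralloc.c, not in this file.)  */
#if 0
static __ptr_t buffer_handle;

static void
relocating_example ()
{
  char *p = (char *) r_alloc (&buffer_handle, 1024);
  /* ... the allocator may slide the block; go through buffer_handle
     to find its current address ...  */
  p = (char *) r_re_alloc (&buffer_handle, 4096);
  r_alloc_free (&buffer_handle);
}
#endif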


#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* How to really get more memory.  */
#if defined(CYGWIN)
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
#endif
__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif


/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t)size < 0)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
						  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      new = (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}

/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static inline void
register_heapinfo (void)
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif
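
/* Illustrative sketch, not part of the original file: the intended
   calling pattern.  The mutexes are initialized here rather than on
   first use, so `malloc_enable_thread' must run while the program is
   still single-threaded, before any thread that allocates is created.
   (`worker' is a hypothetical thread function.)  */
#if 0
#include <pthread.h>

extern void *worker (void *);

int
main ()
{
  pthread_t t;
  malloc_enable_thread ();	/* single-threaded: safe to set up mutexes */
  pthread_create (&t, NULL, worker, NULL);	/* workers may malloc freely */
  pthread_join (t, NULL);
  return 0;
}
#endif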

static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal_nolock
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
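
/* Illustrative sketch, not part of the original file: a tracing
   `__malloc_hook' in the glibc style.  The hook uninstalls itself
   around the recursive call so the inner `malloc' takes the normal
   path.  As the comment above says, hook assignments are not
   mutex-protected, so this pattern is only safe single-threaded.  */
#if 0
#include <stdio.h>

static __ptr_t (*saved_malloc_hook) (__malloc_size_t);

static __ptr_t
tracing_malloc (size)
     __malloc_size_t size;
{
  __ptr_t p;
  __malloc_hook = saved_malloc_hook;	/* avoid recursing into ourselves */
  p = malloc (size);
  fprintf (stderr, "malloc (%lu) = %p\n", (unsigned long) size, p);
  __malloc_hook = tracing_malloc;	/* re-arm the hook */
  return p;
}

static void
install_trace ()
{
  saved_malloc_hook = __malloc_hook;	/* usually NULL */
  __malloc_hook = tracing_malloc;
}
#endif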

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
								 * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
		     Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
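
/* Note, not part of the original file: `calloc' above multiplies
   NMEMB by SIZE without checking for overflow, so a huge element
   count can wrap around to a small allocation.  A defensive caller
   can reject that case first; a minimal sketch:  */
#if 0
static __ptr_t
checked_calloc (nmemb, size)
     __malloc_size_t nmemb;
     __malloc_size_t size;
{
  /* Fail instead of wrapping when the product overflows.  */
  if (size != 0 && nmemb > (__malloc_size_t) -1 / size)
    return NULL;
  return calloc (nmemb, size);
}
#endif
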
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined(__GNU_LIBRARY__) || defined(__UCLIBC__)
#define __sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined(CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
1603/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1604
1605This library is free software; you can redistribute it and/or
423a1f3c 1606modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1607published by the Free Software Foundation; either version 2 of the
1608License, or (at your option) any later version.
1609
1610This library is distributed in the hope that it will be useful,
1611but WITHOUT ANY WARRANTY; without even the implied warranty of
1612MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1613General Public License for more details.
74ad5c7f 1614
423a1f3c
JB
1615You should have received a copy of the GNU General Public
1616License along with this library; see the file COPYING. If
3ef97fb6
LK
1617not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1618Fifth Floor, Boston, MA 02110-1301, USA. */
74ad5c7f
KH
1619
1620#ifndef _MALLOC_INTERNAL
1621#define _MALLOC_INTERNAL
1622#include <malloc.h>
1623#endif
1624
eec2d1de
EZ
1625__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
1626 __malloc_size_t __alignment));
74ad5c7f
KH
1627
__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  ADJ is the number of padding
     bytes needed, not the raw misalignment; using the misalignment
     here would make the reallocation below too small to hold SIZE
     bytes after the alignment correction.  */
  adj = alignment - (unsigned long int) ((char *) result - (char *) NULL) % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    do
      {
	/* Reallocate the block with only as much excess as it needs.  */
	free (result);
	result = malloc (size + adj);
	if (result == NULL)	/* Impossible unless interrupted.  */
	  return NULL;

	lastadj = adj;
	adj = alignment - (unsigned long int) ((char *) result - (char *) NULL) % alignment;
	if (adj == alignment)
	  adj = 0;
	/* It's conceivable we might have been so unlucky as to get a
	   different block with weaker alignment.  If so, this block is too
	   short to contain SIZE after alignment correction.  So we must
	   try again and get another block, slightly larger.  */
      }
    while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  /* ADJ is already the padding to add, so the aligned pointer
	     is simply RESULT + ADJ.  */
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}

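/* Example (not compiled in): each aligned pointer handed out above is
   remembered in `_aligned_blocks' so that `free' can map it back to the
   exact pointer malloc returned.  A minimal sketch of the resulting
   guarantee; `memalign_demo' is an illustrative name only.  */
#if 0
#include <stdio.h>

void
memalign_demo (void)
{
  __ptr_t p = memalign (64, 200);	/* 64-byte alignment.  */
  if (p != NULL
      && (unsigned long int) ((char *) p - (char *) NULL) % 64 == 0)
    printf ("aligned block at %p\n", p);
  free (p);			/* Looked up in _aligned_blocks.  */
}
#endif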
#ifndef ENOMEM
#define ENOMEM 12
#endif

#ifndef EINVAL
#define EINVAL 22
#endif

int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}

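/* Example (not compiled in): unlike `memalign', `posix_memalign'
   reports failure through its return value, so callers check the
   result code directly rather than errno.  A minimal sketch;
   `posix_memalign_demo' is an illustrative name only.  */
#if 0
void
posix_memalign_demo (void)
{
  __ptr_t buf;
  int err = posix_memalign (&buf, 4096, 8192);	/* Page-aligned buffer.  */
  if (err == 0)
    free (buf);
  /* err == EINVAL: alignment is zero, not a power of two, or not a
     multiple of sizeof (__ptr_t); err == ENOMEM: allocation failed.  */
}
#endif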
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
   This library is free software; see the GNU General Public License
   notice at the top of this file.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int.  */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
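
/* Example (not compiled in): `valloc (n)' is just `memalign' with the
   page size as the alignment; the page size is fetched once and cached
   in `pagesize'.  A minimal sketch; `valloc_demo' is an illustrative
   name only.  */
#if 0
void
valloc_demo (void)
{
  __ptr_t page = valloc (1);	/* One byte, but page-aligned.  */
  free (page);
}
#endif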

#endif /* Not ELIDE_VALLOC.  */

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.
   This library is free software; see the GNU General Public License
   notice at the top of this file.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef emacs
#include <stdio.h>
#else
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#include <stdio.h>
#endif
#endif

/* Old hook values.  */
static void (*old_free_hook) (__ptr_t ptr);
static __ptr_t (*old_malloc_hook) (__malloc_size_t size);
static __ptr_t (*old_realloc_hook) (__ptr_t ptr, __malloc_size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };

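/* Each block handed to the user while mcheck is active is laid out as
   follows; the flood bytes let `checkhdr' detect writes past either
   end of the user region (a sketch of the layout, not additional
   code):

      [struct hdr: size, MAGICWORD] [SIZE user bytes] [MAGICBYTE]

   On free, the magic word becomes MAGICFREE and the user bytes are
   flooded with FREEFLOOD, so double frees and writes through stale
   pointers can also be recognized.  */
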
static enum mcheck_status checkhdr (const struct hdr *);
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void freehook (__ptr_t);
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static __ptr_t mallochook (__malloc_size_t);
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  memset ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}

static __ptr_t reallochook (__ptr_t, __malloc_size_t);
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}

static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (func)
     void (*func) (enum mcheck_status);
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (__ptr_t ptr)
{
  /* PTR is the pointer the user got from `malloc'; the header sits
     just before it, as in `freehook' above.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}

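/* Example (not compiled in): the hooks can only be installed before
   the first allocation, so `mcheck' is typically called at the very
   top of `main'.  A minimal sketch:  */
#if 0
int
main (void)
{
  mcheck (NULL);		/* NULL means abort via `mabort'.  */

  {
    char *p = malloc (16);
    p[16] = 'x';		/* Clobber the trailing MAGICBYTE.  */
    mprobe (p);			/* Reports MCHECK_TAIL and aborts.  */
    free (p);
  }
  return 0;
}
#endif
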
#endif /* GC_MCHECK */