/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_GTK_AND_PTHREAD
#define USE_PTHREAD
#endif

#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES))
#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef PP
#define PP(args) ()
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */

#include <string.h>

#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#endif /* _MALLOC_INTERNAL.  */


#ifdef __cplusplus
extern "C"
{
#endif

#ifdef STDC_HEADERS
#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t
#else
#ifdef __GNUC__
#include <stddef.h>
#ifdef __SIZE_TYPE__
#define __malloc_size_t __SIZE_TYPE__
#endif
#endif
#ifndef __malloc_size_t
#define __malloc_size_t unsigned int
#endif
#define __malloc_ptrdiff_t int
#endif

#ifndef NULL
#define NULL 0
#endif


/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free PP ((__ptr_t __ptr));

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#if !defined (_MALLOC_INTERNAL) || defined (MSDOS) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
                             __malloc_size_t __size));
extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
                               __malloc_size_t size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#endif

#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof(int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP		(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8

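/* Illustrative sketch (not part of the original file): a few worked
   values for the macros above, assuming INT_BIT > 16 so that
   BLOCKLOG == 12 and BLOCKSIZE == 4096.  The GMALLOC_EXAMPLES guard
   is hypothetical and never defined; the block only documents the
   arithmetic.  */
#ifdef GMALLOC_EXAMPLES
#include <assert.h>
static void
example_block_arithmetic (void)
{
  /* BLOCKIFY rounds a byte count up to whole blocks.  */
  assert (BLOCKIFY (1) == 1);      /* 1 byte        -> 1 block.  */
  assert (BLOCKIFY (4096) == 1);   /* exact fit     -> 1 block.  */
  assert (BLOCKIFY (4097) == 2);   /* 1 byte over   -> 2 blocks.  */
  /* Requests of at most BLOCKSIZE / 2 bytes are served as
     power-of-two fragments; anything larger gets whole blocks.  */
  assert (BLOCKSIZE / 2 == 2048);
}
#endif /* GMALLOC_EXAMPLES */
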
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;

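/* Illustrative sketch (not part of the original file): how a busy
   three-block object is recorded in the table.  The first block holds
   the positive block count; trailing blocks hold negative offsets back
   to the first block, which is what the find-the-start logic relies
   on.  Guarded by the hypothetical GMALLOC_EXAMPLES macro.  */
#ifdef GMALLOC_EXAMPLES
static void
example_large_object_info (malloc_info *info, __malloc_size_t first)
{
  info[first].busy.type = 0;            /* 0 means a multiblock object.  */
  info[first].busy.info.size = 3;       /* Object spans three blocks.  */
  info[first + 1].busy.info.size = -1;  /* One block back to the start.  */
  info[first + 2].busy.info.size = -2;  /* Two blocks back to the start.  */
}
#endif /* GMALLOC_EXAMPLES */
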
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))

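/* Illustrative sketch (not part of the original file): BLOCK and
   ADDRESS are inverses for block-aligned addresses.  Block numbers
   start at 1, leaving index 0 of _heapinfo to anchor the free list.
   Guarded by the hypothetical GMALLOC_EXAMPLES macro.  */
#ifdef GMALLOC_EXAMPLES
#include <assert.h>
static void
example_block_address_mapping (void)
{
  char *p = _heapbase + 5 * BLOCKSIZE;	/* Start of the sixth block.  */
  assert (BLOCK (p) == 6);
  assert ((char *) ADDRESS (6) == p);
}
#endif /* GMALLOC_EXAMPLES */
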
/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

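/* Illustrative sketch (not part of the original file): the size-class
   computation malloc uses to index _fraghead.  A request of SIZE bytes
   (SIZE <= BLOCKSIZE / 2) is served from fragments of 1 << log bytes,
   where log is the ceiling of log2 (SIZE), but at least 1.  This is
   the same loop the allocator runs below.  Guarded by the hypothetical
   GMALLOC_EXAMPLES macro.  */
#ifdef GMALLOC_EXAMPLES
static int
example_fragment_log (__malloc_size_t size)
{
  int log = 1;
  --size;
  while ((size /= 2) != 0)
    ++log;
  return log;	/* E.g. 3 bytes -> 2 (4-byte frag), 100 -> 7 (128-byte).  */
}
#endif /* GMALLOC_EXAMPLES */
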
/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memaligned returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK() \
  do { \
    if (_malloc_thread_enabled_p) \
      pthread_mutex_lock (&_malloc_mutex); \
  } while (0)
#define UNLOCK() \
  do { \
    if (_malloc_thread_enabled_p) \
      pthread_mutex_unlock (&_malloc_mutex); \
  } while (0)
#define LOCK_ALIGNED_BLOCKS() \
  do { \
    if (_malloc_thread_enabled_p) \
      pthread_mutex_lock (&_aligned_blocks_mutex); \
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS() \
  do { \
    if (_malloc_thread_enabled_p) \
      pthread_mutex_unlock (&_aligned_blocks_mutex); \
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				       __malloc_size_t __alignment));

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total;  /* Total size of the heap.  */
    __malloc_size_t chunks_used;  /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	  /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free;  /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	  /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
				 void (*__warnfun) PP ((const char *))));


/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));


#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* How to really get more memory.  */
#if defined(CYGWIN)
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
#endif
__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif

/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t) size < 0)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
						  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      new = (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}

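/* Illustrative sketch (not part of the original file): the adjustment
   arithmetic used by align above.  Given a core pointer that is ADJ
   bytes past a block boundary, align asks __morecore for another
   BLOCKSIZE - ADJ bytes and bumps the result to the next boundary.
   Guarded by the hypothetical GMALLOC_EXAMPLES macro.  */
#ifdef GMALLOC_EXAMPLES
static unsigned long int
example_alignment_bump (unsigned long int addr)
{
  unsigned long int adj = addr % BLOCKSIZE;
  if (adj != 0)
    addr += BLOCKSIZE - adj;  /* E.g. 0x5010 -> 0x6000 when BLOCKSIZE is 0x1000.  */
  return addr;
}
#endif /* GMALLOC_EXAMPLES */
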
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static inline void
register_heapinfo (void)
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif

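/* Illustrative sketch (not part of the original file): intended use of
   malloc_enable_thread.  It must run in the main thread, after malloc
   works but before any other thread can allocate.  The worker and arg
   names are hypothetical.  Guarded by the hypothetical
   GMALLOC_EXAMPLES macro.  */
#if defined GMALLOC_EXAMPLES && defined USE_PTHREAD
static void
example_enable_threads (void *(*worker) (void *), void *arg)
{
  pthread_t tid;
  malloc_enable_thread ();	/* From here on, LOCK/UNLOCK really lock.  */
  pthread_create (&tid, NULL, worker, arg);
}
#endif /* GMALLOC_EXAMPLES && USE_PTHREAD */
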
static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc, which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal_nolock
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL check and its use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
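
/* Illustrative sketch (not part of the original file): installing a
   tracing __malloc_hook, in the glibc style this file follows.  The
   hook calls _malloc_internal rather than malloc so it does not
   recurse into itself.  trace_malloc is a hypothetical name.  Guarded
   by the hypothetical GMALLOC_EXAMPLES macro.  */
#ifdef GMALLOC_EXAMPLES
#include <stdio.h>
static __ptr_t
trace_malloc (__malloc_size_t size)
{
  __ptr_t p = _malloc_internal (size);	/* Bypass the hook.  */
  fprintf (stderr, "malloc (%lu) -> %p\n", (unsigned long) size, p);
  return p;
}
static void
example_install_malloc_hook (void)
{
  __malloc_hook = trace_malloc;
}
#endif /* GMALLOC_EXAMPLES */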

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif

/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
								 * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
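
/* Illustrative sketch (not part of the original file): the calloc
   above multiplies NMEMB * SIZE without checking for overflow, so a
   defensive caller can guard the product itself.  checked_calloc is a
   hypothetical name.  Guarded by the hypothetical GMALLOC_EXAMPLES
   macro.  */
#ifdef GMALLOC_EXAMPLES
static __ptr_t
checked_calloc (__malloc_size_t nmemb, __malloc_size_t size)
{
  if (size != 0 && nmemb > (__malloc_size_t) -1 / size)
    return NULL;		/* nmemb * size would wrap around.  */
  return calloc (nmemb, size);
}
#endif /* GMALLOC_EXAMPLES */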
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined(__GNU_LIBRARY__) || defined(__UCLIBC__)
#define __sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined(CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
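
/* Illustrative sketch (not part of the original file): __morecore can
   be repointed at any function that doles out contiguous memory, e.g.
   a fixed static arena on systems without sbrk.  example_arena and
   example_arena_morecore are hypothetical names.  Guarded by the
   hypothetical GMALLOC_EXAMPLES macro.  */
#ifdef GMALLOC_EXAMPLES
static char example_arena[1 << 20];	/* 1 MB of static core.  */
static __malloc_size_t example_break;

static __ptr_t
example_arena_morecore (__malloc_ptrdiff_t increment)
{
  __malloc_size_t old = example_break;
  if (increment < 0
      ? (__malloc_size_t) -increment > old
      : sizeof example_arena - old < (__malloc_size_t) increment)
    return NULL;
  example_break = old + increment;
  return example_arena + old;	/* Previous break, like sbrk.  */
}
/* Installed with:  __morecore = example_arena_morecore;  */
#endif /* GMALLOC_EXAMPLES */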
1635/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1636
1637This library is free software; you can redistribute it and/or
423a1f3c 1638modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1639published by the Free Software Foundation; either version 2 of the
1640License, or (at your option) any later version.
1641
1642This library is distributed in the hope that it will be useful,
1643but WITHOUT ANY WARRANTY; without even the implied warranty of
1644MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1645General Public License for more details.
74ad5c7f 1646
423a1f3c
JB
1647You should have received a copy of the GNU General Public
1648License along with this library; see the file COPYING. If
3ef97fb6
LK
1649not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1650Fifth Floor, Boston, MA 02110-1301, USA. */
74ad5c7f
KH
1651
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
                                __malloc_size_t __alignment));

__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary. */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment. */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs. */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted. */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger. */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block. */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it. */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
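
/* Worked example (editorial, not part of the source): suppose
   ALIGNMENT is 16 and malloc returns 0x1003.  Then
   adj = 0x1003 % 16 = 3, the block is padded by 16 - 3 = 13 bytes,
   and the pointer handed to the user is 0x1003 + 13 = 0x1010.  The
   pair (exact = 0x1003, aligned = 0x1010) is what the alignlist
   records so that `free' can recover the real block.  */
#if 0
#include <assert.h>
static void
memalign_demo (void)
{
  char *p = (char *) memalign (16, 100);
  /* The result is 16-byte aligned; free() accepts it because the
     alignlist maps it back to the exact malloc'd pointer.  */
  assert (((unsigned long int) p % 16) == 0);
  free (p);
}
#endif
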

#ifndef ENOMEM
#define ENOMEM 12
#endif

#ifndef EINVAL
#define EINVAL 22
#endif

int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
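
/* Usage sketch (editorial): POSIX reports failure through the return
   value rather than errno, and requires ALIGNMENT to be a power of two
   that is also a multiple of sizeof (void *) -- exactly the checks
   above.  The demo name is illustrative only.  */
#if 0
static __ptr_t
aligned_buffer_demo (void)
{
  __ptr_t buf;
  if (posix_memalign (&buf, 64, 1024) != 0)  /* 64 is a valid alignment */
    return NULL;                             /* EINVAL or ENOMEM */
  return buf;                                /* release with free (buf) */
}
#endif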

/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation. */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file. */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
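
/* Example (editorial): valloc is simply memalign with the alignment
   fixed at the system page size, so the result is suitable for
   page-granular operations such as mprotect.  */
#if 0
#include <sys/mman.h>
static __ptr_t
readonly_page_demo (void)
{
  __ptr_t page = valloc (__getpagesize ());
  if (page != NULL)
    mprotect (page, __getpagesize (), PROT_READ);  /* make it read-only */
  return page;
}
#endif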

#endif /* Not ELIDE_VALLOC. */

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation. */

#ifdef emacs
#include <stdio.h>
#else
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#include <stdio.h>
#endif
#endif

/* Old hook values. */
static void (*old_free_hook) (__ptr_t ptr);
static __ptr_t (*old_malloc_hook) (__malloc_size_t size);
static __ptr_t (*old_realloc_hook) (__ptr_t ptr, __malloc_size_t size);

/* Function to call when something awful happens. */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers. */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user. */
    unsigned long int magic;	/* Magic number to check header integrity. */
  };
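
/* Layout sketch (editorial, not part of the source): every block
   handed to the user is preceded by a struct hdr and followed by one
   guard byte, so a request for SIZE bytes really allocates
   sizeof (struct hdr) + SIZE + 1 bytes:

       [ size | magic ][ SIZE user bytes ... ][ MAGICBYTE ]
       ^ struct hdr    ^ pointer returned     ^ tail check

   Corrupting the header yields MCHECK_HEAD, overrunning the block
   clobbers MAGICBYTE and yields MCHECK_TAIL, and a double free is
   caught because `freehook' rewrites magic to MAGICFREE.  */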

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#define flood memset
#else
static void flood (__ptr_t, int, __malloc_size_t);
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;
  while (size--)
    *cp++ = val;
}
#endif

static enum mcheck_status checkhdr (const struct hdr *);
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void freehook (__ptr_t);
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static __ptr_t mallochook (__malloc_size_t);
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}

static __ptr_t reallochook (__ptr_t, __malloc_size_t);
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}

static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (func)
     void (*func) (enum mcheck_status);
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use. */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
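
/* Usage sketch (editorial, not part of the source): the hooks can only
   be installed before the first malloc, so call mcheck very early;
   passing NULL selects the default `mabort' handler.  Afterwards
   mprobe checks any live block on demand.  The demo name is
   illustrative only.  */
#if 0
static void
mcheck_demo (void)
{
  char *p;
  if (mcheck (NULL) != 0)
    return;			/* too late: malloc already in use */
  p = (char *) malloc (32);
  p[32] = 'x';			/* overrun clobbers the trailing MAGICBYTE */
  mprobe (p);			/* reports MCHECK_TAIL via abortfunc */
  free (p);
}
#endif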

enum mcheck_status
mprobe (__ptr_t ptr)
{
  /* PTR is the pointer the user got back from `malloc'; its header
     sits just before it, as in `freehook' above.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */