Fix memory allocation problems in Cygwin build (Bug#9273).
[bpt/emacs.git] / src / gmalloc.c
/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_PTHREAD
#define USE_PTHREAD
#endif

#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *

#include <string.h>
#include <limits.h>
#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#endif /* _MALLOC_INTERNAL.  */


#ifdef __cplusplus
extern "C"
{
#endif

#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t


/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free PP ((__ptr_t __ptr));
/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#if !defined (_MALLOC_INTERNAL) || defined (MSDOS) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
                             __malloc_size_t __size));
extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
                               __malloc_size_t size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#endif
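
/* Editor's sketch (disabled; not part of the original source): the
   intended calling pattern for malloc_enable_thread, as described by
   the initialization comments further down -- main calls malloc before
   any threads exist, and locking is switched on once, before threads
   are created.  */
#if 0
int
main (void)
{
  malloc_enable_thread ();   /* Initialize the mutexes once.  */
  /* From here on, malloc/realloc/free take _malloc_mutex and may be
     called from several pthreads concurrently.  */
  return 0;
}
#endif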

#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT (CHAR_BIT * sizeof (int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
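
/* Editor's illustration (disabled): worked block arithmetic, assuming
   a platform where int is 32 bits, so BLOCKLOG is 12 and BLOCKSIZE is
   4096.  */
#if 0
#include <assert.h>
static void
block_arithmetic_example (void)
{
  assert (BLOCKIFY (1) == 1);              /* Sub-block sizes round up.  */
  assert (BLOCKIFY (BLOCKSIZE) == 1);      /* Exact fit.  */
  assert (BLOCKIFY (BLOCKSIZE + 1) == 2);
  assert (BLOCKIFY (10000) == 3);          /* (10000 + 4095) / 4096.  */
}
#endif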

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a large (multiblock) object, or positive giving the
           logarithm to the base two of the fragment size.  */
        int type;
        union
          {
            struct
              {
                __malloc_size_t nfree;  /* Free frags in a fragmented block.  */
                __malloc_size_t first;  /* First free fragment of the block.  */
              } frag;
            /* For a large object, in its first block, this has the number
               of blocks in the object.  In the other blocks, this has a
               negative number which says how far back the first block is.  */
            __malloc_ptrdiff_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        __malloc_size_t size;   /* Size (in blocks) of a free cluster.  */
        __malloc_size_t next;   /* Index of next free cluster.  */
        __malloc_size_t prev;   /* Index of previous free cluster.  */
      } free;
  } malloc_info;
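
/* Editor's illustration (disabled): how the negative back-pointers in
   `busy.info.size' are meant to be followed.  The helper name is
   hypothetical, and it applies only to large objects (busy.type == 0).  */
#if 0
static __malloc_size_t
first_block_of (__malloc_size_t b)
{
  if (_heapinfo[b].busy.info.size < 0)   /* Not the first block.  */
    b += _heapinfo[b].busy.info.size;    /* size == -(blocks back).  */
  return b;
}
#endif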

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)   (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
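
/* Editor's illustration (disabled): BLOCK and ADDRESS are inverses for
   block-aligned addresses, with block numbers starting at 1.  */
#if 0
static void
block_address_example (void)
{
  __malloc_size_t b = BLOCK (_heapbase);                /* b == 1 */
  char *a = (char *) ADDRESS (b);                       /* a == _heapbase */
  __malloc_size_t b2 = BLOCK (_heapbase + BLOCKSIZE);   /* b2 == 2 */
}
#endif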

/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;            /* The address that memalign returned.  */
    __ptr_t exact;              /* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()                                  \
  do {                                          \
    if (_malloc_thread_enabled_p)               \
      pthread_mutex_lock (&_malloc_mutex);      \
  } while (0)
#define UNLOCK()                                \
  do {                                          \
    if (_malloc_thread_enabled_p)               \
      pthread_mutex_unlock (&_malloc_mutex);    \
  } while (0)
#define LOCK_ALIGNED_BLOCKS()                           \
  do {                                                  \
    if (_malloc_thread_enabled_p)                       \
      pthread_mutex_lock (&_aligned_blocks_mutex);      \
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()                         \
  do {                                                  \
    if (_malloc_thread_enabled_p)                       \
      pthread_mutex_unlock (&_aligned_blocks_mutex);    \
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
                                       __malloc_size_t __alignment));
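
/* Editor's sketch (disabled): installing a tracing __malloc_hook in
   the glibc style these hooks follow.  The hook must unhook itself
   before calling malloc again, or the call would recurse forever.  */
#if 0
#include <stdio.h>
static __ptr_t (*old_malloc_hook) (__malloc_size_t);

static __ptr_t
tracing_malloc_hook (__malloc_size_t size)
{
  __ptr_t p;
  __malloc_hook = old_malloc_hook;        /* Unhook to avoid recursion.  */
  p = malloc (size);
  fprintf (stderr, "malloc (%lu) -> %p\n", (unsigned long) size, p);
  __malloc_hook = tracing_malloc_hook;    /* Re-arm the hook.  */
  return p;
}

static void
install_tracing_hook (void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = tracing_malloc_hook;
}
#endif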

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,       /* Consistency checking is not turned on.  */
    MCHECK_OK,                  /* Block is fine.  */
    MCHECK_FREE,                /* Block freed twice.  */
    MCHECK_HEAD,                /* Memory before the block was clobbered.  */
    MCHECK_TAIL                 /* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
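
/* Editor's sketch (disabled): typical mcheck/mprobe usage.  mcheck
   must run before the first malloc; NULL selects the default abort
   function.  The `0 == success' return convention is an assumption,
   carried over from the glibc version of mcheck.  */
#if 0
int
main (void)
{
  char *p;
  if (mcheck (NULL) != 0)
    return 1;                    /* Too late: malloc was already used.  */
  p = malloc (16);
  if (mprobe (p) != MCHECK_OK)   /* Same checks free would perform.  */
    return 1;
  free (p);
  return 0;
}
#endif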

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total;  /* Total size of the heap.  */
    __malloc_size_t chunks_used;  /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;   /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free;  /* Chunks in the free list.  */
    __malloc_size_t bytes_free;   /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
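
/* Editor's sketch (disabled): reading the allocator statistics.  */
#if 0
#include <stdio.h>
static void
print_mstats (void)
{
  struct mstats m = mstats ();
  printf ("heap %lu bytes; used %lu chunks / %lu bytes; free %lu / %lu\n",
          (unsigned long) m.bytes_total,
          (unsigned long) m.chunks_used, (unsigned long) m.bytes_used,
          (unsigned long) m.chunks_free, (unsigned long) m.bytes_free);
}
#endif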

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
                                 void (*__warnfun) PP ((const char *))));


/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
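
/* Editor's sketch (disabled): handle-based use of the relocating
   allocator.  The caveat that blocks may move behind the caller's
   back, so the handle must be re-read on each use rather than cached,
   is an assumption based on Emacs's ralloc; it is not stated here.  */
#if 0
static void
r_alloc_example (void)
{
  __ptr_t handle = NULL;
  r_alloc (&handle, 100);     /* handle now points at 100 bytes.  */
  r_re_alloc (&handle, 200);  /* May relocate; handle is updated.  */
  r_alloc_free (&handle);
}
#endif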


#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* On Cygwin there are two heaps.  temacs uses the static heap
   (defined in sheap.c and managed with bss_sbrk), and the dumped
   emacs uses the Cygwin heap (managed with sbrk).  When emacs starts
   on Cygwin, it reinitializes malloc, and we save the old info for
   use by free and realloc if they're called with a pointer into the
   static heap.  */
#ifdef CYGWIN
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
char *bss_sbrk_heapbase;        /* _heapbase for static heap */
malloc_info *bss_sbrk_heapinfo; /* _heapinfo for static heap */
#endif
__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
                    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
        abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)      /* empty */
#endif


/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t) size < 0)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
                                                  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      /* Extend the break by ADJ more bytes, so that RESULT + ADJ, the
         next block boundary, still has SIZE bytes available after it.  */
      new = (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
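
/* Editor's illustration: suppose BLOCKSIZE is 4096 and __morecore
   returns 0x100100.  Then adj is 0x100, so align asks __morecore for
   another 0x1000 - 0x100 == 0xf00 bytes and advances RESULT to
   0x101000, the next block boundary; the SIZE bytes starting there
   all lie within the twice-extended break.  */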

/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static inline void
register_heapinfo (void)
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
                  malloc_atfork_handler_parent,
                  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif

static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

#ifdef CYGWIN
  if (bss_sbrk_did_unexec)
    /* we're reinitializing the dumped emacs */
    {
      bss_sbrk_heapbase = _heapbase;
      bss_sbrk_heapinfo = _heapinfo;
      memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
    }
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc, which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
         added blocks in the table itself, as we hope to place them in
         existing free space, which is already covered by part of the
         existing table.  */
      newsize = heapsize;
      do
        newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
         from realloc in the case of growing a large block, because the
         block being grown is momentarily marked as free.  In this case
         _heaplimit is zero so we know not to reuse space for internal
         allocation.  */
      if (_heaplimit != 0)
        {
          /* First try to allocate the new info table in core we already
             have, in the usual way using realloc.  If realloc cannot
             extend it in place or relocate it to existing sufficient core,
             we will get called again, and the code above will notice the
             `morecore_recursing' flag and return null.  */
          int save = errno;     /* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
          newinfo = (malloc_info *) _realloc_internal_nolock
            (_heapinfo, newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
            errno = save;
          else
            {
              /* We found some space in core, and realloc has put the old
                 table's blocks on the free list.  Now zero the new part
                 of the table and install the new table location.  */
              memset (&newinfo[heapsize], 0,
                      (newsize - heapsize) * sizeof (malloc_info));
              _heapinfo = newinfo;
              heapsize = newsize;
              goto got_heap;
            }
        }

      /* Allocate new space for the malloc info table.  */
      while (1)
        {
          newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

          /* Did it fail?  */
          if (newinfo == NULL)
            {
              (*__morecore) (-size);
              return NULL;
            }

          /* Is it big enough to record status for its own space?
             If so, we win.  */
          if ((__malloc_size_t) BLOCK ((char *) newinfo
                                       + newsize * sizeof (malloc_info))
              < newsize)
            break;

          /* Must try again.  First give back most of what we just got.  */
          (*__morecore) (- newsize * sizeof (malloc_info));
          newsize *= 2;
        }

      /* Copy the old table to the beginning of the new,
         and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;

      /* Look in the fragment lists for a
         free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters.  */
          result = (__ptr_t) next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first = (unsigned long int)
              ((unsigned long int) ((char *) next->next - (char *) NULL)
               % BLOCKSIZE) >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
          result = _malloc_internal_nolock (BLOCKSIZE);
          PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
          result = _malloc_internal_nolock (BLOCKSIZE);
#else
          result = malloc (BLOCKSIZE);
#endif
          if (result == NULL)
            {
              PROTECT_MALLOC_STATE (1);
              goto out;
            }

          /* Link all fragments but the first into the free list.  */
          next = (struct list *) ((char *) result + (1 << log));
          next->next = NULL;
          next->prev = &_fraghead[log];
          _fraghead[log].next = next;

          for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Get a little extra.  */
              __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              /* Check to see if the new core will be contiguous with the
                 final free block; if so we don't need to get as much.  */
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space.  */
                  block + wantblocks <= heapsize &&
                  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
                                        ADDRESS (block + lastblocks)))
                {
                  /* We got it contiguously.  Which block we are extending
                     (the `final free block' referred to above) might have
                     changed, if it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (wantblocks - lastblocks);
                  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
              result = morecore_nolock (wantblocks * BLOCKSIZE);
              if (result == NULL)
                goto out;
              block = BLOCK (result);
              /* Put the new block at the end of the free list.  */
              _heapinfo[block].free.size = wantblocks;
              _heapinfo[block].free.prev = _heapinfo[0].free.prev;
              _heapinfo[block].free.next = 0;
              _heapinfo[0].free.prev = block;
              _heapinfo[_heapinfo[block].free.prev].free.next = block;
              ++_chunks_free;
              /* Now loop to use some of that block for this allocation.  */
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list.  */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list.  */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment.  */
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if (ptr < _heapbase)
    /* We're being asked to free something in the static heap.  */
    return;
#endif

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;      /* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          __malloc_size_t info_block = BLOCK (_heapinfo);
          __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
          __malloc_size_t prev_block = _heapinfo[block].free.prev;
          __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
          __malloc_size_t next_block = _heapinfo[block].free.next;
          __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold)))
              )
            {
              malloc_info *newinfo;
              __malloc_size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal_nolock (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
                                                                 * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register __malloc_size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
          _free_internal_nolock (ADDRESS (block));
#else
          free (ADDRESS (block));
#endif
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = (struct list *) ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = (struct list *) ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first = (unsigned long int)
            ((unsigned long int) ((char *) ptr - (char *) NULL)
             % BLOCKSIZE >> type);
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


#define min(A, B) ((A) < (B) ? (A) : (B))

/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
#ifdef CYGWIN
__ptr_t
special_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, oldsize;

  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  oldsize =
    type == 0 ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
    : (__malloc_size_t) 1 << type;
  result = _malloc_internal_nolock (size);
  if (result != NULL)
    memcpy (result, ptr, min (oldsize, size));
  return result;
}
#endif

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

#ifdef CYGWIN
  if (ptr < _heapbase)
    /* ptr points into the static heap */
    return special_realloc (ptr, size);
#endif

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal_nolock (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal_nolock (ptr);
              goto out;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list.  */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal_nolock (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal_nolock (ptr);
          result = _malloc_internal_nolock (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
              else
                {
                  __ptr_t previous
                    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
                  _free_internal_nolock (previous);
                }
              goto out;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
          size <= (__malloc_size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old.  */
          result = _malloc_internal_nolock (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
          _free_internal_nolock (ptr);
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result;

  /* Guard against overflow in NMEMB * SIZE.  */
  if (nmemb != 0 && size > (__malloc_size_t) -1 / nmemb)
    return NULL;

  result = malloc (nmemb * size);
  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined (CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
1657 /* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1658
1659 This library is free software; you can redistribute it and/or
1660 modify it under the terms of the GNU General Public License as
1661 published by the Free Software Foundation; either version 2 of the
1662 License, or (at your option) any later version.
1663
1664 This library is distributed in the hope that it will be useful,
1665 but WITHOUT ANY WARRANTY; without even the implied warranty of
1666 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1667 General Public License for more details.
1668
1669 You should have received a copy of the GNU General Public
1670 License along with this library; see the file COPYING. If
1671 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1672 Fifth Floor, Boston, MA 02110-1301, USA. */
1673
1674 #ifndef _MALLOC_INTERNAL
1675 #define _MALLOC_INTERNAL
1676 #include <malloc.h>
1677 #endif
1678
1679 __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
1680 __malloc_size_t __alignment));
1681
__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how many bytes of padding this particular block needs
     to achieve the required alignment.  ADJ is the padding count, not
     the raw misalignment: using the misalignment here would undersize
     the reallocated block below whenever the misalignment is less
     than half the alignment.  */
  adj = alignment - (unsigned long int) ((char *) result - (char *) NULL) % alignment;
  if (adj == alignment)
    adj = 0;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (size + adj);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = alignment - (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      if (adj == alignment)
	adj = 0;
      /* It's conceivable we might have been so unlucky as to get a
	 different block that needs even more padding.  If so, this
	 block is too short to contain SIZE after alignment correction.
	 So we must try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
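
/* Worked example of the padding arithmetic above (illustrative
   numbers only): with ALIGNMENT = 16 and a first block at address
   0x1004, the misalignment is 0x1004 % 16 = 4, so ADJ = 16 - 4 = 12
   and the block is reallocated with SIZE + 12 bytes.  If the new
   block again starts 4 bytes past a 16-byte boundary, the aligned
   pointer 0x1004 + 12 = 0x1010 leaves exactly SIZE usable bytes, and
   the pair (exact = block start, aligned = block start + 12) is
   recorded in _aligned_blocks so that `free' can recover the pointer
   malloc actually returned.  */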

/* Fallback definitions using the customary error numbers, in case
   they were not provided by a header.  */
#ifndef ENOMEM
#define ENOMEM 12
#endif

#ifndef EINVAL
#define EINVAL 22
#endif

int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  /* POSIX requires ALIGNMENT to be a power of two that is also a
     multiple of sizeof (void *); ALIGNMENT & (ALIGNMENT - 1) is
     nonzero exactly when ALIGNMENT is not a power of two.  */
  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
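
/* Example use (hypothetical caller, not part of this file):

     void *buf;
     int err = posix_memalign (&buf, 64, 1024);
     if (err == 0)
       {
	 ... BUF is 64-byte aligned; release it with free (buf) ...
       }

   Per POSIX, failure is reported through the return value (EINVAL
   for a bad alignment, ENOMEM when memory is exhausted) rather than
   through errno, and *MEMPTR is only written on success.  */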

/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

The author may be reached (Email) at the address mike@ai.mit.edu,
or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int.  */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
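
/* For illustration: on a system whose pages are 4096 bytes,
   valloc (100) behaves like memalign (4096, 100) and returns a
   pointer whose low twelve bits are zero.  The page size is queried
   once and cached in PAGESIZE on the first call.  */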

#endif /* Not ELIDE_VALLOC.  */

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

The author may be reached (Email) at the address mike@ai.mit.edu,
or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef emacs
#include <stdio.h>
#else
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#include <stdio.h>
#endif
#endif

/* Old hook values.  */
static void (*old_free_hook) (__ptr_t ptr);
static __ptr_t (*old_malloc_hook) (__malloc_size_t size);
static __ptr_t (*old_realloc_hook) (__ptr_t ptr, __malloc_size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
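
/* Sketch of a checked allocation as maintained by the hooks below
   (a description, not a declaration): the pointer handed to the user
   is HDR + 1, so each block is laid out as

       | struct hdr (size, magic) | SIZE user bytes | MAGICBYTE |

   MALLOCFLOOD fills the user bytes on allocation and FREEFLOOD on
   free, making use of uninitialized or stale memory easy to spot in
   a debugger.  */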

static enum mcheck_status checkhdr (const struct hdr *);
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
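
/* For example (hypothetical caller, with the hooks below installed):
   a one-byte overrun such as

     char *p = malloc (10);
     p[10] = '\0';   -- clobbers the MAGICBYTE guard

   is diagnosed as MCHECK_TAIL at the next free or mprobe of P, while
   freeing P twice finds MAGICFREE in the header and yields
   MCHECK_FREE.  In both cases checkhdr calls *abortfunc before
   returning.  */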

static void freehook (__ptr_t);
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static __ptr_t mallochook (__malloc_size_t);
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  memset ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}

static __ptr_t reallochook (__ptr_t, __malloc_size_t);
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}
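
/* Note the pattern shared by the three hooks above (a description,
   not new machinery): each temporarily restores the old hook(s)
   around its call into the real allocator, so that malloc, realloc
   and free run unwrapped, and then reinstalls the checking hooks.
   reallochook swaps all three because realloc may internally
   allocate or free.  The swap is done without locking, so this
   assumes allocation calls do not interleave (e.g. from signal
   handlers).  */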

static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (func)
     void (*func) (enum mcheck_status);
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
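
/* Example use (hypothetical caller): mcheck must be called before
   the first allocation, e.g. at the top of main:

     if (mcheck (NULL) != 0)
       fprintf (stderr, "mcheck could not be enabled\n");

   Passing NULL installs the default handler mabort above, which
   prints a diagnosis and aborts.  A return of 0 means the checking
   hooks are in place; -1 means malloc was already initialized and
   the hooks could not be inserted safely.  */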

enum mcheck_status
mprobe (__ptr_t ptr)
{
  /* PTR is the user pointer, so step back over the header before
     checking it, as freehook and reallochook do.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}
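
/* A sketch of probing a block between operations (hypothetical
   caller):

     char *p = malloc (32);
     ... code suspected of clobbering P's block ...
     if (mprobe (p) != MCHECK_OK)
       ... *abortfunc has already reported the corruption ...

   Unlike the implicit checks in freehook and reallochook, mprobe
   lets the caller validate a live block at any point.  */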

#endif /* GC_MCHECK */