/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_GTK_AND_PTHREAD
#define USE_PTHREAD
#endif

#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES))
#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef PP
#define PP(args) ()
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */

#include <string.h>

#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#endif /* _MALLOC_INTERNAL.  */


#ifdef __cplusplus
extern "C"
{
#endif

#ifdef STDC_HEADERS
#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t
#else
#ifdef __GNUC__
#include <stddef.h>
#ifdef __SIZE_TYPE__
#define __malloc_size_t __SIZE_TYPE__
#endif
#endif
#ifndef __malloc_size_t
#define __malloc_size_t unsigned int
#endif
#define __malloc_ptrdiff_t int
#endif

#ifndef NULL
#define NULL 0
#endif


/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block in PTR,
   making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free PP ((__ptr_t __ptr));

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#if !defined (_MALLOC_INTERNAL) || defined (MSDOS) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
			     __malloc_size_t __size));
extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
			       __malloc_size_t size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#endif

#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT (CHAR_BIT * sizeof (int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
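/* To make the size classes concrete, here is a hedged, self-contained
   sketch (hypothetical, not part of this allocator) of how request
   sizes map onto fragments and blocks under the macros above, assuming
   the common case INT_BIT == 32, i.e. BLOCKLOG == 12 and BLOCKSIZE ==
   4096.  Requests up to BLOCKSIZE / 2 get a power-of-two fragment
   (subject to a sizeof (struct list) minimum not modeled here); larger
   requests get BLOCKIFY (SIZE) whole blocks.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long sizes[] = { 24UL, 100UL, 2048UL, 2049UL, 5000UL };
  unsigned long i;

  for (i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
    {
      unsigned long size = sizes[i];
      if (size <= 4096 / 2)
	{
	  /* The same computation _malloc_internal_nolock uses for `log'.  */
	  unsigned long log = 1, s = size - 1;
	  while ((s /= 2) != 0)
	    ++log;
	  printf ("%lu bytes -> one %lu-byte fragment (log %lu)\n",
		  size, 1UL << log, log);
	}
      else
	printf ("%lu bytes -> %lu whole block(s)\n",
		size, (size + 4096 - 1) / 4096);
    }
  return 0;
}
#endif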

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
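
/* A hedged sketch (hypothetical, not used by the allocator) of how the
   BLOCK/ADDRESS mapping and the negative `busy.info.size' back-pointers
   compose: given a pointer into any block of a large object, recover
   the object's first block.  This is essentially the walk that
   `malloc_find_object_address' (declared further down) must perform.  */
#if 0
static __ptr_t
object_start_sketch (__ptr_t ptr)
{
  __malloc_size_t block = BLOCK (ptr);

  /* In every block of a large object after the first, size holds the
     negated distance back to the first block.  */
  if (_heapinfo[block].busy.type == 0
      && _heapinfo[block].busy.info.size < 0)
    block += _heapinfo[block].busy.info.size;
  return ADDRESS (block);
}
#endif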

/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memalign returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()						\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_malloc_mutex);		\
  } while (0)
#define UNLOCK()					\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_malloc_mutex);		\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				       __malloc_size_t __alignment));
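
/* An example of the hook protocol (a sketch; nothing here installs it):
   a hook must uninstall itself around its own call back into malloc,
   or it would recurse.  The TRACING_* names are hypothetical.  */
#if 0
#include <stdio.h>

static __ptr_t (*tracing_old_malloc_hook) PP ((__malloc_size_t));

static __ptr_t
tracing_malloc_hook (__malloc_size_t size)
{
  __ptr_t result;
  __malloc_hook = tracing_old_malloc_hook;	/* Avoid recursion.  */
  result = malloc (size);
  fprintf (stderr, "malloc (%lu) -> %p\n", (unsigned long) size, result);
  __malloc_hook = tracing_malloc_hook;		/* Re-install ourselves.  */
  return result;
}

/* To install:
     tracing_old_malloc_hook = __malloc_hook;
     __malloc_hook = tracing_malloc_hook;  */
#endif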

/* Return values for `mprobe': these are the kinds of inconsistencies
   `mcheck' can detect.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
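
/* Usage sketch (hypothetical): `mcheck' must run before the first
   `malloc'; thereafter `mprobe' can interrogate any live block.  */
#if 0
int
main (void)
{
  char *p;

  mcheck (NULL);		/* NULL => report on stderr and abort.  */
  p = malloc (16);
  p[16] = 'x';			/* Clobber the byte after the block.  */
  return mprobe (p) == MCHECK_TAIL ? 0 : 1;
}
#endif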

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	 /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	 /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
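
/* Usage sketch (hypothetical; needs <stdio.h>): the five counters kept
   by the instrumentation above come back in one struct.  */
#if 0
static void
report_heap (void)
{
  struct mstats m = mstats ();
  fprintf (stderr, "heap: %lu bytes total, %lu used (%lu chunks), %lu free\n",
	   (unsigned long) m.bytes_total, (unsigned long) m.bytes_used,
	   (unsigned long) m.chunks_used, (unsigned long) m.bytes_free);
}
#endif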

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
				 void (*__warnfun) PP ((const char *))));
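
/* Usage sketch (hypothetical; needs <stdio.h>): register a callback
   once at startup, as Emacs does with its own warning function.
   Passing NULL for START is an assumption here; the real caller would
   pass the start of its data space.  */
#if 0
static void
warn_fn (const char *msg)
{
  fputs (msg, stderr);		/* MSG describes how tight memory is.  */
}
/* At startup:  memory_warnings (NULL, warn_fn);  */
#endif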


/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
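
/* Usage sketch for the relocating allocator (hypothetical, and
   assuming a NULL return on failure): the caller owns only the handle;
   the block may move on any r_re_alloc, with the handle updated to
   track it, so the data must always be reached through the handle.  */
#if 0
static void
r_alloc_example (void)
{
  __ptr_t data = NULL;

  if (r_alloc (&data, 100) == NULL)
    return;
  /* ... use DATA; the allocator may relocate the block and update
     DATA through the handle, so always reload it ... */
  if (r_re_alloc (&data, 200) != NULL)
    {
      /* ... DATA now addresses 200 bytes ... */
    }
  r_alloc_free (&data);
}
#endif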


#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* How to really get more memory.  */
#if defined (CYGWIN)
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
#endif
__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Nonzero if `malloc' has been called and done its initialization
   ("Are you experienced?").  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it; call it with a zero argument to
   make the state readable and writable again.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif


/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t) size < 0)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
						  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      /* RESULT was not block-aligned; get enough extra core to reach
	 the next block boundary, and return the aligned address.  The
	 extra core itself (NEW) is simply left in place as padding.  */
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      new = (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}

/* Get SIZE bytes, if we can get them starting at POSITION.
   Return the address of the space we got.
   If we cannot get space at POSITION, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static inline void
register_heapinfo (void)
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are first used.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif
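
/* Usage sketch (applies only when USE_PTHREAD is defined): the
   embedding program should call malloc_enable_thread once, before it
   creates any threads; Emacs does so during startup.  After that the
   public entry points take _malloc_mutex around the *_nolock workers.  */
#if 0
int
main (void)
{
  malloc_enable_thread ();	/* Before the first pthread_create.  */
  /* ... threads may now malloc/realloc/free concurrently ... */
  return 0;
}
#endif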

static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc, which calls this function; that happens before
   any threads or signal handlers have been set up, so we don't need
   thread protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal_nolock
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
\f
#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
								 * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* As in `malloc', snapshot the hook to guard against it being
     modified in another thread between the NULL-check and the call.  */
  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result;

  /* Guard against overflow in the multiplication: a wrapped product
     would silently return a block smaller than requested.  */
  if (size != 0 && nmemb > (__malloc_size_t) -1 / size)
    return NULL;

  result = malloc (nmemb * size);
  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined (CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
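
/* A hedged sketch (hypothetical, not part of this file) of repointing
   __morecore at another allocator.  The contract mirrored from
   __default_morecore/sbrk: successive calls return contiguous memory,
   a zero increment reports the current break, failure returns NULL,
   and the return value is the break as it was before the call.  */
#if 0
static char arena[16 * 1024 * 1024];
static __malloc_size_t arena_break;

static __ptr_t
arena_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  if (increment < 0
      ? arena_break < (__malloc_size_t) -increment
      : sizeof arena - arena_break < (__malloc_size_t) increment)
    return NULL;
  arena_break += increment;
  return arena + arena_break - increment;	/* The old break.  */
}

/* Install before the first malloc:  __morecore = arena_morecore;  */
#endif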
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				__malloc_size_t __alignment));

__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}

#ifndef ENOMEM
#define ENOMEM 12
#endif

#ifndef EINVAL
#define EINVAL 22
#endif

int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
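
/* Usage sketch: unlike memalign, posix_memalign reports failure through
   its return value; the alignment must be a power of two and a multiple
   of sizeof (void *), as checked above.  */
#if 0
static void
posix_memalign_example (void)
{
  __ptr_t p;

  if (posix_memalign (&p, 64, 1000) == 0)
    {
      /* ... P is 64-byte aligned ... */
      free (p);
    }
}
#endif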

/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int.  */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize ()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
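
/* An illustrative sketch, not part of the original source and compiled
   out with `#if 0': valloc is just memalign with the alignment fixed at
   the system page size.  */
#if 0
static void
valloc_example (void)
{
  /* The returned block starts on a page boundary, which matters for
     callers that hand the memory to page-granular interfaces.  */
  __ptr_t page = valloc (__getpagesize ());
  free (page);
}
#endif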

#endif /* Not ELIDE_VALLOC.  */

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef emacs
#include <stdio.h>
#else
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#include <stdio.h>
#endif
#endif

/* Old hook values.  */
static void (*old_free_hook) (__ptr_t ptr);
static __ptr_t (*old_malloc_hook) (__malloc_size_t size);
static __ptr_t (*old_realloc_hook) (__ptr_t ptr, __malloc_size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD       0xfedabeeb
#define MAGICFREE       0xd8675309
#define MAGICBYTE       ((char) 0xd7)
#define MALLOCFLOOD     ((char) 0x93)
#define FREEFLOOD       ((char) 0x95)

struct hdr
  {
    __malloc_size_t size;       /* Exact size requested by user.  */
    unsigned long int magic;    /* Magic number to check header integrity.  */
  };
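
/* While these hooks are installed, every block handed to the user has
   the layout below (see mallochook): the header precedes the user data
   and a single MAGICBYTE follows it, so checkhdr can detect both header
   corruption and writes just past the end of the block.

     +------------+----------------------+-----------+
     | struct hdr |  SIZE bytes of data  | MAGICBYTE |
     +------------+----------------------+-----------+
                  ^-- pointer returned to the user  */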

#if defined (_LIBC) || defined (STDC_HEADERS) || defined (USG)
#define flood memset
#else
static void flood (__ptr_t, int, __malloc_size_t);
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;
  while (size--)
    *cp++ = val;
}
#endif

static enum mcheck_status checkhdr (const struct hdr *);
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
        status = MCHECK_TAIL;
      else
        status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void freehook (__ptr_t);
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;
  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static __ptr_t mallochook (__malloc_size_t);
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}

static __ptr_t reallochook (__ptr_t, __malloc_size_t);
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
        flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}

static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (func)
     void (*func) (enum mcheck_status);
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (__ptr_t ptr)
{
  /* PTR is the pointer the user got from malloc; the header checkhdr
     expects sits immediately before it, as in freehook above.  Passing
     PTR itself would check the user data instead of the header.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}
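
/* An illustrative sketch, not part of the original source and compiled
   out with `#if 0': the hooks can only be installed before the first
   allocation, so mcheck must run very early.  Passing NULL selects the
   default mabort handler above.  */
#if 0
static void
mcheck_example (void)
{
  if (mcheck (NULL) != 0)
    fprintf (stderr, "mcheck: malloc already initialized\n");
  else
    {
      char *p = (char *) malloc (16);
      /* A well-behaved block probes as MCHECK_OK; overwriting p[16]
         would make the next probe report MCHECK_TAIL.  */
      if (p != NULL && mprobe ((__ptr_t) p) == MCHECK_OK)
        free (p);
    }
}
#endif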

#endif /* GC_MCHECK */