/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_GTK_AND_PTHREAD
#define USE_PTHREAD
#endif

#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES) \
     && ! defined (BROKEN_PROTOTYPES))
#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef PP
#define PP(args) ()
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define memset(s, zero, n) bzero ((s), (n))
#endif
#ifndef memcpy
#define memcpy(d, s, n) bcopy ((s), (d), (n))
#endif
#endif

#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#endif /* _MALLOC_INTERNAL.  */


#ifdef __cplusplus
extern "C"
{
#endif

#ifdef STDC_HEADERS
#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t
#else
#ifdef __GNUC__
#include <stddef.h>
#ifdef __SIZE_TYPE__
#define __malloc_size_t __SIZE_TYPE__
#endif
#endif
#ifndef __malloc_size_t
#define __malloc_size_t unsigned int
#endif
#define __malloc_ptrdiff_t int
#endif

#ifndef NULL
#define NULL 0
#endif

#ifndef FREE_RETURN_TYPE
#define FREE_RETURN_TYPE void
#endif


/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern FREE_RETURN_TYPE free PP ((__ptr_t __ptr));

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
                             __malloc_size_t __size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif


#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT (CHAR_BIT * sizeof (int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
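/* For example, with 32-bit ints BLOCKLOG is 12 and BLOCKSIZE is 4096,
   so BLOCKIFY (1) == 1, BLOCKIFY (4096) == 1, and BLOCKIFY (4097) == 2:
   every large request is rounded up to a whole number of blocks.  */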

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a large (multiblock) object, or positive giving the
           logarithm to the base two of the fragment size.  */
        int type;
        union
          {
            struct
              {
                __malloc_size_t nfree; /* Free frags in a fragmented block.  */
                __malloc_size_t first; /* First free fragment of the block.  */
              } frag;
            /* For a large object, in its first block, this has the number
               of blocks in the object.  In the other blocks, this has a
               negative number which says how far back the first block is.  */
            __malloc_ptrdiff_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        __malloc_size_t size;   /* Size (in blocks) of a free cluster.  */
        __malloc_size_t next;   /* Index of next free cluster.  */
        __malloc_size_t prev;   /* Index of previous free cluster.  */
      } free;
  } malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
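/* Blocks are numbered from 1 (entry 0 of _heapinfo anchors the free
   list), so BLOCK and ADDRESS are inverses.  For example, with
   BLOCKSIZE == 4096, the address _heapbase + 8192 lies in block 3,
   and ADDRESS (3) recovers _heapbase + 8192.  */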

/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;            /* The address that memalign returned.  */
    __ptr_t exact;              /* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
#define LOCK() pthread_mutex_lock (&_malloc_mutex)
#define UNLOCK() pthread_mutex_unlock (&_malloc_mutex)
#define LOCK_ALIGNED_BLOCKS() pthread_mutex_lock (&_aligned_blocks_mutex)
#define UNLOCK_ALIGNED_BLOCKS() pthread_mutex_unlock (&_aligned_blocks_mutex)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
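
/* Locking convention, as a summary of how these macros are used below:
   LOCK/UNLOCK guard all heap-wide state (_heapinfo, _heapindex,
   _fraghead and the statistics counters), while LOCK_ALIGNED_BLOCKS
   guards only the _aligned_blocks list.  The *_nolock entry points
   above assume the caller already holds _malloc_mutex.  */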

#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
                                       __malloc_size_t __alignment));

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,       /* Consistency checking is not turned on.  */
    MCHECK_OK,                  /* Block is fine.  */
    MCHECK_FREE,                /* Block freed twice.  */
    MCHECK_HEAD,                /* Memory before the block was clobbered.  */
    MCHECK_TAIL                 /* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;  /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;  /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
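/* A minimal usage sketch: a caller can sample the allocator with
     struct mstats s = mstats ();
   s.chunks_used then counts live allocations, and s.bytes_free counts
   memory held on the free lists but not yet returned to the system.  */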

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
                                 void (*__warnfun) PP ((const char *))));


/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));


#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* How to really get more memory.  */
#if defined(CYGWIN)
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
#endif
__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Nonzero once `malloc' has been called and completed its
   initialization.  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
                    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
        abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)      /* empty */
#endif


/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t) size < 0)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
                                                  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      new = (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}

/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void register_heapinfo PP ((void));
#ifdef __GNUC__
__inline__
#endif
static void
register_heapinfo ()
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  /* We don't use recursive mutex because pthread_mutexattr_init may
     call malloc internally.  */
#if 0 /* defined (USE_PTHREAD) */
  {
    pthread_mutexattr_t attr;

    pthread_mutexattr_init (&attr);
    pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init (&_malloc_mutex, &attr);
    pthread_mutexattr_destroy (&attr);
  }
#endif

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc, which calls this function, before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
         added blocks in the table itself, as we hope to place them in
         existing free space, which is already covered by part of the
         existing table.  */
      newsize = heapsize;
      do
        newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
         from realloc in the case of growing a large block, because the
         block being grown is momentarily marked as free.  In this case
         _heaplimit is zero so we know not to reuse space for internal
         allocation.  */
      if (_heaplimit != 0)
        {
          /* First try to allocate the new info table in core we already
             have, in the usual way using realloc.  If realloc cannot
             extend it in place or relocate it to existing sufficient core,
             we will get called again, and the code above will notice the
             `morecore_recursing' flag and return null.  */
          int save = errno;     /* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
          newinfo = (malloc_info *) _realloc_internal_nolock
            (_heapinfo, newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
            errno = save;
          else
            {
              /* We found some space in core, and realloc has put the old
                 table's blocks on the free list.  Now zero the new part
                 of the table and install the new table location.  */
              memset (&newinfo[heapsize], 0,
                      (newsize - heapsize) * sizeof (malloc_info));
              _heapinfo = newinfo;
              heapsize = newsize;
              goto got_heap;
            }
        }

      /* Allocate new space for the malloc info table.  */
      while (1)
        {
          newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

          /* Did it fail?  */
          if (newinfo == NULL)
            {
              (*__morecore) (-size);
              return NULL;
            }

          /* Is it big enough to record status for its own space?
             If so, we win.  */
          if ((__malloc_size_t) BLOCK ((char *) newinfo
                                       + newsize * sizeof (malloc_info))
              < newsize)
            break;

          /* Must try again.  First give back most of what we just got.  */
          (*__morecore) (- newsize * sizeof (malloc_info));
          newsize *= 2;
        }

      /* Copy the old table to the beginning of the new,
         and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;
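      /* For example, a request for 100 bytes leaves log == 7, i.e. a
         128-byte fragment: the loop computes the base-2 logarithm of
         the smallest power of two not less than the request.  */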

      /* Look in the fragment lists for a
         free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters.  */
          result = (__ptr_t) next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first = (unsigned long int)
              ((unsigned long int) ((char *) next->next - (char *) NULL)
               % BLOCKSIZE) >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
          result = _malloc_internal_nolock (BLOCKSIZE);
          PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
          result = _malloc_internal_nolock (BLOCKSIZE);
#else
          result = malloc (BLOCKSIZE);
#endif
          if (result == NULL)
            {
              PROTECT_MALLOC_STATE (1);
              goto out;
            }

          /* Link all fragments but the first into the free list.  */
          next = (struct list *) ((char *) result + (1 << log));
          next->next = NULL;
          next->prev = &_fraghead[log];
          _fraghead[log].next = next;

          for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Get a little extra.  */
              __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              /* Check to see if the new core will be contiguous with the
                 final free block; if so we don't need to get as much.  */
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space.  */
                  block + wantblocks <= heapsize &&
                  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
                                        ADDRESS (block + lastblocks)))
                {
                  /* We got it contiguously.  Which block we are extending
                     (the `final free block' referred to above) might have
                     changed, if it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (wantblocks - lastblocks);
                  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
              result = morecore_nolock (wantblocks * BLOCKSIZE);
              if (result == NULL)
                goto out;
              block = BLOCK (result);
              /* Put the new block at the end of the free list.  */
              _heapinfo[block].free.size = wantblocks;
              _heapinfo[block].free.prev = _heapinfo[0].free.prev;
              _heapinfo[block].free.next = 0;
              _heapinfo[0].free.prev = block;
              _heapinfo[_heapinfo[block].free.prev].free.next = block;
              ++_chunks_free;
              /* Now loop to use some of that block for this allocation.  */
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list.  */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list.  */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment.  */
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
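      /* For example, a three-block object starting at block B leaves
         sizes {3, -1, -2} in _heapinfo[B..B+2]; adding the stored
         negative size to an interior block's index yields B.  */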
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not a right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Cope with systems lacking `memmove'.  */
#ifndef memmove
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#endif
/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif
#endif


/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;      /* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          __malloc_size_t info_block = BLOCK (_heapinfo);
          __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
          __malloc_size_t prev_block = _heapinfo[block].free.prev;
          __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
          __malloc_size_t next_block = _heapinfo[block].free.next;
          __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold)))
              )
            {
              malloc_info *newinfo;
              __malloc_size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal_nolock (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
                                                                 * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register __malloc_size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
          _free_internal_nolock (ADDRESS (block));
#else
          free (ADDRESS (block));
#endif
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = (struct list *) ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = (struct list *) ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first = (unsigned long int)
            ((unsigned long int) ((char *) ptr - (char *) NULL)
             % BLOCKSIZE >> type);
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

FREE_RETURN_TYPE
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif



/* Cope with systems lacking `memmove'.  */
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else

/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
         nonoverlapping chunks of TO - FROM bytes each.  However, if
         TO - FROM is small, then the bcopy function call overhead
         makes this not worth it.  The crossover point could be about
         anywhere.  Since I don't think the obvious copy loop is too
         bad, I'm trying to err in its favor.  */
      if (to - from < 64)
        {
          do
            *--endt = *--endf;
          while (endf != from);
        }
      else
        {
          for (;;)
            {
              endt -= (to - from);
              endf -= (to - from);

              if (endt < to)
                break;

              bcopy (endf, endt, to - from);
            }

          /* If SIZE wasn't a multiple of TO - FROM, there will be a
             little left over.  The amount left over is
             (endt + (to - from)) - to, which is endt - from.  */
          bcopy (from, to, endt - from);
        }
    }
}
#endif /* emacs */

#ifndef memmove
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif

#endif


#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal_nolock (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal_nolock (ptr);
              goto out;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list.  */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal_nolock (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal_nolock (ptr);
          result = _malloc_internal_nolock (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
              else
                {
                  __ptr_t previous
                    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
                  _free_internal_nolock (previous);
                }
              goto out;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
          size <= (__malloc_size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old.  */
          result = _malloc_internal_nolock (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
          _free_internal_nolock (ptr);
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __malloc_size_t bytes = nmemb * size;
  register __ptr_t result;

  /* Guard against arithmetic overflow in NMEMB * SIZE; a wrapped
     product would silently allocate too small a block.  */
  if (size != 0 && bytes / size != nmemb)
    return NULL;

  result = malloc (bytes);

  if (result != NULL)
    (void) memset (result, 0, bytes);

  return result;
}
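/* For example, calloc (10, sizeof (int)) returns 40 zeroed bytes on a
   system with 32-bit int, while calloc ((__malloc_size_t) -1, 2) fails
   cleanly instead of wrapping around to a tiny allocation.  */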
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#ifndef __GNU_LIBRARY__
#define __sbrk sbrk
#endif

#ifdef __GNU_LIBRARY__
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined(CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
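/* Note that, like sbrk, (*__morecore) (0) returns the current break
   without changing it; the allocator relies on this in
   get_contiguous_space to test whether newly requested core would be
   contiguous with the existing heap.  */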
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#if __DJGPP__ - 0 == 1

/* There is some problem with memalign in DJGPP v1, and we are supposed
   to omit it.  No one told me why; they just told me to do it.  */

#else

__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
                                __malloc_size_t __alignment));

__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  ADJ is the padding itself,
     not the misalignment, so that the retry loop below allocates
     exactly the slack it will later consume.  */
  adj = alignment
    - (unsigned long int) ((char *) result - (char *) NULL) % alignment;
  if (adj == alignment)
    adj = 0;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)       /* Impossible unless interrupted.  */
        return NULL;

      lastadj = adj;
      adj = alignment
        - (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      if (adj == alignment)
        adj = 0;
      /* It's conceivable we might have been so unlucky as to get a
         different block with weaker alignment.  If so, this block is too
         short to contain SIZE after alignment correction.  So we must
         try again and get another block, slightly larger.  */
    } while (adj > lastadj);
1773
1774 if (adj != 0)
1775 {
1776 /* Record this block in the list of aligned blocks, so that `free'
1777 can identify the pointer it is passed, which will be in the middle
1778 of an allocated block. */
1779
1780 struct alignlist *l;
1781 LOCK_ALIGNED_BLOCKS ();
1782 for (l = _aligned_blocks; l != NULL; l = l->next)
1783 if (l->aligned == NULL)
1784 /* This slot is free. Use it. */
1785 break;
1786 if (l == NULL)
1787 {
1788 l = (struct alignlist *) malloc (sizeof (struct alignlist));
1789 if (l != NULL)
1790 {
1791 l->next = _aligned_blocks;
1792 _aligned_blocks = l;
1793 }
1794 }
1795 if (l != NULL)
1796 {
1797 l->exact = result;
1798 result = l->aligned = (char *) result + alignment - adj;
1799 }
1800 UNLOCK_ALIGNED_BLOCKS ();
1801 if (l == NULL)
1802 {
1803 free (result);
1804 result = NULL;
1805 }
1806 }
1807
1808 return result;
1809 }
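
/* Illustrative sketch only (kept under "#if 0", never compiled):
   a 64-byte-aligned request.  The pointer memalign returns may sit in
   the middle of the block malloc actually created; the
   `_aligned_blocks' list recorded above is what lets the ordinary
   `free' recover the exact block.  */
#if 0
void
memalign_example ()
{
  __ptr_t p = memalign ((__malloc_size_t) 64, (__malloc_size_t) 1000);
  if (p != NULL)
    {
      /* ((unsigned long int) (char *) p % 64) == 0 holds here.  */
      free (p);			/* free looks p up in _aligned_blocks.  */
    }
}
#endif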

#endif /* Not DJGPP v1 */
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int.  */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

/* Allocate SIZE bytes of storage on a page boundary.  */
__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
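
/* Illustrative sketch only (kept under "#if 0", never compiled):
   since valloc is just memalign with the page size, its result is
   usable for page-granular calls such as mprotect.  */
#if 0
#include <sys/mman.h>

void
valloc_example ()
{
  __ptr_t buf = valloc ((__malloc_size_t) 8192);
  if (buf != NULL)
    {
      mprotect (buf, 8192, PROT_READ);	/* needs page alignment */
      mprotect (buf, 8192, PROT_READ | PROT_WRITE);  /* restore before free */
      free (buf);
    }
}
#endif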

#endif /* Not ELIDE_VALLOC.  */

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef emacs
#include <stdio.h>
#else
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#include <stdio.h>
#endif
#endif

/* Old hook values.  */
static void (*old_free_hook) PP ((__ptr_t ptr));
static __ptr_t (*old_malloc_hook) PP ((__malloc_size_t size));
static __ptr_t (*old_realloc_hook) PP ((__ptr_t ptr, __malloc_size_t size));

/* Function to call when something awful happens.  */
static void (*abortfunc) PP ((enum mcheck_status));

/* Arbitrary magical numbers.  */
#define MAGICWORD 0xfedabeeb
#define MAGICFREE 0xd8675309
#define MAGICBYTE ((char) 0xd7)
#define MALLOCFLOOD ((char) 0x93)
#define FREEFLOOD ((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
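
/* A picture of the layout mallochook builds below (it allocates
   sizeof (struct hdr) + SIZE + 1 bytes in all):

     [struct hdr][SIZE bytes of user data][MAGICBYTE]
		 ^-- pointer returned to the user

   checkhdr reports MCHECK_HEAD when the magic word in the header has
   been damaged and MCHECK_TAIL when the trailing MAGICBYTE has been
   overwritten.  */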

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#define flood memset
#else
static void flood PP ((__ptr_t, int, __malloc_size_t));
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;
  while (size--)
    *cp++ = val;
}
#endif

static enum mcheck_status checkhdr PP ((const struct hdr *));
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void freehook PP ((__ptr_t));
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static __ptr_t mallochook PP ((__malloc_size_t));
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}

static __ptr_t reallochook PP ((__ptr_t, __malloc_size_t));
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}

static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (func)
     void (*func) PP ((enum mcheck_status));
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (__ptr_t ptr)
{
  /* PTR is the address malloc handed to the user, so step back over
     the header before checking it, just as freehook does above.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}
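
/* Illustrative sketch only (kept under "#if 0", never compiled):
   mcheck must be installed before the first allocation; each block can
   then be probed at will.  The default handler `mabort' above aborts
   on the first inconsistency, so this sketch installs a hypothetical
   do-nothing handler in order to read the status from mprobe instead.  */
#if 0
static void
ignore_corruption (status)
     enum mcheck_status status;
{
}

int
mcheck_example ()
{
  char *p;
  if (mcheck (ignore_corruption) != 0)
    return -1;			/* malloc had already been used */
  p = (char *) malloc (16);
  if (p == NULL || mprobe (p) != MCHECK_OK)
    return -1;			/* a fresh block should check out */
  p[16] = 0;			/* clobber the trailing MAGICBYTE */
  return mprobe (p) == MCHECK_TAIL;
}
#endif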

#endif /* GC_MCHECK */

/* arch-tag: 93dce5c0-f49a-41b5-86b1-f91c4169c02e
   (do not change this comment) */