/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_GTK_AND_PTHREAD
#define USE_PTHREAD
#endif

#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES))
#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef PP
#define PP(args) ()
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define memset(s, zero, n) bzero ((s), (n))
#endif
#ifndef memcpy
#define memcpy(d, s, n) bcopy ((s), (d), (n))
#endif
#endif

#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#endif /* _MALLOC_INTERNAL.  */


#ifdef __cplusplus
extern "C"
{
#endif

#ifdef STDC_HEADERS
#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t
#else
#ifdef __GNUC__
#include <stddef.h>
#ifdef __SIZE_TYPE__
#define __malloc_size_t __SIZE_TYPE__
#endif
#endif
#ifndef __malloc_size_t
#define __malloc_size_t unsigned int
#endif
#define __malloc_ptrdiff_t int
#endif

#ifndef NULL
#define NULL 0
#endif


/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free PP ((__ptr_t __ptr));

/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT bytes.  */
#if !defined (_MALLOC_INTERNAL) || defined (MSDOS) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
			     __malloc_size_t __size));
extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
			       __malloc_size_t size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#endif

#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT (CHAR_BIT * sizeof (int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
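
/* Illustrative sketch (not part of the allocator): how a request maps
   onto these constants.  On hosts where int is at least 32 bits wide,
   BLOCKLOG is 12 and BLOCKSIZE is 4096, so requests of at most
   BLOCKSIZE / 2 bytes become power-of-two fragments and larger ones
   become whole blocks.  The figures below assume that case.  */
#if 0
#include <stdio.h>

int
main (void)
{
  /* Fragment path: round a 100-byte request up to a power of two,
     using the same loop malloc uses below.  */
  unsigned long size = 100, log = 1, s = size - 1;
  while ((s /= 2) != 0)
    ++log;
  printf ("100 bytes -> %lu-byte fragment (log %lu)\n", 1UL << log, log);

  /* Block path: BLOCKIFY (10000) == (10000 + 4095) / 4096 == 3.  */
  printf ("10000 bytes -> %d blocks of 4096\n", (10000 + 4096 - 1) / 4096);
  return 0;
}
#endif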

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
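
/* Sketch of how these macros cooperate with the info table (assumes an
   initialized heap; SOME_PTR is hypothetical).  BLOCK and ADDRESS are
   inverses, and the negative busy.info.size entries described above
   act as back-pointers from the interior of a large object.  */
#if 0
__malloc_size_t b = BLOCK (some_ptr);	/* block index of SOME_PTR */
__ptr_t start = ADDRESS (b);		/* first byte of that block */
/* If block I lies inside a large object, its first block is
   I + _heapinfo[I].busy.info.size, because that size is negative.  */
#endif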

/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memalign returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
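
/* Sketch of the locking idiom these macros support: public entry
   points take the lock and delegate to a _nolock worker, so internal
   callers that already hold the mutex can share the same code path.
   EXAMPLE_ENTRY is hypothetical; _malloc_internal below is real.  */
#if 0
__ptr_t
example_entry (__malloc_size_t size)
{
  __ptr_t result;
  LOCK ();				/* no-op unless threads enabled */
  result = _malloc_internal_nolock (size);
  UNLOCK ();
  return result;
}
#endif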

#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				       __malloc_size_t __alignment));

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
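
/* Usage sketch for the consistency checks, assuming a build that links
   the mcheck implementation (see GC_MCHECK below): mcheck must run
   before the first allocation, and mprobe can then be applied to any
   live block.  */
#if 0
#include <stdio.h>

int
main (void)
{
  __ptr_t p;
  mcheck (NULL);		/* NULL: print on stderr and abort */
  p = malloc (16);
  if (mprobe (p) != MCHECK_OK)	/* same checks free/realloc perform */
    fprintf (stderr, "block inconsistent\n");
  free (p);
  return 0;
}
#endif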

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total;  /* Total size of the heap.  */
    __malloc_size_t chunks_used;  /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	  /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free;  /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	  /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
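
/* Sketch: reading the instrumentation counters through mstats
   (assumes <stdio.h>).  */
#if 0
struct mstats s = mstats ();
printf ("heap %lu bytes; %lu used in %lu chunks; %lu free\n",
	(unsigned long) s.bytes_total, (unsigned long) s.bytes_used,
	(unsigned long) s.chunks_used, (unsigned long) s.bytes_free);
#endif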

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
				 void (*__warnfun) PP ((const char *))));


/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));


#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* How to really get more memory.  */
#if defined(CYGWIN)
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
#endif
__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif


/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t) size < 0)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
						  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      new = (*__morecore) (adj);
      /* If the second core request failed, RESULT + ADJ would point
	 past the break; punt rather than hand out unallocated space.  */
      if (new == NULL)
	return NULL;
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
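
/* Illustrative arithmetic for the adjustment above, with a
   hypothetical break address and BLOCKSIZE == 4096: the second
   __morecore call requests just enough to push RESULT up to the next
   block boundary.  */
#if 0
unsigned long addr = 0x100a30;		/* hypothetical break */
unsigned long adj = addr % 4096;	/* 0xa30 bytes past a boundary */
unsigned long aligned = addr + (4096 - adj);	/* 0x101000 */
#endif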

/* Get SIZE bytes, if we can get them starting at POSITION.
   Return the address of the space we got.
   If we cannot get space at POSITION, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static inline void
register_heapinfo (void)
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif

static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal_nolock
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
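
/* Sketch of the hook protocol malloc follows above (assumes <stdio.h>;
   TRACING_MALLOC_HOOK is hypothetical): a hook receives the request,
   performs its own bookkeeping, and calls _malloc_internal for the
   actual allocation.  */
#if 0
static __ptr_t
tracing_malloc_hook (__malloc_size_t size)
{
  __ptr_t p = _malloc_internal (size);
  fprintf (stderr, "malloc (%lu) = %p\n", (unsigned long) size, p);
  return p;
}

/* Installed at startup, before other threads exist:
   __malloc_hook = tracing_malloc_hook;  */
#endif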

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Cope with systems lacking `memmove'.  */
#ifndef memmove
#if (!defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#endif
/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif
#endif


/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
								 * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}
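
/* Companion sketch to the malloc hook above (TRACING_FREE_HOOK is
   hypothetical; assumes <stdio.h>): a __free_hook receives the pointer
   and must call _free_internal itself, or the block is never
   released.  */
#if 0
static void
tracing_free_hook (__ptr_t ptr)
{
  fprintf (stderr, "free (%p)\n", ptr);
  _free_internal (ptr);
}
#endif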

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Cope with systems lacking `memmove'.  */
#if (!defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else

/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
	 nonoverlapping chunks of TO - FROM bytes each.  However, if
	 TO - FROM is small, then the bcopy function call overhead
	 makes this not worth it.  The crossover point could be about
	 anywhere.  Since I don't think the obvious copy loop is too
	 bad, I'm trying to err in its favor.  */
      if (to - from < 64)
	{
	  do
	    *--endt = *--endf;
	  while (endf != from);
	}
      else
	{
	  for (;;)
	    {
	      endt -= (to - from);
	      endf -= (to - from);

	      if (endt < to)
		break;

	      bcopy (endf, endt, to - from);
	    }

	  /* If SIZE wasn't a multiple of TO - FROM, there will be a
	     little left over.  The amount left over is
	     (endt + (to - from)) - to, which is endt - from.  */
	  bcopy (from, to, endt - from);
	}
    }
}
#endif /* emacs */

#ifndef memmove
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif

#endif


#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result;

  /* Guard against overflow in NMEMB * SIZE: if the product wraps
     around, the caller would receive a block far smaller than it
     computed.  */
  if (size != 0 && nmemb > (__malloc_size_t) -1 / size)
    return NULL;

  result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
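
/* Why the overflow guard above matters: with a 32-bit
   __malloc_size_t, 65537 * 65537 is 2^32 + 2^17 + 1, which wraps to
   131073, so an unchecked calloc would return a block vastly smaller
   than the caller computed.  */
#if 0
__malloc_size_t nmemb = 65537, size = 65537;
/* nmemb * size wraps to 131073 bytes instead of ~4 GB.  */
#endif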
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined(__GNU_LIBRARY__) || defined(__UCLIBC__)
#define __sbrk sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined(CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
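
/* Sketch: __morecore may be repointed at any function with this
   signature that hands out successive contiguous pieces of memory and
   returns the previous break, as sbrk does.  The static-arena version
   below is hypothetical.  */
#if 0
static char arena[1 << 20];		/* 1 MiB of static core */
static __malloc_size_t arena_used;

static __ptr_t
arena_morecore (__malloc_ptrdiff_t increment)
{
  char *brk = arena + arena_used;
  if (increment < 0
      ? arena_used < (__malloc_size_t) -increment
      : sizeof arena - arena_used < (__malloc_size_t) increment)
    return NULL;			/* out of (or under) core */
  arena_used += increment;
  return brk;				/* previous break, like sbrk */
}

/* Before the first malloc:  __morecore = arena_morecore;  */
#endif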
1737 /* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1738
1739 This library is free software; you can redistribute it and/or
1740 modify it under the terms of the GNU General Public License as
1741 published by the Free Software Foundation; either version 2 of the
1742 License, or (at your option) any later version.
1743
1744 This library is distributed in the hope that it will be useful,
1745 but WITHOUT ANY WARRANTY; without even the implied warranty of
1746 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1747 General Public License for more details.
1748
1749 You should have received a copy of the GNU General Public
1750 License along with this library; see the file COPYING. If
1751 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1752 Fifth Floor, Boston, MA 02110-1301, USA. */
1753
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

__ptr_t (*__memalign_hook) PP ((__malloc_size_t __alignment,
				__malloc_size_t __size));

__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  Note that ADJ is the padding
     itself, ALIGNMENT minus the misalignment; using the misalignment
     directly would under-allocate in the loop below.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
  if (adj != 0)
    adj = alignment - adj;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (size + adj);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      if (adj != 0)
	adj = alignment - adj;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
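
/* Worked example: take ALIGNMENT = 16 and suppose the first malloc
   above returns 0x100c.  The misalignment is 0x100c % 16 = 12, so the
   padding ADJ is 16 - 12 = 4.  The block is freed and SIZE + 4 bytes
   are requested; if the allocator hands back 0x100c again, the new ADJ
   is still 4, the loop exits, and the caller receives 0x100c + 4 =
   0x1010, with the 4 spare bytes covering the padding exactly.  */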

#ifndef ENOMEM
#define ENOMEM 12
#endif

#ifndef EINVAL
#define EINVAL 22
#endif

int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
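
/* Usage sketch (not part of the library): POSIX requires ALIGNMENT to
   be a power of two multiple of sizeof (void *), which the checks
   above enforce; on failure the error code is the return value and
   *MEMPTR is left unset:

     void *buf;
     if (posix_memalign (&buf, 64, 4096) == 0)
       free (buf);
*/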

/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int.  */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
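
/* Usage sketch (not part of the library): valloc (N) behaves like
   memalign (pagesize, N), so the result is suitable for page-granular
   interfaces such as mprotect:

     char *page = (char *) valloc (pagesize);
*/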

#endif /* Not ELIDE_VALLOC.  */

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef emacs
#include <stdio.h>
#else
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#include <stdio.h>
#endif
#endif

/* Old hook values.  */
static void (*old_free_hook) (__ptr_t ptr);
static __ptr_t (*old_malloc_hook) (__malloc_size_t size);
static __ptr_t (*old_realloc_hook) (__ptr_t ptr, __malloc_size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
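
/* Layout sketch: mallochook below allocates sizeof (struct hdr) + SIZE
   + 1 bytes and returns the address just past the header, so a checked
   block looks like this:

     | size | magic (MAGICWORD) | SIZE bytes of user data | MAGICBYTE |
     ^ struct hdr               ^ pointer seen by the caller

   checkhdr can thus detect a clobbered header (MCHECK_HEAD), a
   clobbered trailing byte (MCHECK_TAIL), and a double free, since
   freehook rewrites the magic word to MAGICFREE.  */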

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#define flood memset
#else
static void flood (__ptr_t, int, __malloc_size_t);
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;
  while (size--)
    *cp++ = val;
}
#endif

static enum mcheck_status checkhdr (const struct hdr *);
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void freehook (__ptr_t);
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static __ptr_t mallochook (__malloc_size_t);
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}

static __ptr_t reallochook (__ptr_t, __malloc_size_t);
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}

static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (func)
     void (*func) (enum mcheck_status);
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
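
/* Usage sketch (not part of the library): mcheck must run before the
   first allocation, and mprobe below can then verify any live block;
   passing NULL installs the default mabort handler:

     if (mcheck (NULL) != 0)
       fprintf (stderr, "mcheck: malloc already in use\n");
     buf = malloc (100);
     if (mprobe (buf) != MCHECK_OK)
       abort ();
*/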

enum mcheck_status
mprobe (__ptr_t ptr)
{
  /* The header sits immediately before the block handed to the user,
     so step back over it; checking PTR itself would inspect user data
     as if it were a header.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */