1 /* This file is no longer automatically generated from libc. */
3 #define _MALLOC_INTERNAL
5 /* The malloc headers and source files from the C library follow here. */
7 /* Declarations for `malloc' and friends.
8 Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
9 2005, 2006, 2007 Free Software Foundation, Inc.
10 Written May 1989 by Mike Haertel.
12 This library is free software; you can redistribute it and/or
13 modify it under the terms of the GNU General Public License as
14 published by the Free Software Foundation; either version 2 of the
15 License, or (at your option) any later version.
17 This library is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 General Public License for more details.
22 You should have received a copy of the GNU General Public
23 License along with this library; see the file COPYING. If
24 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
25 Fifth Floor, Boston, MA 02110-1301, USA.
27 The author may be reached (Email) at the address mike@ai.mit.edu,
28 or (US mail) as Mike Haertel c/o Free Software Foundation. */
34 #ifdef _MALLOC_INTERNAL
40 #ifdef HAVE_GTK_AND_PTHREAD
44 #if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
45 || defined STDC_HEADERS || defined PROTOTYPES))
49 #define __ptr_t void *
50 #else /* Not C++ or ANSI C. */
54 #define __ptr_t char *
55 #endif /* C++ or ANSI C. */
72 #endif /* _MALLOC_INTERNAL. */
82 #define __malloc_size_t size_t
83 #define __malloc_ptrdiff_t ptrdiff_t
88 #define __malloc_size_t __SIZE_TYPE__
91 #ifndef __malloc_size_t
92 #define __malloc_size_t unsigned int
94 #define __malloc_ptrdiff_t int
102 /* Allocate SIZE bytes of memory. */
103 extern __ptr_t malloc
PP ((__malloc_size_t __size
));
104 /* Re-allocate the previously allocated block
105 in __ptr_t, making the new block SIZE bytes long. */
106 extern __ptr_t realloc
PP ((__ptr_t __ptr
, __malloc_size_t __size
));
107 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
108 extern __ptr_t calloc
PP ((__malloc_size_t __nmemb
, __malloc_size_t __size
));
109 /* Free a block allocated by `malloc', `realloc' or `calloc'. */
110 extern void free
PP ((__ptr_t __ptr
));
112 /* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
113 #if !defined (_MALLOC_INTERNAL) || defined (MSDOS) /* Avoid conflict. */
114 extern __ptr_t memalign
PP ((__malloc_size_t __alignment
,
115 __malloc_size_t __size
));
116 extern int posix_memalign
PP ((__ptr_t
*, __malloc_size_t
,
117 __malloc_size_t size
));
120 /* Allocate SIZE bytes on a page boundary. */
121 #if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
122 extern __ptr_t valloc
PP ((__malloc_size_t __size
));
126 /* Set up mutexes and make malloc etc. thread-safe. */
127 extern void malloc_enable_thread
PP ((void));
130 #ifdef _MALLOC_INTERNAL
132 /* The allocator divides the heap into blocks of fixed size; large
133 requests receive one or more whole blocks, and small requests
134 receive a fragment of a block. Fragment sizes are powers of two,
135 and all fragments of a block are the same size. When all the
136 fragments in a block have been freed, the block itself is freed. */
/* Number of bits in an `int', derived from CHAR_BIT so it is correct
   even on hosts with unusual byte widths.  */
#define INT_BIT (CHAR_BIT * sizeof (int))

/* Base-two logarithm of the block size: 4096-byte blocks on hosts
   with ints wider than 16 bits, 512-byte blocks otherwise.  */
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)

/* Size in bytes of a single heap block.  */
#define BLOCKSIZE (1 << BLOCKLOG)

/* Round the byte count SIZE up to a whole number of blocks.  */
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8
150 /* Data structure giving per-block information. */
153 /* Heap information for a busy block. */
156 /* Zero for a large (multiblock) object, or positive giving the
157 logarithm to the base two of the fragment size. */
163 __malloc_size_t nfree
; /* Free frags in a fragmented block. */
164 __malloc_size_t first
; /* First free fragment of the block. */
166 /* For a large object, in its first block, this has the number
167 of blocks in the object. In the other blocks, this has a
168 negative number which says how far back the first block is. */
169 __malloc_ptrdiff_t size
;
172 /* Heap information for a free block
173 (that may be the first of a free cluster). */
176 __malloc_size_t size
; /* Size (in blocks) of a free cluster. */
177 __malloc_size_t next
; /* Index of next free cluster. */
178 __malloc_size_t prev
; /* Index of previous free cluster. */
182 /* Pointer to first block of the heap. */
183 extern char *_heapbase
;
185 /* Table indexed by block number giving per-block information. */
186 extern malloc_info
*_heapinfo
;
188 /* Address to block number and vice versa. */
/* Map a heap address A to its 1-based index in the `_heapinfo' table,
   and a 1-based block index B back to the address of the block's
   first byte.  Index 0 of `_heapinfo' is reserved as the free-list
   anchor, hence the +1 / -1 adjustment.  */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
192 /* Current search index for the heap table. */
193 extern __malloc_size_t _heapindex
;
195 /* Limit of valid info table indices. */
196 extern __malloc_size_t _heaplimit
;
198 /* Doubly linked lists of free fragments. */
205 /* Free list headers for each fragment size. */
206 extern struct list _fraghead
[];
208 /* List of blocks allocated with `memalign' (or `valloc'). */
211 struct alignlist
*next
;
212 __ptr_t aligned
; /* The address that memaligned returned. */
213 __ptr_t exact
; /* The address that malloc returned. */
215 extern struct alignlist
*_aligned_blocks
;
217 /* Instrumentation. */
218 extern __malloc_size_t _chunks_used
;
219 extern __malloc_size_t _bytes_used
;
220 extern __malloc_size_t _chunks_free
;
221 extern __malloc_size_t _bytes_free
;
223 /* Internal versions of `malloc', `realloc', and `free'
224 used when these functions need to call each other.
225 They are the same but don't call the hooks. */
226 extern __ptr_t _malloc_internal
PP ((__malloc_size_t __size
));
227 extern __ptr_t _realloc_internal
PP ((__ptr_t __ptr
, __malloc_size_t __size
));
228 extern void _free_internal
PP ((__ptr_t __ptr
));
229 extern __ptr_t _malloc_internal_nolock
PP ((__malloc_size_t __size
));
230 extern __ptr_t _realloc_internal_nolock
PP ((__ptr_t __ptr
, __malloc_size_t __size
));
231 extern void _free_internal_nolock
PP ((__ptr_t __ptr
));
234 extern pthread_mutex_t _malloc_mutex
, _aligned_blocks_mutex
;
235 extern int _malloc_thread_enabled_p
;
238 if (_malloc_thread_enabled_p) \
239 pthread_mutex_lock (&_malloc_mutex); \
243 if (_malloc_thread_enabled_p) \
244 pthread_mutex_unlock (&_malloc_mutex); \
246 #define LOCK_ALIGNED_BLOCKS() \
248 if (_malloc_thread_enabled_p) \
249 pthread_mutex_lock (&_aligned_blocks_mutex); \
251 #define UNLOCK_ALIGNED_BLOCKS() \
253 if (_malloc_thread_enabled_p) \
254 pthread_mutex_unlock (&_aligned_blocks_mutex); \
259 #define LOCK_ALIGNED_BLOCKS()
260 #define UNLOCK_ALIGNED_BLOCKS()
263 #endif /* _MALLOC_INTERNAL. */
265 /* Given an address in the middle of a malloc'd object,
266 return the address of the beginning of the object. */
267 extern __ptr_t malloc_find_object_address
PP ((__ptr_t __ptr
));
269 /* Underlying allocation function; successive calls should
270 return contiguous pieces of memory. */
271 extern __ptr_t (*__morecore
) PP ((__malloc_ptrdiff_t __size
));
273 /* Default value of `__morecore'. */
274 extern __ptr_t __default_morecore
PP ((__malloc_ptrdiff_t __size
));
276 /* If not NULL, this function is called after each time
277 `__morecore' is called to increase the data size. */
278 extern void (*__after_morecore_hook
) PP ((void));
280 /* Number of extra blocks to get each time we ask for more core.
281 This reduces the frequency of calling `(*__morecore)'. */
282 extern __malloc_size_t __malloc_extra_blocks
;
284 /* Nonzero if `malloc' has been called and done its initialization. */
285 extern int __malloc_initialized
;
286 /* Function called to initialize malloc data structures. */
287 extern int __malloc_initialize
PP ((void));
289 /* Hooks for debugging versions. */
290 extern void (*__malloc_initialize_hook
) PP ((void));
291 extern void (*__free_hook
) PP ((__ptr_t __ptr
));
292 extern __ptr_t (*__malloc_hook
) PP ((__malloc_size_t __size
));
293 extern __ptr_t (*__realloc_hook
) PP ((__ptr_t __ptr
, __malloc_size_t __size
));
294 extern __ptr_t (*__memalign_hook
) PP ((__malloc_size_t __size
,
295 __malloc_size_t __alignment
));
297 /* Return values for `mprobe': these are the kinds of inconsistencies that
298 `mcheck' enables detection of. */
301 MCHECK_DISABLED
= -1, /* Consistency checking is not turned on. */
302 MCHECK_OK
, /* Block is fine. */
303 MCHECK_FREE
, /* Block freed twice. */
304 MCHECK_HEAD
, /* Memory before the block was clobbered. */
305 MCHECK_TAIL
/* Memory after the block was clobbered. */
308 /* Activate a standard collection of debugging hooks. This must be called
309 before `malloc' is ever called. ABORTFUNC is called with an error code
310 (see enum above) when an inconsistency is detected. If ABORTFUNC is
311 null, the standard function prints on stderr and then calls `abort'. */
312 extern int mcheck
PP ((void (*__abortfunc
) PP ((enum mcheck_status
))));
314 /* Check for aberrations in a particular malloc'd block. You must have
315 called `mcheck' already. These are the same checks that `mcheck' does
316 when you free or reallocate a block. */
317 extern enum mcheck_status mprobe
PP ((__ptr_t __ptr
));
319 /* Activate a standard collection of tracing hooks. */
320 extern void mtrace
PP ((void));
321 extern void muntrace
PP ((void));
323 /* Statistics available to the user. */
326 __malloc_size_t bytes_total
; /* Total size of the heap. */
327 __malloc_size_t chunks_used
; /* Chunks allocated by the user. */
328 __malloc_size_t bytes_used
; /* Byte total of user-allocated chunks. */
329 __malloc_size_t chunks_free
; /* Chunks in the free list. */
330 __malloc_size_t bytes_free
; /* Byte total of chunks in the free list. */
333 /* Pick up the current statistics. */
334 extern struct mstats mstats
PP ((void));
336 /* Call WARNFUN with a warning message when memory usage is high. */
337 extern void memory_warnings
PP ((__ptr_t __start
,
338 void (*__warnfun
) PP ((const char *))));
341 /* Relocating allocator. */
343 /* Allocate SIZE bytes, and store the address in *HANDLEPTR. */
344 extern __ptr_t r_alloc
PP ((__ptr_t
*__handleptr
, __malloc_size_t __size
));
346 /* Free the storage allocated in HANDLEPTR. */
347 extern void r_alloc_free
PP ((__ptr_t
*__handleptr
));
349 /* Adjust the block at HANDLEPTR to be SIZE bytes long. */
350 extern __ptr_t r_re_alloc
PP ((__ptr_t
*__handleptr
, __malloc_size_t __size
));
357 #endif /* malloc.h */
358 /* Memory allocator `malloc'.
359 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
360 Written May 1989 by Mike Haertel.
362 This library is free software; you can redistribute it and/or
363 modify it under the terms of the GNU General Public License as
364 published by the Free Software Foundation; either version 2 of the
365 License, or (at your option) any later version.
367 This library is distributed in the hope that it will be useful,
368 but WITHOUT ANY WARRANTY; without even the implied warranty of
369 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
370 General Public License for more details.
372 You should have received a copy of the GNU General Public
373 License along with this library; see the file COPYING. If
374 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
375 Fifth Floor, Boston, MA 02110-1301, USA.
377 The author may be reached (Email) at the address mike@ai.mit.edu,
378 or (US mail) as Mike Haertel c/o Free Software Foundation. */
380 #ifndef _MALLOC_INTERNAL
381 #define _MALLOC_INTERNAL
386 /* How to really get more memory. */
388 extern __ptr_t bss_sbrk
PP ((ptrdiff_t __size
));
389 extern int bss_sbrk_did_unexec
;
391 __ptr_t (*__morecore
) PP ((__malloc_ptrdiff_t __size
)) = __default_morecore
;
393 /* Debugging hook for `malloc'. */
394 __ptr_t (*__malloc_hook
) PP ((__malloc_size_t __size
));
396 /* Pointer to the base of the first block. */
399 /* Block information table. Allocated with align/__free (not malloc/free). */
400 malloc_info
*_heapinfo
;
402 /* Number of info entries. */
403 static __malloc_size_t heapsize
;
405 /* Search index in the info table. */
406 __malloc_size_t _heapindex
;
408 /* Limit of valid info table indices. */
409 __malloc_size_t _heaplimit
;
411 /* Free lists for each fragment size. */
412 struct list _fraghead
[BLOCKLOG
];
414 /* Instrumentation. */
415 __malloc_size_t _chunks_used
;
416 __malloc_size_t _bytes_used
;
417 __malloc_size_t _chunks_free
;
418 __malloc_size_t _bytes_free
;
420 /* Are you experienced? */
421 int __malloc_initialized
;
423 __malloc_size_t __malloc_extra_blocks
;
425 void (*__malloc_initialize_hook
) PP ((void));
426 void (*__after_morecore_hook
) PP ((void));
428 #if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE
430 /* Some code for hunting a bug writing into _heapinfo.
432 Call this macro with argument PROT non-zero to protect internal
433 malloc state against writing to it, call it with a zero argument to
434 make it readable and writable.
436 Note that this only works if BLOCKSIZE == page size, which is
437 the case on the i386. */
439 #include <sys/types.h>
440 #include <sys/mman.h>
442 static int state_protected_p
;
443 static __malloc_size_t last_state_size
;
444 static malloc_info
*last_heapinfo
;
447 protect_malloc_state (protect_p
)
450 /* If _heapinfo has been relocated, make sure its old location
451 isn't left read-only; it will be reused by malloc. */
452 if (_heapinfo
!= last_heapinfo
454 && state_protected_p
)
455 mprotect (last_heapinfo
, last_state_size
, PROT_READ
| PROT_WRITE
);
457 last_state_size
= _heaplimit
* sizeof *_heapinfo
;
458 last_heapinfo
= _heapinfo
;
460 if (protect_p
!= state_protected_p
)
462 state_protected_p
= protect_p
;
463 if (mprotect (_heapinfo
, last_state_size
,
464 protect_p
? PROT_READ
: PROT_READ
| PROT_WRITE
) != 0)
469 #define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)
472 #define PROTECT_MALLOC_STATE(PROT) /* empty */
476 /* Aligned allocation. */
477 static __ptr_t align
PP ((__malloc_size_t
));
480 __malloc_size_t size
;
483 unsigned long int adj
;
485 /* align accepts an unsigned argument, but __morecore accepts a
486 signed one. This could lead to trouble if SIZE overflows a
487 signed int type accepted by __morecore. We just punt in that
488 case, since they are requesting a ludicrous amount anyway. */
489 if ((__malloc_ptrdiff_t
)size
< 0)
492 result
= (*__morecore
) (size
);
493 adj
= (unsigned long int) ((unsigned long int) ((char *) result
-
494 (char *) NULL
)) % BLOCKSIZE
;
498 adj
= BLOCKSIZE
- adj
;
499 new = (*__morecore
) (adj
);
500 result
= (char *) result
+ adj
;
503 if (__after_morecore_hook
)
504 (*__after_morecore_hook
) ();
509 /* Get SIZE bytes, if we can get them starting at END.
510 Return the address of the space we got.
511 If we cannot get space at END, fail and return 0. */
512 static __ptr_t get_contiguous_space
PP ((__malloc_ptrdiff_t
, __ptr_t
));
514 get_contiguous_space (size
, position
)
515 __malloc_ptrdiff_t size
;
521 before
= (*__morecore
) (0);
522 /* If we can tell in advance that the break is at the wrong place,
524 if (before
!= position
)
527 /* Allocate SIZE bytes and get the address of them. */
528 after
= (*__morecore
) (size
);
532 /* It was not contiguous--reject it. */
533 if (after
!= position
)
535 (*__morecore
) (- size
);
543 /* This is called when `_heapinfo' and `heapsize' have just
544 been set to describe a new info table. Set up the table
545 to describe itself and account for it in the statistics. */
547 register_heapinfo (void)
549 __malloc_size_t block
, blocks
;
551 block
= BLOCK (_heapinfo
);
552 blocks
= BLOCKIFY (heapsize
* sizeof (malloc_info
));
554 /* Account for the _heapinfo block itself in the statistics. */
555 _bytes_used
+= blocks
* BLOCKSIZE
;
558 /* Describe the heapinfo block itself in the heapinfo. */
559 _heapinfo
[block
].busy
.type
= 0;
560 _heapinfo
[block
].busy
.info
.size
= blocks
;
561 /* Leave back-pointers for malloc_find_address. */
563 _heapinfo
[block
+ blocks
].busy
.info
.size
= -blocks
;
567 pthread_mutex_t _malloc_mutex
= PTHREAD_MUTEX_INITIALIZER
;
568 pthread_mutex_t _aligned_blocks_mutex
= PTHREAD_MUTEX_INITIALIZER
;
569 int _malloc_thread_enabled_p
;
572 malloc_atfork_handler_prepare ()
575 LOCK_ALIGNED_BLOCKS ();
579 malloc_atfork_handler_parent ()
581 UNLOCK_ALIGNED_BLOCKS ();
586 malloc_atfork_handler_child ()
588 UNLOCK_ALIGNED_BLOCKS ();
592 /* Set up mutexes and make malloc etc. thread-safe. */
594 malloc_enable_thread ()
596 if (_malloc_thread_enabled_p
)
599 /* Some pthread implementations call malloc for statically
600 initialized mutexes when they are used first. To avoid such a
601 situation, we initialize mutexes here while their use is
602 disabled in malloc etc. */
603 pthread_mutex_init (&_malloc_mutex
, NULL
);
604 pthread_mutex_init (&_aligned_blocks_mutex
, NULL
);
605 pthread_atfork (malloc_atfork_handler_prepare
,
606 malloc_atfork_handler_parent
,
607 malloc_atfork_handler_child
);
608 _malloc_thread_enabled_p
= 1;
613 malloc_initialize_1 ()
619 if (__malloc_initialize_hook
)
620 (*__malloc_initialize_hook
) ();
622 heapsize
= HEAP
/ BLOCKSIZE
;
623 _heapinfo
= (malloc_info
*) align (heapsize
* sizeof (malloc_info
));
624 if (_heapinfo
== NULL
)
626 memset (_heapinfo
, 0, heapsize
* sizeof (malloc_info
));
627 _heapinfo
[0].free
.size
= 0;
628 _heapinfo
[0].free
.next
= _heapinfo
[0].free
.prev
= 0;
630 _heapbase
= (char *) _heapinfo
;
631 _heaplimit
= BLOCK (_heapbase
+ heapsize
* sizeof (malloc_info
));
633 register_heapinfo ();
635 __malloc_initialized
= 1;
636 PROTECT_MALLOC_STATE (1);
640 /* Set everything up and remember that we have.
641 main will call malloc which calls this function. That is before any threads
642 or signal handlers has been set up, so we don't need thread protection. */
644 __malloc_initialize ()
646 if (__malloc_initialized
)
649 malloc_initialize_1 ();
651 return __malloc_initialized
;
654 static int morecore_recursing
;
656 /* Get neatly aligned memory, initializing or
657 growing the heap info table as necessary. */
658 static __ptr_t morecore_nolock
PP ((__malloc_size_t
));
660 morecore_nolock (size
)
661 __malloc_size_t size
;
664 malloc_info
*newinfo
, *oldinfo
;
665 __malloc_size_t newsize
;
667 if (morecore_recursing
)
668 /* Avoid recursion. The caller will know how to handle a null return. */
671 result
= align (size
);
675 PROTECT_MALLOC_STATE (0);
677 /* Check if we need to grow the info table. */
678 if ((__malloc_size_t
) BLOCK ((char *) result
+ size
) > heapsize
)
680 /* Calculate the new _heapinfo table size. We do not account for the
681 added blocks in the table itself, as we hope to place them in
682 existing free space, which is already covered by part of the
687 while ((__malloc_size_t
) BLOCK ((char *) result
+ size
) > newsize
);
689 /* We must not reuse existing core for the new info table when called
690 from realloc in the case of growing a large block, because the
691 block being grown is momentarily marked as free. In this case
692 _heaplimit is zero so we know not to reuse space for internal
696 /* First try to allocate the new info table in core we already
697 have, in the usual way using realloc. If realloc cannot
698 extend it in place or relocate it to existing sufficient core,
699 we will get called again, and the code above will notice the
700 `morecore_recursing' flag and return null. */
701 int save
= errno
; /* Don't want to clobber errno with ENOMEM. */
702 morecore_recursing
= 1;
703 newinfo
= (malloc_info
*) _realloc_internal_nolock
704 (_heapinfo
, newsize
* sizeof (malloc_info
));
705 morecore_recursing
= 0;
710 /* We found some space in core, and realloc has put the old
711 table's blocks on the free list. Now zero the new part
712 of the table and install the new table location. */
713 memset (&newinfo
[heapsize
], 0,
714 (newsize
- heapsize
) * sizeof (malloc_info
));
721 /* Allocate new space for the malloc info table. */
724 newinfo
= (malloc_info
*) align (newsize
* sizeof (malloc_info
));
729 (*__morecore
) (-size
);
733 /* Is it big enough to record status for its own space?
735 if ((__malloc_size_t
) BLOCK ((char *) newinfo
736 + newsize
* sizeof (malloc_info
))
740 /* Must try again. First give back most of what we just got. */
741 (*__morecore
) (- newsize
* sizeof (malloc_info
));
745 /* Copy the old table to the beginning of the new,
746 and zero the rest of the new table. */
747 memcpy (newinfo
, _heapinfo
, heapsize
* sizeof (malloc_info
));
748 memset (&newinfo
[heapsize
], 0,
749 (newsize
- heapsize
) * sizeof (malloc_info
));
754 register_heapinfo ();
756 /* Reset _heaplimit so _free_internal never decides
757 it can relocate or resize the info table. */
759 _free_internal_nolock (oldinfo
);
760 PROTECT_MALLOC_STATE (0);
762 /* The new heap limit includes the new table just allocated. */
763 _heaplimit
= BLOCK ((char *) newinfo
+ heapsize
* sizeof (malloc_info
));
768 _heaplimit
= BLOCK ((char *) result
+ size
);
772 /* Allocate memory from the heap. */
774 _malloc_internal_nolock (size
)
775 __malloc_size_t size
;
778 __malloc_size_t block
, blocks
, lastblocks
, start
;
779 register __malloc_size_t i
;
782 /* ANSI C allows `malloc (0)' to either return NULL, or to return a
783 valid address you can realloc and free (though not dereference).
785 It turns out that some extant code (sunrpc, at least Ultrix's version)
786 expects `malloc (0)' to return non-NULL and breaks otherwise.
794 PROTECT_MALLOC_STATE (0);
796 if (size
< sizeof (struct list
))
797 size
= sizeof (struct list
);
799 /* Determine the allocation policy based on the request size. */
800 if (size
<= BLOCKSIZE
/ 2)
802 /* Small allocation to receive a fragment of a block.
803 Determine the logarithm to base two of the fragment size. */
804 register __malloc_size_t log
= 1;
806 while ((size
/= 2) != 0)
809 /* Look in the fragment lists for a
810 free fragment of the desired size. */
811 next
= _fraghead
[log
].next
;
814 /* There are free fragments of this size.
815 Pop a fragment out of the fragment list and return it.
816 Update the block's nfree and first counters. */
817 result
= (__ptr_t
) next
;
818 next
->prev
->next
= next
->next
;
819 if (next
->next
!= NULL
)
820 next
->next
->prev
= next
->prev
;
821 block
= BLOCK (result
);
822 if (--_heapinfo
[block
].busy
.info
.frag
.nfree
!= 0)
823 _heapinfo
[block
].busy
.info
.frag
.first
= (unsigned long int)
824 ((unsigned long int) ((char *) next
->next
- (char *) NULL
)
827 /* Update the statistics. */
829 _bytes_used
+= 1 << log
;
831 _bytes_free
-= 1 << log
;
835 /* No free fragments of the desired size, so get a new block
836 and break it into fragments, returning the first. */
837 #ifdef GC_MALLOC_CHECK
838 result
= _malloc_internal_nolock (BLOCKSIZE
);
839 PROTECT_MALLOC_STATE (0);
840 #elif defined (USE_PTHREAD)
841 result
= _malloc_internal_nolock (BLOCKSIZE
);
843 result
= malloc (BLOCKSIZE
);
847 PROTECT_MALLOC_STATE (1);
851 /* Link all fragments but the first into the free list. */
852 next
= (struct list
*) ((char *) result
+ (1 << log
));
854 next
->prev
= &_fraghead
[log
];
855 _fraghead
[log
].next
= next
;
857 for (i
= 2; i
< (__malloc_size_t
) (BLOCKSIZE
>> log
); ++i
)
859 next
= (struct list
*) ((char *) result
+ (i
<< log
));
860 next
->next
= _fraghead
[log
].next
;
861 next
->prev
= &_fraghead
[log
];
862 next
->prev
->next
= next
;
863 next
->next
->prev
= next
;
866 /* Initialize the nfree and first counters for this block. */
867 block
= BLOCK (result
);
868 _heapinfo
[block
].busy
.type
= log
;
869 _heapinfo
[block
].busy
.info
.frag
.nfree
= i
- 1;
870 _heapinfo
[block
].busy
.info
.frag
.first
= i
- 1;
872 _chunks_free
+= (BLOCKSIZE
>> log
) - 1;
873 _bytes_free
+= BLOCKSIZE
- (1 << log
);
874 _bytes_used
-= BLOCKSIZE
- (1 << log
);
879 /* Large allocation to receive one or more blocks.
880 Search the free list in a circle starting at the last place visited.
881 If we loop completely around without finding a large enough
882 space we will have to get more memory from the system. */
883 blocks
= BLOCKIFY (size
);
884 start
= block
= _heapindex
;
885 while (_heapinfo
[block
].free
.size
< blocks
)
887 block
= _heapinfo
[block
].free
.next
;
890 /* Need to get more from the system. Get a little extra. */
891 __malloc_size_t wantblocks
= blocks
+ __malloc_extra_blocks
;
892 block
= _heapinfo
[0].free
.prev
;
893 lastblocks
= _heapinfo
[block
].free
.size
;
894 /* Check to see if the new core will be contiguous with the
895 final free block; if so we don't need to get as much. */
896 if (_heaplimit
!= 0 && block
+ lastblocks
== _heaplimit
&&
897 /* We can't do this if we will have to make the heap info
898 table bigger to accommodate the new space. */
899 block
+ wantblocks
<= heapsize
&&
900 get_contiguous_space ((wantblocks
- lastblocks
) * BLOCKSIZE
,
901 ADDRESS (block
+ lastblocks
)))
903 /* We got it contiguously. Which block we are extending
904 (the `final free block' referred to above) might have
905 changed, if it got combined with a freed info table. */
906 block
= _heapinfo
[0].free
.prev
;
907 _heapinfo
[block
].free
.size
+= (wantblocks
- lastblocks
);
908 _bytes_free
+= (wantblocks
- lastblocks
) * BLOCKSIZE
;
909 _heaplimit
+= wantblocks
- lastblocks
;
912 result
= morecore_nolock (wantblocks
* BLOCKSIZE
);
915 block
= BLOCK (result
);
916 /* Put the new block at the end of the free list. */
917 _heapinfo
[block
].free
.size
= wantblocks
;
918 _heapinfo
[block
].free
.prev
= _heapinfo
[0].free
.prev
;
919 _heapinfo
[block
].free
.next
= 0;
920 _heapinfo
[0].free
.prev
= block
;
921 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
= block
;
923 /* Now loop to use some of that block for this allocation. */
927 /* At this point we have found a suitable free list entry.
928 Figure out how to remove what we need from the list. */
929 result
= ADDRESS (block
);
930 if (_heapinfo
[block
].free
.size
> blocks
)
932 /* The block we found has a bit left over,
933 so relink the tail end back into the free list. */
934 _heapinfo
[block
+ blocks
].free
.size
935 = _heapinfo
[block
].free
.size
- blocks
;
936 _heapinfo
[block
+ blocks
].free
.next
937 = _heapinfo
[block
].free
.next
;
938 _heapinfo
[block
+ blocks
].free
.prev
939 = _heapinfo
[block
].free
.prev
;
940 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
941 = _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
942 = _heapindex
= block
+ blocks
;
946 /* The block exactly matches our requirements,
947 so just remove it from the list. */
948 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
949 = _heapinfo
[block
].free
.prev
;
950 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
951 = _heapindex
= _heapinfo
[block
].free
.next
;
955 _heapinfo
[block
].busy
.type
= 0;
956 _heapinfo
[block
].busy
.info
.size
= blocks
;
958 _bytes_used
+= blocks
* BLOCKSIZE
;
959 _bytes_free
-= blocks
* BLOCKSIZE
;
961 /* Mark all the blocks of the object just allocated except for the
962 first with a negative number so you can find the first block by
963 adding that adjustment. */
965 _heapinfo
[block
+ blocks
].busy
.info
.size
= -blocks
;
968 PROTECT_MALLOC_STATE (1);
974 _malloc_internal (size
)
975 __malloc_size_t size
;
980 result
= _malloc_internal_nolock (size
);
988 __malloc_size_t size
;
990 __ptr_t (*hook
) (__malloc_size_t
);
992 if (!__malloc_initialized
&& !__malloc_initialize ())
995 /* Copy the value of __malloc_hook to an automatic variable in case
996 __malloc_hook is modified in another thread between its
997 NULL-check and the use.
999 Note: Strictly speaking, this is not a right solution. We should
1000 use mutexes to access non-read-only variables that are shared
1001 among multiple threads. We just leave it for compatibility with
1002 glibc malloc (i.e., assignments to __malloc_hook) for now. */
1003 hook
= __malloc_hook
;
1004 return (hook
!= NULL
? *hook
: _malloc_internal
) (size
);
1009 /* On some ANSI C systems, some libc functions call _malloc, _free
1010 and _realloc. Make them use the GNU functions. */
1014 __malloc_size_t size
;
1016 return malloc (size
);
1027 _realloc (ptr
, size
)
1029 __malloc_size_t size
;
1031 return realloc (ptr
, size
);
1035 /* Free a block of memory allocated by `malloc'.
1036 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
1037 Written May 1989 by Mike Haertel.
1039 This library is free software; you can redistribute it and/or
1040 modify it under the terms of the GNU General Public License as
1041 published by the Free Software Foundation; either version 2 of the
1042 License, or (at your option) any later version.
1044 This library is distributed in the hope that it will be useful,
1045 but WITHOUT ANY WARRANTY; without even the implied warranty of
1046 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1047 General Public License for more details.
1049 You should have received a copy of the GNU General Public
1050 License along with this library; see the file COPYING. If
1051 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1052 Fifth Floor, Boston, MA 02110-1301, USA.
1054 The author may be reached (Email) at the address mike@ai.mit.edu,
1055 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1057 #ifndef _MALLOC_INTERNAL
1058 #define _MALLOC_INTERNAL
1063 /* Debugging hook for free. */
1064 void (*__free_hook
) PP ((__ptr_t __ptr
));
1066 /* List of blocks allocated by memalign. */
1067 struct alignlist
*_aligned_blocks
= NULL
;
1069 /* Return memory to the heap.
1070 Like `_free_internal' but don't lock mutex. */
1072 _free_internal_nolock (ptr
)
1076 __malloc_size_t block
, blocks
;
1077 register __malloc_size_t i
;
1078 struct list
*prev
, *next
;
1080 const __malloc_size_t lesscore_threshold
1081 /* Threshold of free space at which we will return some to the system. */
1082 = FINAL_FREE_BLOCKS
+ 2 * __malloc_extra_blocks
;
1084 register struct alignlist
*l
;
1089 PROTECT_MALLOC_STATE (0);
1091 LOCK_ALIGNED_BLOCKS ();
1092 for (l
= _aligned_blocks
; l
!= NULL
; l
= l
->next
)
1093 if (l
->aligned
== ptr
)
1095 l
->aligned
= NULL
; /* Mark the slot in the list as free. */
1099 UNLOCK_ALIGNED_BLOCKS ();
1101 block
= BLOCK (ptr
);
1103 type
= _heapinfo
[block
].busy
.type
;
1107 /* Get as many statistics as early as we can. */
1109 _bytes_used
-= _heapinfo
[block
].busy
.info
.size
* BLOCKSIZE
;
1110 _bytes_free
+= _heapinfo
[block
].busy
.info
.size
* BLOCKSIZE
;
1112 /* Find the free cluster previous to this one in the free list.
1113 Start searching at the last block referenced; this may benefit
1114 programs with locality of allocation. */
1118 i
= _heapinfo
[i
].free
.prev
;
1122 i
= _heapinfo
[i
].free
.next
;
1123 while (i
> 0 && i
< block
);
1124 i
= _heapinfo
[i
].free
.prev
;
1127 /* Determine how to link this block into the free list. */
1128 if (block
== i
+ _heapinfo
[i
].free
.size
)
1130 /* Coalesce this block with its predecessor. */
1131 _heapinfo
[i
].free
.size
+= _heapinfo
[block
].busy
.info
.size
;
1136 /* Really link this block back into the free list. */
1137 _heapinfo
[block
].free
.size
= _heapinfo
[block
].busy
.info
.size
;
1138 _heapinfo
[block
].free
.next
= _heapinfo
[i
].free
.next
;
1139 _heapinfo
[block
].free
.prev
= i
;
1140 _heapinfo
[i
].free
.next
= block
;
1141 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
= block
;
1145 /* Now that the block is linked in, see if we can coalesce it
1146 with its successor (by deleting its successor from the list
1147 and adding in its size). */
1148 if (block
+ _heapinfo
[block
].free
.size
== _heapinfo
[block
].free
.next
)
1150 _heapinfo
[block
].free
.size
1151 += _heapinfo
[_heapinfo
[block
].free
.next
].free
.size
;
1152 _heapinfo
[block
].free
.next
1153 = _heapinfo
[_heapinfo
[block
].free
.next
].free
.next
;
1154 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
= block
;
1158 /* How many trailing free blocks are there now? */
1159 blocks
= _heapinfo
[block
].free
.size
;
1161 /* Where is the current end of accessible core? */
1162 curbrk
= (*__morecore
) (0);
1164 if (_heaplimit
!= 0 && curbrk
== ADDRESS (_heaplimit
))
1166 /* The end of the malloc heap is at the end of accessible core.
1167 It's possible that moving _heapinfo will allow us to
1168 return some space to the system. */
1170 __malloc_size_t info_block
= BLOCK (_heapinfo
);
1171 __malloc_size_t info_blocks
= _heapinfo
[info_block
].busy
.info
.size
;
1172 __malloc_size_t prev_block
= _heapinfo
[block
].free
.prev
;
1173 __malloc_size_t prev_blocks
= _heapinfo
[prev_block
].free
.size
;
1174 __malloc_size_t next_block
= _heapinfo
[block
].free
.next
;
1175 __malloc_size_t next_blocks
= _heapinfo
[next_block
].free
.size
;
1177 if (/* Win if this block being freed is last in core, the info table
1178 is just before it, the previous free block is just before the
1179 info table, and the two free blocks together form a useful
1180 amount to return to the system. */
1181 (block
+ blocks
== _heaplimit
&&
1182 info_block
+ info_blocks
== block
&&
1183 prev_block
!= 0 && prev_block
+ prev_blocks
== info_block
&&
1184 blocks
+ prev_blocks
>= lesscore_threshold
) ||
1185 /* Nope, not the case. We can also win if this block being
1186 freed is just before the info table, and the table extends
1187 to the end of core or is followed only by a free block,
1188 and the total free space is worth returning to the system. */
1189 (block
+ blocks
== info_block
&&
1190 ((info_block
+ info_blocks
== _heaplimit
&&
1191 blocks
>= lesscore_threshold
) ||
1192 (info_block
+ info_blocks
== next_block
&&
1193 next_block
+ next_blocks
== _heaplimit
&&
1194 blocks
+ next_blocks
>= lesscore_threshold
)))
1197 malloc_info
*newinfo
;
1198 __malloc_size_t oldlimit
= _heaplimit
;
1200 /* Free the old info table, clearing _heaplimit to avoid
1201 recursion into this code. We don't want to return the
1202 table's blocks to the system before we have copied them to
1203 the new location. */
1205 _free_internal_nolock (_heapinfo
);
1206 _heaplimit
= oldlimit
;
1208 /* Tell malloc to search from the beginning of the heap for
1209 free blocks, so it doesn't reuse the ones just freed. */
1212 /* Allocate new space for the info table and move its data. */
1213 newinfo
= (malloc_info
*) _malloc_internal_nolock (info_blocks
1215 PROTECT_MALLOC_STATE (0);
1216 memmove (newinfo
, _heapinfo
, info_blocks
* BLOCKSIZE
);
1217 _heapinfo
= newinfo
;
1219 /* We should now have coalesced the free block with the
1220 blocks freed from the old info table. Examine the entire
1221 trailing free block to decide below whether to return some
1223 block
= _heapinfo
[0].free
.prev
;
1224 blocks
= _heapinfo
[block
].free
.size
;
1227 /* Now see if we can return stuff to the system. */
1228 if (block
+ blocks
== _heaplimit
&& blocks
>= lesscore_threshold
)
1230 register __malloc_size_t bytes
= blocks
* BLOCKSIZE
;
1231 _heaplimit
-= blocks
;
1232 (*__morecore
) (-bytes
);
1233 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
1234 = _heapinfo
[block
].free
.next
;
1235 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
1236 = _heapinfo
[block
].free
.prev
;
1237 block
= _heapinfo
[block
].free
.prev
;
1239 _bytes_free
-= bytes
;
1243 /* Set the next search to begin at this block. */
1248 /* Do some of the statistics. */
1250 _bytes_used
-= 1 << type
;
1252 _bytes_free
+= 1 << type
;
1254 /* Get the address of the first free fragment in this block. */
1255 prev
= (struct list
*) ((char *) ADDRESS (block
) +
1256 (_heapinfo
[block
].busy
.info
.frag
.first
<< type
));
1258 if (_heapinfo
[block
].busy
.info
.frag
.nfree
== (BLOCKSIZE
>> type
) - 1)
1260 /* If all fragments of this block are free, remove them
1261 from the fragment list and free the whole block. */
1263 for (i
= 1; i
< (__malloc_size_t
) (BLOCKSIZE
>> type
); ++i
)
1265 prev
->prev
->next
= next
;
1267 next
->prev
= prev
->prev
;
1268 _heapinfo
[block
].busy
.type
= 0;
1269 _heapinfo
[block
].busy
.info
.size
= 1;
1271 /* Keep the statistics accurate. */
1273 _bytes_used
+= BLOCKSIZE
;
1274 _chunks_free
-= BLOCKSIZE
>> type
;
1275 _bytes_free
-= BLOCKSIZE
;
1277 #if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
1278 _free_internal_nolock (ADDRESS (block
));
1280 free (ADDRESS (block
));
1283 else if (_heapinfo
[block
].busy
.info
.frag
.nfree
!= 0)
1285 /* If some fragments of this block are free, link this
1286 fragment into the fragment list after the first free
1287 fragment of this block. */
1288 next
= (struct list
*) ptr
;
1289 next
->next
= prev
->next
;
1292 if (next
->next
!= NULL
)
1293 next
->next
->prev
= next
;
1294 ++_heapinfo
[block
].busy
.info
.frag
.nfree
;
1298 /* No fragments of this block are free, so link this
1299 fragment into the fragment list and announce that
1300 it is the first free fragment of this block. */
1301 prev
= (struct list
*) ptr
;
1302 _heapinfo
[block
].busy
.info
.frag
.nfree
= 1;
1303 _heapinfo
[block
].busy
.info
.frag
.first
= (unsigned long int)
1304 ((unsigned long int) ((char *) ptr
- (char *) NULL
)
1305 % BLOCKSIZE
>> type
);
1306 prev
->next
= _fraghead
[type
].next
;
1307 prev
->prev
= &_fraghead
[type
];
1308 prev
->prev
->next
= prev
;
1309 if (prev
->next
!= NULL
)
1310 prev
->next
->prev
= prev
;
1315 PROTECT_MALLOC_STATE (1);
1318 /* Return memory to the heap.
1319 Like `free' but don't call a __free_hook if there is one. */
1321 _free_internal (ptr
)
1325 _free_internal_nolock (ptr
);
1329 /* Return memory to the heap. */
1335 void (*hook
) (__ptr_t
) = __free_hook
;
1340 _free_internal (ptr
);
1343 /* Define the `cfree' alias for `free'. */
1345 weak_alias (free
, cfree
)
1354 /* Change the size of a block allocated by `malloc'.
1355 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1356 Written May 1989 by Mike Haertel.
1358 This library is free software; you can redistribute it and/or
1359 modify it under the terms of the GNU General Public License as
1360 published by the Free Software Foundation; either version 2 of the
1361 License, or (at your option) any later version.
1363 This library is distributed in the hope that it will be useful,
1364 but WITHOUT ANY WARRANTY; without even the implied warranty of
1365 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1366 General Public License for more details.
1368 You should have received a copy of the GNU General Public
1369 License along with this library; see the file COPYING. If
1370 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1371 Fifth Floor, Boston, MA 02110-1301, USA.
1373 The author may be reached (Email) at the address mike@ai.mit.edu,
1374 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1376 #ifndef _MALLOC_INTERNAL
1377 #define _MALLOC_INTERNAL
1382 #define min(A, B) ((A) < (B) ? (A) : (B))
1384 /* Debugging hook for realloc. */
1385 __ptr_t (*__realloc_hook
) PP ((__ptr_t __ptr
, __malloc_size_t __size
));
1387 /* Resize the given region to the new size, returning a pointer
1388 to the (possibly moved) region. This is optimized for speed;
1389 some benchmarks seem to indicate that greater compactness is
1390 achieved by unconditionally allocating and copying to a
1391 new region. This module has incestuous knowledge of the
1392 internals of both free and malloc. */
1394 _realloc_internal_nolock (ptr
, size
)
1396 __malloc_size_t size
;
1400 __malloc_size_t block
, blocks
, oldlimit
;
1404 _free_internal_nolock (ptr
);
1405 return _malloc_internal_nolock (0);
1407 else if (ptr
== NULL
)
1408 return _malloc_internal_nolock (size
);
1410 block
= BLOCK (ptr
);
1412 PROTECT_MALLOC_STATE (0);
1414 type
= _heapinfo
[block
].busy
.type
;
1418 /* Maybe reallocate a large block to a small fragment. */
1419 if (size
<= BLOCKSIZE
/ 2)
1421 result
= _malloc_internal_nolock (size
);
1424 memcpy (result
, ptr
, size
);
1425 _free_internal_nolock (ptr
);
1430 /* The new size is a large allocation as well;
1431 see if we can hold it in place. */
1432 blocks
= BLOCKIFY (size
);
1433 if (blocks
< _heapinfo
[block
].busy
.info
.size
)
1435 /* The new size is smaller; return
1436 excess memory to the free list. */
1437 _heapinfo
[block
+ blocks
].busy
.type
= 0;
1438 _heapinfo
[block
+ blocks
].busy
.info
.size
1439 = _heapinfo
[block
].busy
.info
.size
- blocks
;
1440 _heapinfo
[block
].busy
.info
.size
= blocks
;
1441 /* We have just created a new chunk by splitting a chunk in two.
1442 Now we will free this chunk; increment the statistics counter
1443 so it doesn't become wrong when _free_internal decrements it. */
1445 _free_internal_nolock (ADDRESS (block
+ blocks
));
1448 else if (blocks
== _heapinfo
[block
].busy
.info
.size
)
1449 /* No size change necessary. */
1453 /* Won't fit, so allocate a new region that will.
1454 Free the old region first in case there is sufficient
1455 adjacent free space to grow without moving. */
1456 blocks
= _heapinfo
[block
].busy
.info
.size
;
1457 /* Prevent free from actually returning memory to the system. */
1458 oldlimit
= _heaplimit
;
1460 _free_internal_nolock (ptr
);
1461 result
= _malloc_internal_nolock (size
);
1462 PROTECT_MALLOC_STATE (0);
1463 if (_heaplimit
== 0)
1464 _heaplimit
= oldlimit
;
1467 /* Now we're really in trouble. We have to unfree
1468 the thing we just freed. Unfortunately it might
1469 have been coalesced with its neighbors. */
1470 if (_heapindex
== block
)
1471 (void) _malloc_internal_nolock (blocks
* BLOCKSIZE
);
1475 = _malloc_internal_nolock ((block
- _heapindex
) * BLOCKSIZE
);
1476 (void) _malloc_internal_nolock (blocks
* BLOCKSIZE
);
1477 _free_internal_nolock (previous
);
1482 memmove (result
, ptr
, blocks
* BLOCKSIZE
);
1487 /* Old size is a fragment; type is logarithm
1488 to base two of the fragment size. */
1489 if (size
> (__malloc_size_t
) (1 << (type
- 1)) &&
1490 size
<= (__malloc_size_t
) (1 << type
))
1491 /* The new size is the same kind of fragment. */
1495 /* The new size is different; allocate a new space,
1496 and copy the lesser of the new size and the old. */
1497 result
= _malloc_internal_nolock (size
);
1500 memcpy (result
, ptr
, min (size
, (__malloc_size_t
) 1 << type
));
1501 _free_internal_nolock (ptr
);
1506 PROTECT_MALLOC_STATE (1);
1512 _realloc_internal (ptr
, size
)
1514 __malloc_size_t size
;
1519 result
= _realloc_internal_nolock (ptr
, size
);
1528 __malloc_size_t size
;
1530 __ptr_t (*hook
) (__ptr_t
, __malloc_size_t
);
1532 if (!__malloc_initialized
&& !__malloc_initialize ())
1535 hook
= __realloc_hook
;
1536 return (hook
!= NULL
? *hook
: _realloc_internal
) (ptr
, size
);
1538 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1540 This library is free software; you can redistribute it and/or
1541 modify it under the terms of the GNU General Public License as
1542 published by the Free Software Foundation; either version 2 of the
1543 License, or (at your option) any later version.
1545 This library is distributed in the hope that it will be useful,
1546 but WITHOUT ANY WARRANTY; without even the implied warranty of
1547 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1548 General Public License for more details.
1550 You should have received a copy of the GNU General Public
1551 License along with this library; see the file COPYING. If
1552 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1553 Fifth Floor, Boston, MA 02110-1301, USA.
1555 The author may be reached (Email) at the address mike@ai.mit.edu,
1556 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1558 #ifndef _MALLOC_INTERNAL
1559 #define _MALLOC_INTERNAL
1563 /* Allocate an array of NMEMB elements each SIZE bytes long.
1564 The entire array is initialized to zeros. */
1566 calloc (nmemb
, size
)
1567 register __malloc_size_t nmemb
;
1568 register __malloc_size_t size
;
1570 register __ptr_t result
= malloc (nmemb
* size
);
1573 (void) memset (result
, 0, nmemb
* size
);
1577 /* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1578 This file is part of the GNU C Library.
1580 The GNU C Library is free software; you can redistribute it and/or modify
1581 it under the terms of the GNU General Public License as published by
1582 the Free Software Foundation; either version 2, or (at your option)
1585 The GNU C Library is distributed in the hope that it will be useful,
1586 but WITHOUT ANY WARRANTY; without even the implied warranty of
1587 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1588 GNU General Public License for more details.
1590 You should have received a copy of the GNU General Public License
1591 along with the GNU C Library; see the file COPYING. If not, write to
1592 the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
1593 MA 02110-1301, USA. */
1595 #ifndef _MALLOC_INTERNAL
1596 #define _MALLOC_INTERNAL
1600 /* uClibc defines __GNU_LIBRARY__, but it is not completely
1602 #if !defined(__GNU_LIBRARY__) || defined(__UCLIBC__)
1604 #else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
1605 /* It is best not to declare this and cast its result on foreign operating
1606 systems with potentially hostile include files. */
1609 extern __ptr_t __sbrk
PP ((ptrdiff_t increment
));
1610 #endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
1616 /* Allocate INCREMENT more bytes of data space,
1617 and return the start of data space, or NULL on errors.
1618 If INCREMENT is negative, shrink data space. */
1620 __default_morecore (increment
)
1621 __malloc_ptrdiff_t increment
;
1625 if (!bss_sbrk_did_unexec
)
1627 return bss_sbrk (increment
);
1630 result
= (__ptr_t
) __sbrk (increment
);
1631 if (result
== (__ptr_t
) -1)
1635 /* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1637 This library is free software; you can redistribute it and/or
1638 modify it under the terms of the GNU General Public License as
1639 published by the Free Software Foundation; either version 2 of the
1640 License, or (at your option) any later version.
1642 This library is distributed in the hope that it will be useful,
1643 but WITHOUT ANY WARRANTY; without even the implied warranty of
1644 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1645 General Public License for more details.
1647 You should have received a copy of the GNU General Public
1648 License along with this library; see the file COPYING. If
1649 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1650 Fifth Floor, Boston, MA 02110-1301, USA. */
1652 #ifndef _MALLOC_INTERNAL
1653 #define _MALLOC_INTERNAL
1657 __ptr_t (*__memalign_hook
) PP ((__malloc_size_t __size
,
1658 __malloc_size_t __alignment
));
1661 memalign (alignment
, size
)
1662 __malloc_size_t alignment
;
1663 __malloc_size_t size
;
1666 unsigned long int adj
, lastadj
;
1667 __ptr_t (*hook
) (__malloc_size_t
, __malloc_size_t
) = __memalign_hook
;
1670 return (*hook
) (alignment
, size
);
1672 /* Allocate a block with enough extra space to pad the block with up to
1673 (ALIGNMENT - 1) bytes if necessary. */
1674 result
= malloc (size
+ alignment
- 1);
1678 /* Figure out how much we will need to pad this particular block
1679 to achieve the required alignment. */
1680 adj
= (unsigned long int) ((char *) result
- (char *) NULL
) % alignment
;
1684 /* Reallocate the block with only as much excess as it needs. */
1686 result
= malloc (adj
+ size
);
1687 if (result
== NULL
) /* Impossible unless interrupted. */
1691 adj
= (unsigned long int) ((char *) result
- (char *) NULL
) % alignment
;
1692 /* It's conceivable we might have been so unlucky as to get a
1693 different block with weaker alignment. If so, this block is too
1694 short to contain SIZE after alignment correction. So we must
1695 try again and get another block, slightly larger. */
1696 } while (adj
> lastadj
);
1700 /* Record this block in the list of aligned blocks, so that `free'
1701 can identify the pointer it is passed, which will be in the middle
1702 of an allocated block. */
1704 struct alignlist
*l
;
1705 LOCK_ALIGNED_BLOCKS ();
1706 for (l
= _aligned_blocks
; l
!= NULL
; l
= l
->next
)
1707 if (l
->aligned
== NULL
)
1708 /* This slot is free. Use it. */
1712 l
= (struct alignlist
*) malloc (sizeof (struct alignlist
));
1715 l
->next
= _aligned_blocks
;
1716 _aligned_blocks
= l
;
1722 result
= l
->aligned
= (char *) result
+ alignment
- adj
;
1724 UNLOCK_ALIGNED_BLOCKS ();
1744 posix_memalign (memptr
, alignment
, size
)
1746 __malloc_size_t alignment
;
1747 __malloc_size_t size
;
1752 || alignment
% sizeof (__ptr_t
) != 0
1753 || (alignment
& (alignment
- 1)) != 0)
1756 mem
= memalign (alignment
, size
);
1765 /* Allocate memory on a page boundary.
1766 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1768 This library is free software; you can redistribute it and/or
1769 modify it under the terms of the GNU General Public License as
1770 published by the Free Software Foundation; either version 2 of the
1771 License, or (at your option) any later version.
1773 This library is distributed in the hope that it will be useful,
1774 but WITHOUT ANY WARRANTY; without even the implied warranty of
1775 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1776 General Public License for more details.
1778 You should have received a copy of the GNU General Public
1779 License along with this library; see the file COPYING. If
1780 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1781 Fifth Floor, Boston, MA 02110-1301, USA.
1783 The author may be reached (Email) at the address mike@ai.mit.edu,
1784 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1786 #if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)
1788 /* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
1789 on MSDOS, where it conflicts with a system header file. */
1791 #define ELIDE_VALLOC
1795 #ifndef ELIDE_VALLOC
1797 #if defined (__GNU_LIBRARY__) || defined (_LIBC)
1799 #include <sys/cdefs.h>
1800 #if defined (__GLIBC__) && __GLIBC__ >= 2
1801 /* __getpagesize is already declared in <unistd.h> with return type int */
1803 extern size_t __getpagesize
PP ((void));
1806 #include "getpagesize.h"
1807 #define __getpagesize() getpagesize()
1810 #ifndef _MALLOC_INTERNAL
1811 #define _MALLOC_INTERNAL
1815 static __malloc_size_t pagesize
;
1819 __malloc_size_t size
;
1822 pagesize
= __getpagesize ();
1824 return memalign (pagesize
, size
);
1827 #endif /* Not ELIDE_VALLOC. */
1831 /* Standard debugging hooks for `malloc'.
1832 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1833 Written May 1989 by Mike Haertel.
1835 This library is free software; you can redistribute it and/or
1836 modify it under the terms of the GNU General Public License as
1837 published by the Free Software Foundation; either version 2 of the
1838 License, or (at your option) any later version.
1840 This library is distributed in the hope that it will be useful,
1841 but WITHOUT ANY WARRANTY; without even the implied warranty of
1842 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1843 General Public License for more details.
1845 You should have received a copy of the GNU General Public
1846 License along with this library; see the file COPYING. If
1847 not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1848 Fifth Floor, Boston, MA 02110-1301, USA.
1850 The author may be reached (Email) at the address mike@ai.mit.edu,
1851 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1856 #ifndef _MALLOC_INTERNAL
1857 #define _MALLOC_INTERNAL
1863 /* Old hook values. */
1864 static void (*old_free_hook
) (__ptr_t ptr
);
1865 static __ptr_t (*old_malloc_hook
) (__malloc_size_t size
);
1866 static __ptr_t (*old_realloc_hook
) (__ptr_t ptr
, __malloc_size_t size
);
1868 /* Function to call when something awful happens. */
1869 static void (*abortfunc
) (enum mcheck_status
);
/* Arbitrary magical numbers stamped into block headers and bodies.  */
#define MAGICWORD	0xfedabeeb	/* Header magic of a live block.  */
#define MAGICFREE	0xd8675309	/* Header magic of a freed block.  */
#define MAGICBYTE	((char) 0xd7)	/* Trailer byte guarding overruns.  */
#define MALLOCFLOOD	((char) 0x93)	/* Fill pattern for fresh memory.  */
#define FREEFLOOD	((char) 0x95)	/* Fill pattern for freed memory.  */
1880 __malloc_size_t size
; /* Exact size requested by user. */
1881 unsigned long int magic
; /* Magic number to check header integrity. */
1884 #if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
1885 #define flood memset
1887 static void flood (__ptr_t
, int, __malloc_size_t
);
1889 flood (ptr
, val
, size
)
1892 __malloc_size_t size
;
1900 static enum mcheck_status
checkhdr (const struct hdr
*);
1901 static enum mcheck_status
1903 const struct hdr
*hdr
;
1905 enum mcheck_status status
;
1909 status
= MCHECK_HEAD
;
1912 status
= MCHECK_FREE
;
1915 if (((char *) &hdr
[1])[hdr
->size
] != MAGICBYTE
)
1916 status
= MCHECK_TAIL
;
1921 if (status
!= MCHECK_OK
)
1922 (*abortfunc
) (status
);
1926 static void freehook (__ptr_t
);
1935 hdr
= ((struct hdr
*) ptr
) - 1;
1937 hdr
->magic
= MAGICFREE
;
1938 flood (ptr
, FREEFLOOD
, hdr
->size
);
1943 __free_hook
= old_free_hook
;
1945 __free_hook
= freehook
;
1948 static __ptr_t
mallochook (__malloc_size_t
);
1951 __malloc_size_t size
;
1955 __malloc_hook
= old_malloc_hook
;
1956 hdr
= (struct hdr
*) malloc (sizeof (struct hdr
) + size
+ 1);
1957 __malloc_hook
= mallochook
;
1962 hdr
->magic
= MAGICWORD
;
1963 ((char *) &hdr
[1])[size
] = MAGICBYTE
;
1964 flood ((__ptr_t
) (hdr
+ 1), MALLOCFLOOD
, size
);
1965 return (__ptr_t
) (hdr
+ 1);
1968 static __ptr_t
reallochook (__ptr_t
, __malloc_size_t
);
1970 reallochook (ptr
, size
)
1972 __malloc_size_t size
;
1974 struct hdr
*hdr
= NULL
;
1975 __malloc_size_t osize
= 0;
1979 hdr
= ((struct hdr
*) ptr
) - 1;
1984 flood ((char *) ptr
+ size
, FREEFLOOD
, osize
- size
);
1987 __free_hook
= old_free_hook
;
1988 __malloc_hook
= old_malloc_hook
;
1989 __realloc_hook
= old_realloc_hook
;
1990 hdr
= (struct hdr
*) realloc ((__ptr_t
) hdr
, sizeof (struct hdr
) + size
+ 1);
1991 __free_hook
= freehook
;
1992 __malloc_hook
= mallochook
;
1993 __realloc_hook
= reallochook
;
1998 hdr
->magic
= MAGICWORD
;
1999 ((char *) &hdr
[1])[size
] = MAGICBYTE
;
2001 flood ((char *) (hdr
+ 1) + osize
, MALLOCFLOOD
, size
- osize
);
2002 return (__ptr_t
) (hdr
+ 1);
2007 enum mcheck_status status
;
2013 msg
= "memory is consistent, library is buggy";
2016 msg
= "memory clobbered before allocated block";
2019 msg
= "memory clobbered past end of allocated block";
2022 msg
= "block freed twice";
2025 msg
= "bogus mcheck_status, library is buggy";
2028 #ifdef __GNU_LIBRARY__
2031 fprintf (stderr
, "mcheck: %s\n", msg
);
2037 static int mcheck_used
= 0;
2041 void (*func
) (enum mcheck_status
);
2043 abortfunc
= (func
!= NULL
) ? func
: &mabort
;
2045 /* These hooks may not be safely inserted if malloc is already in use. */
2046 if (!__malloc_initialized
&& !mcheck_used
)
2048 old_free_hook
= __free_hook
;
2049 __free_hook
= freehook
;
2050 old_malloc_hook
= __malloc_hook
;
2051 __malloc_hook
= mallochook
;
2052 old_realloc_hook
= __realloc_hook
;
2053 __realloc_hook
= reallochook
;
2057 return mcheck_used
? 0 : -1;
2061 mprobe (__ptr_t ptr
)
2063 return mcheck_used
? checkhdr (ptr
) : MCHECK_DISABLED
;
2066 #endif /* GC_MCHECK */