/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef _MALLOC_INTERNAL

#define __ptr_t void *

#endif /* _MALLOC_INTERNAL.  */

#define __malloc_size_t		size_t
#define __malloc_ptrdiff_t	ptrdiff_t

/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free PP ((__ptr_t __ptr));
/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT bytes.  */
#if !defined (_MALLOC_INTERNAL) || defined (MSDOS) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
			     __malloc_size_t __size));
extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
			       __malloc_size_t size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif

/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof (int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
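/* Illustrative sketch (not part of the allocator): the arithmetic these
   macros perform, assuming int is at least 32 bits so BLOCKLOG is 12 and
   BLOCKSIZE is 4096.  The `main' below is a hypothetical demonstration,
   not library code.  */
#if 0
#include <stdio.h>

int
main (void)
{
  size_t blocksize = 1 << 12;	/* BLOCKSIZE when INT_BIT > 16.  */
  size_t sizes[] = { 1, 4095, 4096, 4097, 10000 };
  size_t i;

  for (i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
    /* Same rounding as BLOCKIFY: whole blocks, rounded up.  */
    printf ("%lu bytes -> %lu block(s)\n",
	    (unsigned long) sizes[i],
	    (unsigned long) ((sizes[i] + blocksize - 1) / blocksize));
  return 0;
}
#endif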
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP	(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
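/* Illustrative sketch (not part of the allocator): block numbers are
   1-based offsets from _heapbase in BLOCKSIZE units, so ADDRESS (BLOCK (p))
   rounds p down to the start of its block.  The standalone demo_ copies
   below assume a 4096-byte BLOCKSIZE.  */
#if 0
static char *demo_heapbase;

static size_t
demo_block (void *a)
{
  return ((char *) a - demo_heapbase) / 4096 + 1;
}

static void *
demo_address (size_t b)
{
  return demo_heapbase + (b - 1) * 4096;
}
#endif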
/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];
/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memalign returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
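/* Illustrative sketch of how `free' uses this list (the real code is in
   _free_internal_nolock below): an address produced by memalign is looked
   up and replaced with the address malloc actually returned.  The demo_
   helper is hypothetical, not part of the library.  */
#if 0
static __ptr_t
demo_translate_aligned (struct alignlist *list, __ptr_t ptr)
{
  struct alignlist *l;

  for (l = list; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* The slot becomes reusable.  */
	return l->exact;	/* Free the underlying block instead.  */
      }
  return ptr;
}
#endif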
/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr,
					     __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));
#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

#endif /* _MALLOC_INTERNAL.  */
/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));
/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				       __malloc_size_t __alignment));
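/* Illustrative use of the hooks above; the counting hook is a hypothetical
   sketch, not part of this library.  A hook must restore the previous hook
   before allocating, or it would recurse into itself.  */
#if 0
static __ptr_t (*demo_old_malloc_hook) PP ((__malloc_size_t));
static unsigned long demo_nmallocs;

static __ptr_t
demo_counting_malloc_hook (__malloc_size_t size)
{
  __ptr_t result;

  __malloc_hook = demo_old_malloc_hook;	/* Avoid recursing into ourselves.  */
  result = malloc (size);
  demo_nmallocs++;
  __malloc_hook = demo_counting_malloc_hook;
  return result;
}
/* Installed with:  demo_old_malloc_hook = __malloc_hook;
		    __malloc_hook = demo_counting_malloc_hook;  */
#endif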
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };
/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
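/* Illustrative use of `mcheck' and `mprobe'; a hypothetical sketch, not
   library code.  The no-op handler keeps the default abort from firing, so
   the deliberate overflow is reported through mprobe's return value.  */
#if 0
static void
demo_handler (enum mcheck_status status)
{
  /* Record or log STATUS instead of aborting.  */
}

int
main (void)
{
  char *p;

  mcheck (demo_handler);	/* Must precede the first malloc.  */
  p = malloc (16);
  p[16] = 'x';			/* One byte past the end of the block.  */
  return mprobe (p) == MCHECK_TAIL ? 0 : 1;
}
#endif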
/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	 /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	 /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
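/* Illustrative sketch of reading the statistics; not part of the library.  */
#if 0
#include <stdio.h>

static void
demo_print_mstats (void)
{
  struct mstats s = mstats ();

  printf ("heap %lu bytes; used %lu bytes in %lu chunks; "
	  "free %lu bytes in %lu chunks\n",
	  (unsigned long) s.bytes_total,
	  (unsigned long) s.bytes_used, (unsigned long) s.chunks_used,
	  (unsigned long) s.bytes_free, (unsigned long) s.chunks_free);
}
#endif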
/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
				 void (*__warnfun) PP ((const char *))));

/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
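/* Illustrative sketch of the handle-based interface declared above; the
   allocator may move a block, so callers hold a handle and re-read it
   rather than caching the raw pointer.  Hypothetical usage, not library
   code.  */
#if 0
static void
demo_r_alloc (void)
{
  __ptr_t handle = NULL;

  r_alloc (&handle, 1024);	/* handle now addresses 1024 bytes.  */
  ((char *) handle)[0] = 'a';
  r_re_alloc (&handle, 4096);	/* May relocate; handle is updated.  */
  r_alloc_free (&handle);
}
#endif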
#endif /* malloc.h  */

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif

#ifdef CYGWIN
/* On Cygwin there are two heaps.  temacs uses the static heap
   (defined in sheap.c and managed with bss_sbrk), and the dumped
   emacs uses the Cygwin heap (managed with sbrk).  When emacs starts
   on Cygwin, it reinitializes malloc, and we save the old info for
   use by free and realloc if they're called with a pointer into the
   static heap.  */

extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
char *bss_sbrk_heapbase;	/* _heapbase for static heap */
malloc_info *bss_sbrk_heapinfo;	/* _heapinfo for static heap */
#endif
__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;
/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t) size < 0)
    result = 0;
  else
    {
      result = (*__morecore) (size);
      adj = (unsigned long int) ((unsigned long int) ((char *) result -
						      (char *) NULL)) % BLOCKSIZE;
      if (adj != 0)
	{
	  __ptr_t new;
	  adj = BLOCKSIZE - adj;
	  new = (*__morecore) (adj);
	  result = (char *) result + adj;
	}

      if (__after_morecore_hook)
	(*__after_morecore_hook) ();
    }

  return result;
}
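/* Illustrative sketch of the adjustment above; not part of the allocator.
   It is plain modular arithmetic: if the break sits at, say, 0x12345 with
   a 4096-byte BLOCKSIZE, align () asks for 0x1000 - 0x345 more bytes so
   the region it returns starts on the next block boundary.  */
#if 0
static unsigned long
demo_align_adjust (unsigned long addr, unsigned long blocksize)
{
  unsigned long adj = addr % blocksize;

  return adj == 0 ? 0 : blocksize - adj;
}
#endif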
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */
static void
malloc_initialize_1 ()
{
#ifdef CYGWIN
  if (bss_sbrk_did_unexec)
    /* we're reinitializing the dumped emacs */
    {
      bss_sbrk_heapbase = _heapbase;
      bss_sbrk_heapinfo = _heapinfo;
      memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
    }
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal_nolock
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  register struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE >> log);

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
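/* Illustrative sketch (not library code) of the fragment-size computation
   used in the small-allocation path above: a request of SIZE bytes, at
   most BLOCKSIZE / 2, is served from fragments of 1 << log bytes, the
   smallest power of two that can hold SIZE.  */
#if 0
static int
demo_frag_log (size_t size)
{
  int log = 1;

  size--;			/* So exact powers of two stay put.  */
  while ((size /= 2) != 0)
    ++log;
  return log;			/* E.g. 24 -> 5, i.e. 32-byte fragments.  */
}
#endif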
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif

/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if (ptr < _heapbase)
    /* We're being asked to free something in the static heap.  */
    return;
#endif

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold))))
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
								 * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */
void
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif

#define min(A, B) ((A) < (B) ? (A) : (B))
/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
#ifdef CYGWIN
__ptr_t
special_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, oldsize;

  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  oldsize =
    type == 0 ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
    : (__malloc_size_t) 1 << type;
  result = _malloc_internal_nolock (size);
  if (result != NULL)
    memcpy (result, ptr, min (oldsize, size));
  return result;
}
#endif
/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

#ifdef CYGWIN
  if (ptr < _heapbase)
    /* ptr points into the static heap */
    return special_realloc (ptr, size);
#endif

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
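/* The product nmemb * size above can wrap on overflow, a classic calloc
   hazard this historical implementation does not guard against.  A hedged
   sketch of the usual caller-side check (not part of this library):  */
#if 0
static __ptr_t
demo_checked_calloc (__malloc_size_t nmemb, __malloc_size_t size)
{
  if (size != 0 && nmemb > (__malloc_size_t) -1 / size)
    return NULL;		/* Product would overflow.  */
  return calloc (nmemb, size);
}
#endif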
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;

#ifdef CYGWIN
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
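/* A hedged sketch (not part of the library) of a replacement for
   __default_morecore backed by a static arena, honoring the contract
   documented in the header above: successive calls return contiguous
   memory, a zero increment reports the current break, and a negative
   increment releases space.  All demo_ names are hypothetical.  */
#if 0
static char demo_arena[1 << 20];
static size_t demo_break;

static __ptr_t
demo_static_morecore (__malloc_ptrdiff_t increment)
{
  char *prev = demo_arena + demo_break;

  if (increment >= 0 && (size_t) increment > sizeof demo_arena - demo_break)
    return NULL;		/* Arena exhausted.  */
  if (increment < 0 && (size_t) -increment > demo_break)
    return NULL;		/* Cannot shrink below the arena start.  */
  demo_break += increment;
  return prev;			/* Start of the newly valid region.  */
}
/* Installed before the first malloc:  __morecore = demo_static_morecore;  */
#endif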
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif

__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				__malloc_size_t __alignment));
__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
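/* An illustrative caller, not library code.  Unlike memalign, the result
   comes back through an out-parameter and the return value is an error
   code (0, EINVAL, or ENOMEM); errno is left alone.  */
#if 0
static void
demo_posix_memalign (void)
{
  __ptr_t p;

  if (posix_memalign (&p, 64, 1000) == 0)
    {
      /* p is 64-byte aligned here.  */
      free (p);
    }
}
#endif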
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize()	getpagesize ()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}

#endif /* Not ELIDE_VALLOC.  */
/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef GC_MCHECK

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#endif
/* Old hook values.  */
static void (*old_free_hook) (__ptr_t ptr);
static __ptr_t (*old_malloc_hook) (__malloc_size_t size);
static __ptr_t (*old_realloc_hook) (__ptr_t ptr, __malloc_size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
static enum mcheck_status checkhdr (const struct hdr *);
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;

  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}
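/* Sketch of the block layout checkhdr relies on.  Each user block is
   preceded by a struct hdr and followed by a single MAGICBYTE:

       [ hdr.size | hdr.magic ][ user data ... ][ MAGICBYTE ]

   A write before the block corrupts hdr.magic (MCHECK_HEAD); a write past
   the end corrupts the trailing byte (MCHECK_TAIL); freeing twice leaves
   hdr.magic set to MAGICFREE (MCHECK_FREE).  */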
static void freehook (__ptr_t);
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
static __ptr_t mallochook (__malloc_size_t);
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  memset ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}
static __ptr_t reallochook (__ptr_t, __malloc_size_t);
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}
static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;

  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}
static int mcheck_used = 0;

int
mcheck (func)
     void (*func) (enum mcheck_status);
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
enum mcheck_status
mprobe (__ptr_t ptr)
{
  /* The header sits immediately before the block handed to the user.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */