/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_PTHREAD
#define USE_PTHREAD
#endif

#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *

#include <string.h>
#include <limits.h>
#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#endif /* _MALLOC_INTERNAL.  */


#ifdef __cplusplus
extern "C"
{
#endif

#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t


/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free PP ((__ptr_t __ptr));

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#if !defined (_MALLOC_INTERNAL) || defined (MSDOS) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
                             __malloc_size_t __size));
extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
                               __malloc_size_t size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#endif

#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof (int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP		(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
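
/* Worked example (added comment, not in the original source): on a
   32-bit host INT_BIT is 32, so BLOCKLOG is 12 and BLOCKSIZE is 4096.
   A request for 10000 bytes spans BLOCKIFY (10000) == 3 whole blocks,
   while a request for 100 bytes is served from a block split into
   128-byte fragments, the smallest power of two that fits.  */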

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
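
/* Worked example (added comment, not in the original source): with
   BLOCKSIZE == 4096 and _heapbase == 0x1000, an object at 0x3000 lies
   in block BLOCK (0x3000) == (0x2000 / 0x1000) + 1 == 3, and
   ADDRESS (3) == 2 * 0x1000 + 0x1000 == 0x3000 maps back again.
   Block numbers are 1-based so that index 0 of _heapinfo can serve as
   the fixed head of the free-cluster list.  */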

/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memalign returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
                                       __malloc_size_t __alignment));

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	 /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	 /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
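
/* Usage sketch (added comment, not in the original source): the
   statistics can be sampled at any point after initialization, e.g.

     struct mstats s = mstats ();
     fprintf (stderr, "heap: %lu bytes total, %lu in use\n",
              (unsigned long) s.bytes_total, (unsigned long) s.bytes_used);
*/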

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
                                 void (*__warnfun) PP ((const char *))));


/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));


#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* On Cygwin there are two heaps.  temacs uses the static heap
   (defined in sheap.c and managed with bss_sbrk), and the dumped
   emacs uses the Cygwin heap (managed with sbrk).  When emacs starts
   on Cygwin, it reinitializes malloc, and we save the old info for
   use by free and realloc if they're called with a pointer into the
   static heap.

   Currently (2011-08-16) the Cygwin build doesn't use ralloc.c; if
   this is changed in the future, we'll have to similarly deal with
   reinitializing ralloc.  */
#ifdef CYGWIN
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
char *bss_sbrk_heapbase;	/* _heapbase for static heap */
malloc_info *bss_sbrk_heapinfo;	/* _heapinfo for static heap */
#endif
__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
                    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
        abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif


/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t) size < 0)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
                                                  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      new = (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}

/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static inline void
register_heapinfo (void)
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
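
/* Worked example (added comment, not in the original source): if the
   table occupies 3 blocks starting at block B, the loop above leaves
   busy.info.size values of {3, -1, -2} in blocks B, B+1 and B+2, so
   any interior block can reach the first one by adding its own
   (negative) size to its index.  */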

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
                  malloc_atfork_handler_parent,
                  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif

static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

#ifdef CYGWIN
  if (bss_sbrk_did_unexec)
    /* we're reinitializing the dumped emacs */
    {
      bss_sbrk_heapbase = _heapbase;
      bss_sbrk_heapinfo = _heapinfo;
      memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
    }
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
         added blocks in the table itself, as we hope to place them in
         existing free space, which is already covered by part of the
         existing table.  */
      newsize = heapsize;
      do
        newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
         from realloc in the case of growing a large block, because the
         block being grown is momentarily marked as free.  In this case
         _heaplimit is zero so we know not to reuse space for internal
         allocation.  */
      if (_heaplimit != 0)
        {
          /* First try to allocate the new info table in core we already
             have, in the usual way using realloc.  If realloc cannot
             extend it in place or relocate it to existing sufficient core,
             we will get called again, and the code above will notice the
             `morecore_recursing' flag and return null.  */
          int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
          newinfo = (malloc_info *) _realloc_internal_nolock
            (_heapinfo, newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
            errno = save;
          else
            {
              /* We found some space in core, and realloc has put the old
                 table's blocks on the free list.  Now zero the new part
                 of the table and install the new table location.  */
              memset (&newinfo[heapsize], 0,
                      (newsize - heapsize) * sizeof (malloc_info));
              _heapinfo = newinfo;
              heapsize = newsize;
              goto got_heap;
            }
        }

      /* Allocate new space for the malloc info table.  */
      while (1)
        {
          newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

          /* Did it fail?  */
          if (newinfo == NULL)
            {
              (*__morecore) (-size);
              return NULL;
            }

          /* Is it big enough to record status for its own space?
             If so, we win.  */
          if ((__malloc_size_t) BLOCK ((char *) newinfo
                                       + newsize * sizeof (malloc_info))
              < newsize)
            break;

          /* Must try again.  First give back most of what we just got.  */
          (*__morecore) (- newsize * sizeof (malloc_info));
          newsize *= 2;
        }

      /* Copy the old table to the beginning of the new,
         and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;
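
      /* Worked example (added comment, not in the original source):
         for a 100-byte request, SIZE becomes 99 and halving it to zero
         takes six more steps, so LOG ends up 7 and the request is
         served from a 1 << 7 == 128-byte fragment, the smallest power
         of two that holds 100 bytes.  */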

      /* Look in the fragment lists for a
         free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters.  */
          result = (__ptr_t) next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first = (unsigned long int)
              ((unsigned long int) ((char *) next->next - (char *) NULL)
               % BLOCKSIZE) >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
          result = _malloc_internal_nolock (BLOCKSIZE);
          PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
          result = _malloc_internal_nolock (BLOCKSIZE);
#else
          result = malloc (BLOCKSIZE);
#endif
          if (result == NULL)
            {
              PROTECT_MALLOC_STATE (1);
              goto out;
            }

          /* Link all fragments but the first into the free list.  */
          next = (struct list *) ((char *) result + (1 << log));
          next->next = NULL;
          next->prev = &_fraghead[log];
          _fraghead[log].next = next;

          for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Get a little extra.  */
              __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              /* Check to see if the new core will be contiguous with the
                 final free block; if so we don't need to get as much.  */
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space.  */
                  block + wantblocks <= heapsize &&
                  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
                                        ADDRESS (block + lastblocks)))
                {
                  /* We got it contiguously.  Which block we are extending
                     (the `final free block' referred to above) might have
                     changed, if it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (wantblocks - lastblocks);
                  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
              result = morecore_nolock (wantblocks * BLOCKSIZE);
              if (result == NULL)
                goto out;
              block = BLOCK (result);
              /* Put the new block at the end of the free list.  */
              _heapinfo[block].free.size = wantblocks;
              _heapinfo[block].free.prev = _heapinfo[0].free.prev;
              _heapinfo[block].free.next = 0;
              _heapinfo[0].free.prev = block;
              _heapinfo[_heapinfo[block].free.prev].free.next = block;
              ++_chunks_free;
              /* Now loop to use some of that block for this allocation.  */
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list.  */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list.  */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment.  */
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
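
#if 0
/* Usage sketch (not part of the original source): a debugging hook is
   installed by assigning to __malloc_hook; glibc-style, the hook must
   uninstall itself around the recursive call to avoid looping.  */
static __ptr_t (*old_malloc_hook) PP ((__malloc_size_t));

static __ptr_t
my_malloc_hook (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_hook = old_malloc_hook;	/* Avoid recursing into ourselves.  */
  result = malloc (size);
  old_malloc_hook = __malloc_hook;	/* Someone may have re-chained.  */
  __malloc_hook = my_malloc_hook;	/* Reinstall.  */
  return result;
}
#endif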

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

#ifdef CYGWIN
  if (ptr < _heapbase)
    /* We're being asked to free something in the static heap.  */
    return;
#endif

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;	/* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          __malloc_size_t info_block = BLOCK (_heapinfo);
          __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
          __malloc_size_t prev_block = _heapinfo[block].free.prev;
          __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
          __malloc_size_t next_block = _heapinfo[block].free.next;
          __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold)))
              )
            {
              malloc_info *newinfo;
              __malloc_size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal_nolock (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
                                                                 * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register __malloc_size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
          _free_internal_nolock (ADDRESS (block));
#else
          free (ADDRESS (block));
#endif
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = (struct list *) ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = (struct list *) ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first = (unsigned long int)
            ((unsigned long int) ((char *) ptr - (char *) NULL)
             % BLOCKSIZE >> type);
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#define min(A, B) ((A) < (B) ? (A) : (B))

/* On Cygwin the dumped emacs may try to realloc storage allocated in
   the static heap.  We just malloc space in the new heap and copy the
   data.  */
#ifdef CYGWIN
__ptr_t
special_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, oldsize;

  block = ((char *) ptr - bss_sbrk_heapbase) / BLOCKSIZE + 1;
  type = bss_sbrk_heapinfo[block].busy.type;
  oldsize =
    type == 0 ? bss_sbrk_heapinfo[block].busy.info.size * BLOCKSIZE
    : (__malloc_size_t) 1 << type;
  result = _malloc_internal_nolock (size);
  if (result != NULL)
    memcpy (result, ptr, min (oldsize, size));
  return result;
}
#endif

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

#ifdef CYGWIN
  if (ptr < _heapbase)
    /* ptr points into the static heap */
    return special_realloc (ptr, size);
#endif

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal_nolock (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal_nolock (ptr);
              goto out;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list.  */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal_nolock (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal_nolock (ptr);
          result = _malloc_internal_nolock (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
              else
                {
                  __ptr_t previous
                    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
                  _free_internal_nolock (previous);
                }
              goto out;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
          size <= (__malloc_size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old.  */
          result = _malloc_internal_nolock (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
          _free_internal_nolock (ptr);
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
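
/* Note (added comment, not in the original source): the multiplication
   nmemb * size above is unchecked and can wrap around on overflow, as
   in many allocators of this vintage; a cautious caller would verify
   nmemb <= (__malloc_size_t) -1 / size first.  */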
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined (CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
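
/* Note (added comment, not in the original source): __morecore is a
   function pointer precisely so it can be redirected; Emacs's
   ralloc.c, for instance, installs its own sbrk-style routine when
   the relocating allocator is in use.  */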
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				__malloc_size_t __alignment));

__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
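
/* Worked example (added commentary, not in the original source): with
   ALIGNMENT == 16, suppose malloc returns the block at address 0x1004.
   Then adj = 0x1004 % 16 == 4, and the pointer handed back to the
   caller is 0x1004 + (16 - 4) == 0x1010.  The pair (exact == 0x1004,
   aligned == 0x1010) is recorded in _aligned_blocks so that free ()
   can translate 0x1010 back to the block malloc really returned.  */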

#ifndef ENOMEM
#define ENOMEM 12
#endif

#ifndef EINVAL
#define EINVAL 22
#endif

int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
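
/* Illustrative usage sketch (not part of the original source):
   posix_memalign reports failure through its return value rather than
   errno, and ALIGNMENT must be a power of two that is also a multiple
   of sizeof (void *).  */
#if 0
static int
posix_memalign_example (void)
{
  __ptr_t buf;
  int err = posix_memalign (&buf, 64, 1024);	/* 64-byte-aligned 1 KB */

  if (err != 0)
    return err;		/* EINVAL for a bad alignment, else ENOMEM */
  /* ... use buf ... */
  free (buf);
  return 0;
}
#endif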

/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize ()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}

#endif /* Not ELIDE_VALLOC.  */
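
/* Note (added commentary, not in the original source): valloc (N) is
   exactly memalign (pagesize, N); e.g. on a machine with 4096-byte
   pages, valloc (8192) and memalign (4096, 8192) behave identically.  */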

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef emacs
#include <stdio.h>
#else
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#include <stdio.h>
#endif
#endif

/* Old hook values.  */
static void (*old_free_hook) (__ptr_t ptr);
static __ptr_t (*old_malloc_hook) (__malloc_size_t size);
static __ptr_t (*old_realloc_hook) (__ptr_t ptr, __malloc_size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD 0xfedabeeb
#define MAGICFREE 0xd8675309
#define MAGICBYTE ((char) 0xd7)
#define MALLOCFLOOD ((char) 0x93)
#define FREEFLOOD ((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
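
/* Added commentary (not in the original source): each block handed
   out while mcheck is active is laid out as

     [struct hdr][SIZE bytes of user data][MAGICBYTE]
                  ^
                  pointer returned to the user

   hdr->magic holds MAGICWORD while the block is live and MAGICFREE
   once it has been freed; the single MAGICBYTE just past the user
   data catches writes that overrun the block.  */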

static enum mcheck_status checkhdr (const struct hdr *);
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void freehook (__ptr_t);
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
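
/* Added commentary (not in the original source): freehook briefly
   restores old_free_hook around the inner free () so that the real
   allocator runs instead of this hook re-entering itself.  Because
   the header is stamped MAGICFREE before the block is released,
   checkhdr () reports a second free of the same pointer as
   MCHECK_FREE.  */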

static __ptr_t mallochook (__malloc_size_t);
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  memset ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}

static __ptr_t reallochook (__ptr_t, __malloc_size_t);
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}

static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (func)
     void (*func) (enum mcheck_status);
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
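
/* Illustrative usage sketch (not part of the original source):
   mcheck () only succeeds before the first allocation, and mprobe ()
   can then validate any live block on demand.  */
#if 0
#include <stdlib.h>

int
main (void)
{
  char *p;

  mcheck (NULL);	/* NULL selects the default mabort handler */
  p = malloc (16);
  p[16] = 'x';		/* one-byte overrun clobbers MAGICBYTE */
  mprobe (p);		/* reports MCHECK_TAIL through abortfunc */
  return 0;
}
#endif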

enum mcheck_status
mprobe (__ptr_t ptr)
{
  /* PTR is the pointer malloc handed to the user, so step back over
     the debugging header before checking it, as the other hooks do.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */