Additional minor fix to (Bug#8539).
[bpt/emacs.git] / src / gmalloc.c
CommitLineData
74ad5c7f
KH
1/* This file is no longer automatically generated from libc. */
2
3#define _MALLOC_INTERNAL
4
5/* The malloc headers and source files from the C library follow here. */
6
7/* Declarations for `malloc' and friends.
0b5538bd 8 Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
4e6835db 9 2005, 2006, 2007 Free Software Foundation, Inc.
74ad5c7f
KH
10 Written May 1989 by Mike Haertel.
11
12This library is free software; you can redistribute it and/or
423a1f3c 13modify it under the terms of the GNU General Public License as
74ad5c7f
KH
14published by the Free Software Foundation; either version 2 of the
15License, or (at your option) any later version.
16
17This library is distributed in the hope that it will be useful,
18but WITHOUT ANY WARRANTY; without even the implied warranty of
19MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 20General Public License for more details.
74ad5c7f 21
423a1f3c
JB
22You should have received a copy of the GNU General Public
23License along with this library; see the file COPYING. If
3ef97fb6
LK
24not, write to the Free Software Foundation, Inc., 51 Franklin Street,
25Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
26
27 The author may be reached (Email) at the address mike@ai.mit.edu,
28 or (US mail) as Mike Haertel c/o Free Software Foundation. */
29
30#ifndef _MALLOC_H
31
32#define _MALLOC_H 1
33
34#ifdef _MALLOC_INTERNAL
35
36#ifdef HAVE_CONFIG_H
37#include <config.h>
38#endif
39
8d0d84d2
YM
40#ifdef HAVE_GTK_AND_PTHREAD
41#define USE_PTHREAD
42#endif
43
b2e92d3e 44#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
75934b1d 45 || defined STDC_HEADERS || defined PROTOTYPES))
0a27e8ed
RS
46#undef PP
47#define PP(args) args
74ad5c7f
KH
48#undef __ptr_t
49#define __ptr_t void *
50#else /* Not C++ or ANSI C. */
0a27e8ed
RS
51#undef PP
52#define PP(args) ()
74ad5c7f
KH
53#undef __ptr_t
54#define __ptr_t char *
55#endif /* C++ or ANSI C. */
56
57#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
58#include <string.h>
59#else
60#ifndef memset
61#define memset(s, zero, n) bzero ((s), (n))
62#endif
63#ifndef memcpy
64#define memcpy(d, s, n) bcopy ((s), (d), (n))
65#endif
66#endif
67
ca9c0567 68#ifdef HAVE_LIMITS_H
74ad5c7f 69#include <limits.h>
ca9c0567 70#endif
74ad5c7f
KH
71#ifndef CHAR_BIT
72#define CHAR_BIT 8
73#endif
74ad5c7f 74
74ad5c7f 75#include <unistd.h>
74ad5c7f 76
2f213514
YM
77#ifdef USE_PTHREAD
78#include <pthread.h>
79#endif
80
74ad5c7f
KH
81#endif /* _MALLOC_INTERNAL. */
82
83
84#ifdef __cplusplus
85extern "C"
86{
87#endif
88
ca9c0567 89#ifdef STDC_HEADERS
74ad5c7f
KH
90#include <stddef.h>
91#define __malloc_size_t size_t
92#define __malloc_ptrdiff_t ptrdiff_t
93#else
eec2d1de
EZ
94#ifdef __GNUC__
95#include <stddef.h>
96#ifdef __SIZE_TYPE__
97#define __malloc_size_t __SIZE_TYPE__
98#endif
99#endif
100#ifndef __malloc_size_t
74ad5c7f 101#define __malloc_size_t unsigned int
eec2d1de 102#endif
74ad5c7f
KH
103#define __malloc_ptrdiff_t int
104#endif
105
106#ifndef NULL
107#define NULL 0
108#endif
109
110
111/* Allocate SIZE bytes of memory. */
0a27e8ed 112extern __ptr_t malloc PP ((__malloc_size_t __size));
74ad5c7f
KH
113/* Re-allocate the previously allocated block
114 in __ptr_t, making the new block SIZE bytes long. */
0a27e8ed 115extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
74ad5c7f 116/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
0a27e8ed 117extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
74ad5c7f 118/* Free a block allocated by `malloc', `realloc' or `calloc'. */
4624371d 119extern void free PP ((__ptr_t __ptr));
74ad5c7f
KH
120
121/* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
ed68db4d 122#if !defined (_MALLOC_INTERNAL) || defined (MSDOS) /* Avoid conflict. */
0a27e8ed
RS
123extern __ptr_t memalign PP ((__malloc_size_t __alignment,
124 __malloc_size_t __size));
72359c32
YM
125extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
126 __malloc_size_t size));
74ad5c7f
KH
127#endif
128
129/* Allocate SIZE bytes on a page boundary. */
130#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
0a27e8ed 131extern __ptr_t valloc PP ((__malloc_size_t __size));
74ad5c7f
KH
132#endif
133
3ceeb306
YM
134#ifdef USE_PTHREAD
135/* Set up mutexes and make malloc etc. thread-safe. */
136extern void malloc_enable_thread PP ((void));
137#endif
74ad5c7f
KH
138
139#ifdef _MALLOC_INTERNAL
140
141/* The allocator divides the heap into blocks of fixed size; large
142 requests receive one or more whole blocks, and small requests
143 receive a fragment of a block. Fragment sizes are powers of two,
144 and all fragments of a block are the same size. When all the
145 fragments in a block have been freed, the block itself is freed. */
146#define INT_BIT (CHAR_BIT * sizeof(int))
147#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
148#define BLOCKSIZE (1 << BLOCKLOG)
149#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
150
151/* Determine the amount of memory spanned by the initial heap table
152 (not an absolute limit). */
153#define HEAP (INT_BIT > 16 ? 4194304 : 65536)
154
155/* Number of contiguous free blocks allowed to build up at the end of
156 memory before they will be returned to the system. */
157#define FINAL_FREE_BLOCKS 8
158
159/* Data structure giving per-block information. */
160typedef union
161 {
162 /* Heap information for a busy block. */
163 struct
164 {
165 /* Zero for a large (multiblock) object, or positive giving the
166 logarithm to the base two of the fragment size. */
167 int type;
168 union
169 {
170 struct
171 {
172 __malloc_size_t nfree; /* Free frags in a fragmented block. */
173 __malloc_size_t first; /* First free fragment of the block. */
174 } frag;
175 /* For a large object, in its first block, this has the number
176 of blocks in the object. In the other blocks, this has a
177 negative number which says how far back the first block is. */
178 __malloc_ptrdiff_t size;
179 } info;
180 } busy;
181 /* Heap information for a free block
182 (that may be the first of a free cluster). */
183 struct
184 {
185 __malloc_size_t size; /* Size (in blocks) of a free cluster. */
186 __malloc_size_t next; /* Index of next free cluster. */
187 __malloc_size_t prev; /* Index of previous free cluster. */
188 } free;
189 } malloc_info;
190
191/* Pointer to first block of the heap. */
192extern char *_heapbase;
193
194/* Table indexed by block number giving per-block information. */
195extern malloc_info *_heapinfo;
196
197/* Address to block number and vice versa. */
198#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
199#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
200
201/* Current search index for the heap table. */
202extern __malloc_size_t _heapindex;
203
204/* Limit of valid info table indices. */
205extern __malloc_size_t _heaplimit;
206
207/* Doubly linked lists of free fragments. */
208struct list
209 {
210 struct list *next;
211 struct list *prev;
212 };
213
214/* Free list headers for each fragment size. */
215extern struct list _fraghead[];
216
217/* List of blocks allocated with `memalign' (or `valloc'). */
218struct alignlist
219 {
220 struct alignlist *next;
221 __ptr_t aligned; /* The address that memaligned returned. */
222 __ptr_t exact; /* The address that malloc returned. */
223 };
224extern struct alignlist *_aligned_blocks;
225
226/* Instrumentation. */
227extern __malloc_size_t _chunks_used;
228extern __malloc_size_t _bytes_used;
229extern __malloc_size_t _chunks_free;
230extern __malloc_size_t _bytes_free;
231
232/* Internal versions of `malloc', `realloc', and `free'
233 used when these functions need to call each other.
234 They are the same but don't call the hooks. */
0a27e8ed
RS
235extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
236extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
237extern void _free_internal PP ((__ptr_t __ptr));
8d0d84d2
YM
238extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
239extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
240extern void _free_internal_nolock PP ((__ptr_t __ptr));
74ad5c7f 241
2f213514 242#ifdef USE_PTHREAD
8d0d84d2 243extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
3ceeb306
YM
244extern int _malloc_thread_enabled_p;
245#define LOCK() \
246 do { \
247 if (_malloc_thread_enabled_p) \
248 pthread_mutex_lock (&_malloc_mutex); \
249 } while (0)
250#define UNLOCK() \
251 do { \
252 if (_malloc_thread_enabled_p) \
253 pthread_mutex_unlock (&_malloc_mutex); \
254 } while (0)
255#define LOCK_ALIGNED_BLOCKS() \
256 do { \
257 if (_malloc_thread_enabled_p) \
258 pthread_mutex_lock (&_aligned_blocks_mutex); \
259 } while (0)
260#define UNLOCK_ALIGNED_BLOCKS() \
261 do { \
262 if (_malloc_thread_enabled_p) \
263 pthread_mutex_unlock (&_aligned_blocks_mutex); \
264 } while (0)
2f213514
YM
265#else
266#define LOCK()
267#define UNLOCK()
8d0d84d2
YM
268#define LOCK_ALIGNED_BLOCKS()
269#define UNLOCK_ALIGNED_BLOCKS()
2f213514
YM
270#endif
271
74ad5c7f
KH
272#endif /* _MALLOC_INTERNAL. */
273
274/* Given an address in the middle of a malloc'd object,
275 return the address of the beginning of the object. */
0a27e8ed 276extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));
74ad5c7f
KH
277
278/* Underlying allocation function; successive calls should
279 return contiguous pieces of memory. */
0a27e8ed 280extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));
74ad5c7f
KH
281
282/* Default value of `__morecore'. */
0a27e8ed 283extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));
74ad5c7f
KH
284
285/* If not NULL, this function is called after each time
286 `__morecore' is called to increase the data size. */
0a27e8ed 287extern void (*__after_morecore_hook) PP ((void));
74ad5c7f
KH
288
289/* Number of extra blocks to get each time we ask for more core.
290 This reduces the frequency of calling `(*__morecore)'. */
291extern __malloc_size_t __malloc_extra_blocks;
292
293/* Nonzero if `malloc' has been called and done its initialization. */
294extern int __malloc_initialized;
295/* Function called to initialize malloc data structures. */
0a27e8ed 296extern int __malloc_initialize PP ((void));
74ad5c7f
KH
297
298/* Hooks for debugging versions. */
0a27e8ed
RS
299extern void (*__malloc_initialize_hook) PP ((void));
300extern void (*__free_hook) PP ((__ptr_t __ptr));
301extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
302extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
303extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
304 __malloc_size_t __alignment));
74ad5c7f
KH
305
306/* Return values for `mprobe': these are the kinds of inconsistencies that
307 `mcheck' enables detection of. */
308enum mcheck_status
309 {
310 MCHECK_DISABLED = -1, /* Consistency checking is not turned on. */
311 MCHECK_OK, /* Block is fine. */
312 MCHECK_FREE, /* Block freed twice. */
313 MCHECK_HEAD, /* Memory before the block was clobbered. */
314 MCHECK_TAIL /* Memory after the block was clobbered. */
315 };
316
317/* Activate a standard collection of debugging hooks. This must be called
318 before `malloc' is ever called. ABORTFUNC is called with an error code
319 (see enum above) when an inconsistency is detected. If ABORTFUNC is
320 null, the standard function prints on stderr and then calls `abort'. */
0a27e8ed 321extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));
74ad5c7f
KH
322
323/* Check for aberrations in a particular malloc'd block. You must have
324 called `mcheck' already. These are the same checks that `mcheck' does
325 when you free or reallocate a block. */
0a27e8ed 326extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
74ad5c7f
KH
327
328/* Activate a standard collection of tracing hooks. */
0a27e8ed
RS
329extern void mtrace PP ((void));
330extern void muntrace PP ((void));
74ad5c7f
KH
331
332/* Statistics available to the user. */
333struct mstats
334 {
335 __malloc_size_t bytes_total; /* Total size of the heap. */
336 __malloc_size_t chunks_used; /* Chunks allocated by the user. */
337 __malloc_size_t bytes_used; /* Byte total of user-allocated chunks. */
338 __malloc_size_t chunks_free; /* Chunks in the free list. */
339 __malloc_size_t bytes_free; /* Byte total of chunks in the free list. */
340 };
341
342/* Pick up the current statistics. */
0a27e8ed 343extern struct mstats mstats PP ((void));
74ad5c7f
KH
344
345/* Call WARNFUN with a warning message when memory usage is high. */
0a27e8ed
RS
346extern void memory_warnings PP ((__ptr_t __start,
347 void (*__warnfun) PP ((const char *))));
74ad5c7f
KH
348
349
350/* Relocating allocator. */
351
352/* Allocate SIZE bytes, and store the address in *HANDLEPTR. */
0a27e8ed 353extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
74ad5c7f
KH
354
355/* Free the storage allocated in HANDLEPTR. */
0a27e8ed 356extern void r_alloc_free PP ((__ptr_t *__handleptr));
74ad5c7f
KH
357
358/* Adjust the block at HANDLEPTR to be SIZE bytes long. */
0a27e8ed 359extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
74ad5c7f
KH
360
361
362#ifdef __cplusplus
363}
364#endif
365
366#endif /* malloc.h */
367/* Memory allocator `malloc'.
368 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
369 Written May 1989 by Mike Haertel.
370
371This library is free software; you can redistribute it and/or
423a1f3c 372modify it under the terms of the GNU General Public License as
74ad5c7f
KH
373published by the Free Software Foundation; either version 2 of the
374License, or (at your option) any later version.
375
376This library is distributed in the hope that it will be useful,
377but WITHOUT ANY WARRANTY; without even the implied warranty of
378MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 379General Public License for more details.
74ad5c7f 380
423a1f3c
JB
381You should have received a copy of the GNU General Public
382License along with this library; see the file COPYING. If
3ef97fb6
LK
383not, write to the Free Software Foundation, Inc., 51 Franklin Street,
384Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
385
386 The author may be reached (Email) at the address mike@ai.mit.edu,
387 or (US mail) as Mike Haertel c/o Free Software Foundation. */
388
389#ifndef _MALLOC_INTERNAL
390#define _MALLOC_INTERNAL
391#include <malloc.h>
392#endif
393#include <errno.h>
394
395/* How to really get more memory. */
ef6d1039
SM
396#if defined(CYGWIN)
397extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
398extern int bss_sbrk_did_unexec;
399#endif
3cacba85 400__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;
74ad5c7f
KH
401
402/* Debugging hook for `malloc'. */
0a27e8ed 403__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
74ad5c7f
KH
404
405/* Pointer to the base of the first block. */
406char *_heapbase;
407
408/* Block information table. Allocated with align/__free (not malloc/free). */
409malloc_info *_heapinfo;
410
411/* Number of info entries. */
412static __malloc_size_t heapsize;
413
414/* Search index in the info table. */
415__malloc_size_t _heapindex;
416
417/* Limit of valid info table indices. */
418__malloc_size_t _heaplimit;
419
420/* Free lists for each fragment size. */
421struct list _fraghead[BLOCKLOG];
422
423/* Instrumentation. */
424__malloc_size_t _chunks_used;
425__malloc_size_t _bytes_used;
426__malloc_size_t _chunks_free;
427__malloc_size_t _bytes_free;
428
429/* Are you experienced? */
430int __malloc_initialized;
431
432__malloc_size_t __malloc_extra_blocks;
433
0a27e8ed
RS
434void (*__malloc_initialize_hook) PP ((void));
435void (*__after_morecore_hook) PP ((void));
74ad5c7f 436
5dcab13e
GM
437#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE
438
439/* Some code for hunting a bug writing into _heapinfo.
440
441 Call this macro with argument PROT non-zero to protect internal
442 malloc state against writing to it, call it with a zero argument to
443 make it readable and writable.
444
445 Note that this only works if BLOCKSIZE == page size, which is
446 the case on the i386. */
447
448#include <sys/types.h>
449#include <sys/mman.h>
450
451static int state_protected_p;
452static __malloc_size_t last_state_size;
453static malloc_info *last_heapinfo;
454
455void
456protect_malloc_state (protect_p)
457 int protect_p;
458{
459 /* If _heapinfo has been relocated, make sure its old location
460 isn't left read-only; it will be reused by malloc. */
461 if (_heapinfo != last_heapinfo
462 && last_heapinfo
463 && state_protected_p)
464 mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);
465
466 last_state_size = _heaplimit * sizeof *_heapinfo;
467 last_heapinfo = _heapinfo;
177c0ea7 468
5dcab13e
GM
469 if (protect_p != state_protected_p)
470 {
471 state_protected_p = protect_p;
472 if (mprotect (_heapinfo, last_state_size,
473 protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
474 abort ();
475 }
476}
477
478#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)
479
480#else
481#define PROTECT_MALLOC_STATE(PROT) /* empty */
482#endif
483
74ad5c7f
KH
484
485/* Aligned allocation. */
0a27e8ed 486static __ptr_t align PP ((__malloc_size_t));
74ad5c7f
KH
487static __ptr_t
488align (size)
489 __malloc_size_t size;
490{
491 __ptr_t result;
492 unsigned long int adj;
493
ceeb3d7d
EZ
494 /* align accepts an unsigned argument, but __morecore accepts a
495 signed one. This could lead to trouble if SIZE overflows a
496 signed int type accepted by __morecore. We just punt in that
497 case, since they are requesting a ludicrous amount anyway. */
498 if ((__malloc_ptrdiff_t)size < 0)
499 result = 0;
500 else
501 result = (*__morecore) (size);
74ad5c7f
KH
502 adj = (unsigned long int) ((unsigned long int) ((char *) result -
503 (char *) NULL)) % BLOCKSIZE;
504 if (adj != 0)
505 {
506 __ptr_t new;
507 adj = BLOCKSIZE - adj;
508 new = (*__morecore) (adj);
509 result = (char *) result + adj;
510 }
511
512 if (__after_morecore_hook)
513 (*__after_morecore_hook) ();
514
515 return result;
516}
517
518/* Get SIZE bytes, if we can get them starting at END.
519 Return the address of the space we got.
520 If we cannot get space at END, fail and return 0. */
0a27e8ed 521static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
74ad5c7f
KH
522static __ptr_t
523get_contiguous_space (size, position)
524 __malloc_ptrdiff_t size;
525 __ptr_t position;
526{
527 __ptr_t before;
528 __ptr_t after;
529
530 before = (*__morecore) (0);
531 /* If we can tell in advance that the break is at the wrong place,
532 fail now. */
533 if (before != position)
534 return 0;
535
536 /* Allocate SIZE bytes and get the address of them. */
537 after = (*__morecore) (size);
538 if (!after)
539 return 0;
540
541 /* It was not contiguous--reject it. */
542 if (after != position)
543 {
544 (*__morecore) (- size);
545 return 0;
546 }
547
548 return after;
549}
550
551
552/* This is called when `_heapinfo' and `heapsize' have just
553 been set to describe a new info table. Set up the table
554 to describe itself and account for it in the statistics. */
0a27e8ed 555static void register_heapinfo PP ((void));
74ad5c7f
KH
556#ifdef __GNUC__
557__inline__
558#endif
559static void
560register_heapinfo ()
561{
562 __malloc_size_t block, blocks;
563
564 block = BLOCK (_heapinfo);
565 blocks = BLOCKIFY (heapsize * sizeof (malloc_info));
566
567 /* Account for the _heapinfo block itself in the statistics. */
568 _bytes_used += blocks * BLOCKSIZE;
569 ++_chunks_used;
570
571 /* Describe the heapinfo block itself in the heapinfo. */
572 _heapinfo[block].busy.type = 0;
573 _heapinfo[block].busy.info.size = blocks;
574 /* Leave back-pointers for malloc_find_address. */
575 while (--blocks > 0)
576 _heapinfo[block + blocks].busy.info.size = -blocks;
577}
578
2f213514 579#ifdef USE_PTHREAD
8d0d84d2
YM
580pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
581pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
3ceeb306
YM
582int _malloc_thread_enabled_p;
583
584static void
585malloc_atfork_handler_prepare ()
586{
587 LOCK ();
588 LOCK_ALIGNED_BLOCKS ();
589}
590
591static void
592malloc_atfork_handler_parent ()
593{
594 UNLOCK_ALIGNED_BLOCKS ();
595 UNLOCK ();
596}
597
598static void
599malloc_atfork_handler_child ()
600{
601 UNLOCK_ALIGNED_BLOCKS ();
602 UNLOCK ();
603}
604
605/* Set up mutexes and make malloc etc. thread-safe. */
606void
607malloc_enable_thread ()
608{
609 if (_malloc_thread_enabled_p)
610 return;
611
612 /* Some pthread implementations call malloc for statically
613 initialized mutexes when they are used first. To avoid such a
614 situation, we initialize mutexes here while their use is
615 disabled in malloc etc. */
616 pthread_mutex_init (&_malloc_mutex, NULL);
617 pthread_mutex_init (&_aligned_blocks_mutex, NULL);
618 pthread_atfork (malloc_atfork_handler_prepare,
619 malloc_atfork_handler_parent,
620 malloc_atfork_handler_child);
621 _malloc_thread_enabled_p = 1;
622}
2f213514 623#endif
74ad5c7f 624
2f213514
YM
625static void
626malloc_initialize_1 ()
627{
a3ba27da
GM
628#ifdef GC_MCHECK
629 mcheck (NULL);
630#endif
631
74ad5c7f
KH
632 if (__malloc_initialize_hook)
633 (*__malloc_initialize_hook) ();
634
635 heapsize = HEAP / BLOCKSIZE;
636 _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
637 if (_heapinfo == NULL)
2f213514 638 return;
74ad5c7f
KH
639 memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
640 _heapinfo[0].free.size = 0;
641 _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
642 _heapindex = 0;
643 _heapbase = (char *) _heapinfo;
644 _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));
645
646 register_heapinfo ();
647
648 __malloc_initialized = 1;
5dcab13e 649 PROTECT_MALLOC_STATE (1);
2f213514
YM
650 return;
651}
652
784c1472
JD
653/* Set everything up and remember that we have.
654 main will call malloc which calls this function. That is before any threads
655 or signal handlers has been set up, so we don't need thread protection. */
2f213514
YM
656int
657__malloc_initialize ()
658{
2f213514
YM
659 if (__malloc_initialized)
660 return 0;
661
662 malloc_initialize_1 ();
2f213514
YM
663
664 return __malloc_initialized;
74ad5c7f
KH
665}
666
667static int morecore_recursing;
668
669/* Get neatly aligned memory, initializing or
670 growing the heap info table as necessary. */
8d0d84d2 671static __ptr_t morecore_nolock PP ((__malloc_size_t));
74ad5c7f 672static __ptr_t
8d0d84d2 673morecore_nolock (size)
74ad5c7f
KH
674 __malloc_size_t size;
675{
676 __ptr_t result;
677 malloc_info *newinfo, *oldinfo;
678 __malloc_size_t newsize;
679
680 if (morecore_recursing)
681 /* Avoid recursion. The caller will know how to handle a null return. */
682 return NULL;
683
684 result = align (size);
685 if (result == NULL)
686 return NULL;
687
5dcab13e
GM
688 PROTECT_MALLOC_STATE (0);
689
74ad5c7f
KH
690 /* Check if we need to grow the info table. */
691 if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
692 {
693 /* Calculate the new _heapinfo table size. We do not account for the
694 added blocks in the table itself, as we hope to place them in
695 existing free space, which is already covered by part of the
696 existing table. */
697 newsize = heapsize;
698 do
699 newsize *= 2;
700 while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);
701
702 /* We must not reuse existing core for the new info table when called
703 from realloc in the case of growing a large block, because the
704 block being grown is momentarily marked as free. In this case
705 _heaplimit is zero so we know not to reuse space for internal
706 allocation. */
707 if (_heaplimit != 0)
708 {
709 /* First try to allocate the new info table in core we already
710 have, in the usual way using realloc. If realloc cannot
711 extend it in place or relocate it to existing sufficient core,
712 we will get called again, and the code above will notice the
713 `morecore_recursing' flag and return null. */
714 int save = errno; /* Don't want to clobber errno with ENOMEM. */
715 morecore_recursing = 1;
8d0d84d2 716 newinfo = (malloc_info *) _realloc_internal_nolock
74ad5c7f
KH
717 (_heapinfo, newsize * sizeof (malloc_info));
718 morecore_recursing = 0;
719 if (newinfo == NULL)
720 errno = save;
721 else
722 {
723 /* We found some space in core, and realloc has put the old
724 table's blocks on the free list. Now zero the new part
725 of the table and install the new table location. */
726 memset (&newinfo[heapsize], 0,
727 (newsize - heapsize) * sizeof (malloc_info));
728 _heapinfo = newinfo;
729 heapsize = newsize;
730 goto got_heap;
731 }
732 }
733
734 /* Allocate new space for the malloc info table. */
735 while (1)
736 {
737 newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
738
739 /* Did it fail? */
740 if (newinfo == NULL)
741 {
742 (*__morecore) (-size);
743 return NULL;
744 }
745
746 /* Is it big enough to record status for its own space?
747 If so, we win. */
748 if ((__malloc_size_t) BLOCK ((char *) newinfo
749 + newsize * sizeof (malloc_info))
750 < newsize)
751 break;
752
753 /* Must try again. First give back most of what we just got. */
754 (*__morecore) (- newsize * sizeof (malloc_info));
755 newsize *= 2;
756 }
757
758 /* Copy the old table to the beginning of the new,
759 and zero the rest of the new table. */
760 memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
761 memset (&newinfo[heapsize], 0,
762 (newsize - heapsize) * sizeof (malloc_info));
763 oldinfo = _heapinfo;
764 _heapinfo = newinfo;
765 heapsize = newsize;
766
767 register_heapinfo ();
768
769 /* Reset _heaplimit so _free_internal never decides
770 it can relocate or resize the info table. */
771 _heaplimit = 0;
8d0d84d2 772 _free_internal_nolock (oldinfo);
5dcab13e 773 PROTECT_MALLOC_STATE (0);
74ad5c7f
KH
774
775 /* The new heap limit includes the new table just allocated. */
776 _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
777 return result;
778 }
779
780 got_heap:
781 _heaplimit = BLOCK ((char *) result + size);
782 return result;
783}
784
785/* Allocate memory from the heap. */
786__ptr_t
8d0d84d2 787_malloc_internal_nolock (size)
74ad5c7f
KH
788 __malloc_size_t size;
789{
790 __ptr_t result;
791 __malloc_size_t block, blocks, lastblocks, start;
792 register __malloc_size_t i;
793 struct list *next;
794
795 /* ANSI C allows `malloc (0)' to either return NULL, or to return a
796 valid address you can realloc and free (though not dereference).
797
798 It turns out that some extant code (sunrpc, at least Ultrix's version)
799 expects `malloc (0)' to return non-NULL and breaks otherwise.
800 Be compatible. */
801
802#if 0
803 if (size == 0)
804 return NULL;
805#endif
806
5dcab13e
GM
807 PROTECT_MALLOC_STATE (0);
808
74ad5c7f
KH
809 if (size < sizeof (struct list))
810 size = sizeof (struct list);
811
74ad5c7f
KH
812 /* Determine the allocation policy based on the request size. */
813 if (size <= BLOCKSIZE / 2)
814 {
815 /* Small allocation to receive a fragment of a block.
816 Determine the logarithm to base two of the fragment size. */
817 register __malloc_size_t log = 1;
818 --size;
819 while ((size /= 2) != 0)
820 ++log;
821
822 /* Look in the fragment lists for a
823 free fragment of the desired size. */
824 next = _fraghead[log].next;
825 if (next != NULL)
826 {
827 /* There are free fragments of this size.
828 Pop a fragment out of the fragment list and return it.
829 Update the block's nfree and first counters. */
830 result = (__ptr_t) next;
831 next->prev->next = next->next;
832 if (next->next != NULL)
833 next->next->prev = next->prev;
834 block = BLOCK (result);
835 if (--_heapinfo[block].busy.info.frag.nfree != 0)
836 _heapinfo[block].busy.info.frag.first = (unsigned long int)
837 ((unsigned long int) ((char *) next->next - (char *) NULL)
838 % BLOCKSIZE) >> log;
839
840 /* Update the statistics. */
841 ++_chunks_used;
842 _bytes_used += 1 << log;
843 --_chunks_free;
844 _bytes_free -= 1 << log;
845 }
846 else
847 {
848 /* No free fragments of the desired size, so get a new block
849 and break it into fragments, returning the first. */
8094989b 850#ifdef GC_MALLOC_CHECK
8d0d84d2 851 result = _malloc_internal_nolock (BLOCKSIZE);
5dcab13e 852 PROTECT_MALLOC_STATE (0);
8d0d84d2
YM
853#elif defined (USE_PTHREAD)
854 result = _malloc_internal_nolock (BLOCKSIZE);
8094989b 855#else
74ad5c7f 856 result = malloc (BLOCKSIZE);
8094989b 857#endif
74ad5c7f 858 if (result == NULL)
5dcab13e
GM
859 {
860 PROTECT_MALLOC_STATE (1);
2f213514 861 goto out;
5dcab13e 862 }
74ad5c7f
KH
863
864 /* Link all fragments but the first into the free list. */
865 next = (struct list *) ((char *) result + (1 << log));
866 next->next = NULL;
867 next->prev = &_fraghead[log];
868 _fraghead[log].next = next;
869
870 for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
871 {
872 next = (struct list *) ((char *) result + (i << log));
873 next->next = _fraghead[log].next;
874 next->prev = &_fraghead[log];
875 next->prev->next = next;
876 next->next->prev = next;
877 }
878
879 /* Initialize the nfree and first counters for this block. */
880 block = BLOCK (result);
881 _heapinfo[block].busy.type = log;
882 _heapinfo[block].busy.info.frag.nfree = i - 1;
883 _heapinfo[block].busy.info.frag.first = i - 1;
884
885 _chunks_free += (BLOCKSIZE >> log) - 1;
886 _bytes_free += BLOCKSIZE - (1 << log);
887 _bytes_used -= BLOCKSIZE - (1 << log);
888 }
889 }
890 else
891 {
892 /* Large allocation to receive one or more blocks.
893 Search the free list in a circle starting at the last place visited.
894 If we loop completely around without finding a large enough
895 space we will have to get more memory from the system. */
896 blocks = BLOCKIFY (size);
897 start = block = _heapindex;
898 while (_heapinfo[block].free.size < blocks)
899 {
900 block = _heapinfo[block].free.next;
901 if (block == start)
902 {
903 /* Need to get more from the system. Get a little extra. */
904 __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
905 block = _heapinfo[0].free.prev;
906 lastblocks = _heapinfo[block].free.size;
907 /* Check to see if the new core will be contiguous with the
908 final free block; if so we don't need to get as much. */
909 if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
910 /* We can't do this if we will have to make the heap info
cc4a96c6 911 table bigger to accommodate the new space. */
74ad5c7f
KH
912 block + wantblocks <= heapsize &&
913 get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
914 ADDRESS (block + lastblocks)))
915 {
916 /* We got it contiguously. Which block we are extending
917 (the `final free block' referred to above) might have
918 changed, if it got combined with a freed info table. */
919 block = _heapinfo[0].free.prev;
920 _heapinfo[block].free.size += (wantblocks - lastblocks);
921 _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
922 _heaplimit += wantblocks - lastblocks;
923 continue;
924 }
8d0d84d2 925 result = morecore_nolock (wantblocks * BLOCKSIZE);
74ad5c7f 926 if (result == NULL)
2f213514 927 goto out;
74ad5c7f
KH
928 block = BLOCK (result);
929 /* Put the new block at the end of the free list. */
930 _heapinfo[block].free.size = wantblocks;
931 _heapinfo[block].free.prev = _heapinfo[0].free.prev;
932 _heapinfo[block].free.next = 0;
933 _heapinfo[0].free.prev = block;
934 _heapinfo[_heapinfo[block].free.prev].free.next = block;
935 ++_chunks_free;
936 /* Now loop to use some of that block for this allocation. */
937 }
938 }
939
940 /* At this point we have found a suitable free list entry.
941 Figure out how to remove what we need from the list. */
942 result = ADDRESS (block);
943 if (_heapinfo[block].free.size > blocks)
944 {
945 /* The block we found has a bit left over,
946 so relink the tail end back into the free list. */
947 _heapinfo[block + blocks].free.size
948 = _heapinfo[block].free.size - blocks;
949 _heapinfo[block + blocks].free.next
950 = _heapinfo[block].free.next;
951 _heapinfo[block + blocks].free.prev
952 = _heapinfo[block].free.prev;
953 _heapinfo[_heapinfo[block].free.prev].free.next
954 = _heapinfo[_heapinfo[block].free.next].free.prev
955 = _heapindex = block + blocks;
956 }
957 else
958 {
959 /* The block exactly matches our requirements,
960 so just remove it from the list. */
961 _heapinfo[_heapinfo[block].free.next].free.prev
962 = _heapinfo[block].free.prev;
963 _heapinfo[_heapinfo[block].free.prev].free.next
964 = _heapindex = _heapinfo[block].free.next;
965 --_chunks_free;
966 }
967
968 _heapinfo[block].busy.type = 0;
969 _heapinfo[block].busy.info.size = blocks;
970 ++_chunks_used;
971 _bytes_used += blocks * BLOCKSIZE;
972 _bytes_free -= blocks * BLOCKSIZE;
973
974 /* Mark all the blocks of the object just allocated except for the
975 first with a negative number so you can find the first block by
976 adding that adjustment. */
977 while (--blocks > 0)
978 _heapinfo[block + blocks].busy.info.size = -blocks;
979 }
980
5dcab13e 981 PROTECT_MALLOC_STATE (1);
2f213514 982 out:
8d0d84d2
YM
983 return result;
984}
985
986__ptr_t
987_malloc_internal (size)
988 __malloc_size_t size;
989{
990 __ptr_t result;
991
992 LOCK ();
993 result = _malloc_internal_nolock (size);
2f213514 994 UNLOCK ();
8d0d84d2 995
74ad5c7f
KH
996 return result;
997}
998
999__ptr_t
1000malloc (size)
1001 __malloc_size_t size;
1002{
8d0d84d2
YM
1003 __ptr_t (*hook) (__malloc_size_t);
1004
74ad5c7f
KH
1005 if (!__malloc_initialized && !__malloc_initialize ())
1006 return NULL;
1007
8d0d84d2
YM
1008 /* Copy the value of __malloc_hook to an automatic variable in case
1009 __malloc_hook is modified in another thread between its
1010 NULL-check and the use.
1011
1012 Note: Strictly speaking, this is not a right solution. We should
1013 use mutexes to access non-read-only variables that are shared
1014 among multiple threads. We just leave it for compatibility with
1015 glibc malloc (i.e., assignments to __malloc_hook) for now. */
1016 hook = __malloc_hook;
1017 return (hook != NULL ? *hook : _malloc_internal) (size);
74ad5c7f
KH
1018}
1019\f
1020#ifndef _LIBC
1021
1022/* On some ANSI C systems, some libc functions call _malloc, _free
1023 and _realloc. Make them use the GNU functions. */
1024
1025__ptr_t
1026_malloc (size)
1027 __malloc_size_t size;
1028{
1029 return malloc (size);
1030}
1031
1032void
1033_free (ptr)
1034 __ptr_t ptr;
1035{
1036 free (ptr);
1037}
1038
1039__ptr_t
1040_realloc (ptr, size)
1041 __ptr_t ptr;
1042 __malloc_size_t size;
1043{
1044 return realloc (ptr, size);
1045}
1046
1047#endif
1048/* Free a block of memory allocated by `malloc'.
1049 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
1050 Written May 1989 by Mike Haertel.
1051
1052This library is free software; you can redistribute it and/or
423a1f3c 1053modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1054published by the Free Software Foundation; either version 2 of the
1055License, or (at your option) any later version.
1056
1057This library is distributed in the hope that it will be useful,
1058but WITHOUT ANY WARRANTY; without even the implied warranty of
1059MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1060General Public License for more details.
74ad5c7f 1061
423a1f3c
JB
1062You should have received a copy of the GNU General Public
1063License along with this library; see the file COPYING. If
3ef97fb6
LK
1064not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1065Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
1066
1067 The author may be reached (Email) at the address mike@ai.mit.edu,
1068 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1069
1070#ifndef _MALLOC_INTERNAL
1071#define _MALLOC_INTERNAL
1072#include <malloc.h>
1073#endif
1074
1075
1076/* Cope with systems lacking `memmove'. */
1077#ifndef memmove
4624371d 1078#if (!defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
74ad5c7f
KH
1079#ifdef emacs
1080#undef __malloc_safe_bcopy
1081#define __malloc_safe_bcopy safe_bcopy
1082#endif
1083/* This function is defined in realloc.c. */
0a27e8ed 1084extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
74ad5c7f
KH
1085#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
1086#endif
1087#endif
1088
1089
1090/* Debugging hook for free. */
0a27e8ed 1091void (*__free_hook) PP ((__ptr_t __ptr));
74ad5c7f
KH
1092
1093/* List of blocks allocated by memalign. */
1094struct alignlist *_aligned_blocks = NULL;
1095
1096/* Return memory to the heap.
8d0d84d2 1097 Like `_free_internal' but don't lock mutex. */
74ad5c7f 1098void
8d0d84d2 1099_free_internal_nolock (ptr)
74ad5c7f
KH
1100 __ptr_t ptr;
1101{
1102 int type;
1103 __malloc_size_t block, blocks;
1104 register __malloc_size_t i;
1105 struct list *prev, *next;
1106 __ptr_t curbrk;
1107 const __malloc_size_t lesscore_threshold
1108 /* Threshold of free space at which we will return some to the system. */
1109 = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;
1110
1111 register struct alignlist *l;
1112
1113 if (ptr == NULL)
1114 return;
1115
5dcab13e 1116 PROTECT_MALLOC_STATE (0);
177c0ea7 1117
8d0d84d2 1118 LOCK_ALIGNED_BLOCKS ();
74ad5c7f
KH
1119 for (l = _aligned_blocks; l != NULL; l = l->next)
1120 if (l->aligned == ptr)
1121 {
1122 l->aligned = NULL; /* Mark the slot in the list as free. */
1123 ptr = l->exact;
1124 break;
1125 }
8d0d84d2 1126 UNLOCK_ALIGNED_BLOCKS ();
74ad5c7f
KH
1127
1128 block = BLOCK (ptr);
1129
1130 type = _heapinfo[block].busy.type;
1131 switch (type)
1132 {
1133 case 0:
1134 /* Get as many statistics as early as we can. */
1135 --_chunks_used;
1136 _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
1137 _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
1138
1139 /* Find the free cluster previous to this one in the free list.
1140 Start searching at the last block referenced; this may benefit
1141 programs with locality of allocation. */
1142 i = _heapindex;
1143 if (i > block)
1144 while (i > block)
1145 i = _heapinfo[i].free.prev;
1146 else
1147 {
1148 do
1149 i = _heapinfo[i].free.next;
1150 while (i > 0 && i < block);
1151 i = _heapinfo[i].free.prev;
1152 }
1153
1154 /* Determine how to link this block into the free list. */
1155 if (block == i + _heapinfo[i].free.size)
1156 {
1157 /* Coalesce this block with its predecessor. */
1158 _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
1159 block = i;
1160 }
1161 else
1162 {
1163 /* Really link this block back into the free list. */
1164 _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
1165 _heapinfo[block].free.next = _heapinfo[i].free.next;
1166 _heapinfo[block].free.prev = i;
1167 _heapinfo[i].free.next = block;
1168 _heapinfo[_heapinfo[block].free.next].free.prev = block;
1169 ++_chunks_free;
1170 }
1171
1172 /* Now that the block is linked in, see if we can coalesce it
1173 with its successor (by deleting its successor from the list
1174 and adding in its size). */
1175 if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
1176 {
1177 _heapinfo[block].free.size
1178 += _heapinfo[_heapinfo[block].free.next].free.size;
1179 _heapinfo[block].free.next
1180 = _heapinfo[_heapinfo[block].free.next].free.next;
1181 _heapinfo[_heapinfo[block].free.next].free.prev = block;
1182 --_chunks_free;
1183 }
1184
1185 /* How many trailing free blocks are there now? */
1186 blocks = _heapinfo[block].free.size;
1187
1188 /* Where is the current end of accessible core? */
1189 curbrk = (*__morecore) (0);
1190
1191 if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
1192 {
1193 /* The end of the malloc heap is at the end of accessible core.
1194 It's possible that moving _heapinfo will allow us to
1195 return some space to the system. */
1196
1197 __malloc_size_t info_block = BLOCK (_heapinfo);
1198 __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
1199 __malloc_size_t prev_block = _heapinfo[block].free.prev;
1200 __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
1201 __malloc_size_t next_block = _heapinfo[block].free.next;
1202 __malloc_size_t next_blocks = _heapinfo[next_block].free.size;
1203
1204 if (/* Win if this block being freed is last in core, the info table
1205 is just before it, the previous free block is just before the
1206 info table, and the two free blocks together form a useful
1207 amount to return to the system. */
1208 (block + blocks == _heaplimit &&
1209 info_block + info_blocks == block &&
1210 prev_block != 0 && prev_block + prev_blocks == info_block &&
1211 blocks + prev_blocks >= lesscore_threshold) ||
1212 /* Nope, not the case. We can also win if this block being
1213 freed is just before the info table, and the table extends
1214 to the end of core or is followed only by a free block,
1215 and the total free space is worth returning to the system. */
1216 (block + blocks == info_block &&
1217 ((info_block + info_blocks == _heaplimit &&
1218 blocks >= lesscore_threshold) ||
1219 (info_block + info_blocks == next_block &&
1220 next_block + next_blocks == _heaplimit &&
1221 blocks + next_blocks >= lesscore_threshold)))
1222 )
1223 {
1224 malloc_info *newinfo;
1225 __malloc_size_t oldlimit = _heaplimit;
1226
1227 /* Free the old info table, clearing _heaplimit to avoid
1228 recursion into this code. We don't want to return the
1229 table's blocks to the system before we have copied them to
1230 the new location. */
1231 _heaplimit = 0;
8d0d84d2 1232 _free_internal_nolock (_heapinfo);
74ad5c7f
KH
1233 _heaplimit = oldlimit;
1234
1235 /* Tell malloc to search from the beginning of the heap for
1236 free blocks, so it doesn't reuse the ones just freed. */
1237 _heapindex = 0;
1238
1239 /* Allocate new space for the info table and move its data. */
8d0d84d2
YM
1240 newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
1241 * BLOCKSIZE);
5dcab13e 1242 PROTECT_MALLOC_STATE (0);
74ad5c7f
KH
1243 memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
1244 _heapinfo = newinfo;
1245
1246 /* We should now have coalesced the free block with the
1247 blocks freed from the old info table. Examine the entire
1248 trailing free block to decide below whether to return some
1249 to the system. */
1250 block = _heapinfo[0].free.prev;
1251 blocks = _heapinfo[block].free.size;
1252 }
1253
1254 /* Now see if we can return stuff to the system. */
1255 if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
1256 {
1257 register __malloc_size_t bytes = blocks * BLOCKSIZE;
1258 _heaplimit -= blocks;
1259 (*__morecore) (-bytes);
1260 _heapinfo[_heapinfo[block].free.prev].free.next
1261 = _heapinfo[block].free.next;
1262 _heapinfo[_heapinfo[block].free.next].free.prev
1263 = _heapinfo[block].free.prev;
1264 block = _heapinfo[block].free.prev;
1265 --_chunks_free;
1266 _bytes_free -= bytes;
1267 }
1268 }
1269
1270 /* Set the next search to begin at this block. */
1271 _heapindex = block;
1272 break;
1273
1274 default:
1275 /* Do some of the statistics. */
1276 --_chunks_used;
1277 _bytes_used -= 1 << type;
1278 ++_chunks_free;
1279 _bytes_free += 1 << type;
1280
1281 /* Get the address of the first free fragment in this block. */
1282 prev = (struct list *) ((char *) ADDRESS (block) +
1283 (_heapinfo[block].busy.info.frag.first << type));
1284
1285 if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
1286 {
1287 /* If all fragments of this block are free, remove them
1288 from the fragment list and free the whole block. */
1289 next = prev;
1290 for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
1291 next = next->next;
1292 prev->prev->next = next;
1293 if (next != NULL)
1294 next->prev = prev->prev;
1295 _heapinfo[block].busy.type = 0;
1296 _heapinfo[block].busy.info.size = 1;
1297
1298 /* Keep the statistics accurate. */
1299 ++_chunks_used;
1300 _bytes_used += BLOCKSIZE;
1301 _chunks_free -= BLOCKSIZE >> type;
1302 _bytes_free -= BLOCKSIZE;
1303
8d0d84d2
YM
1304#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
1305 _free_internal_nolock (ADDRESS (block));
8094989b 1306#else
74ad5c7f 1307 free (ADDRESS (block));
8094989b 1308#endif
74ad5c7f
KH
1309 }
1310 else if (_heapinfo[block].busy.info.frag.nfree != 0)
1311 {
1312 /* If some fragments of this block are free, link this
1313 fragment into the fragment list after the first free
1314 fragment of this block. */
1315 next = (struct list *) ptr;
1316 next->next = prev->next;
1317 next->prev = prev;
1318 prev->next = next;
1319 if (next->next != NULL)
1320 next->next->prev = next;
1321 ++_heapinfo[block].busy.info.frag.nfree;
1322 }
1323 else
1324 {
1325 /* No fragments of this block are free, so link this
1326 fragment into the fragment list and announce that
1327 it is the first free fragment of this block. */
1328 prev = (struct list *) ptr;
1329 _heapinfo[block].busy.info.frag.nfree = 1;
1330 _heapinfo[block].busy.info.frag.first = (unsigned long int)
1331 ((unsigned long int) ((char *) ptr - (char *) NULL)
1332 % BLOCKSIZE >> type);
1333 prev->next = _fraghead[type].next;
1334 prev->prev = &_fraghead[type];
1335 prev->prev->next = prev;
1336 if (prev->next != NULL)
1337 prev->next->prev = prev;
1338 }
1339 break;
1340 }
177c0ea7 1341
5dcab13e 1342 PROTECT_MALLOC_STATE (1);
8d0d84d2
YM
1343}
1344
1345/* Return memory to the heap.
1346 Like `free' but don't call a __free_hook if there is one. */
1347void
1348_free_internal (ptr)
1349 __ptr_t ptr;
1350{
1351 LOCK ();
1352 _free_internal_nolock (ptr);
2f213514 1353 UNLOCK ();
74ad5c7f
KH
1354}
1355
1356/* Return memory to the heap. */
ca9c0567 1357
4624371d 1358void
74ad5c7f
KH
1359free (ptr)
1360 __ptr_t ptr;
1361{
8d0d84d2
YM
1362 void (*hook) (__ptr_t) = __free_hook;
1363
1364 if (hook != NULL)
1365 (*hook) (ptr);
74ad5c7f
KH
1366 else
1367 _free_internal (ptr);
1368}
1369
1370/* Define the `cfree' alias for `free'. */
1371#ifdef weak_alias
1372weak_alias (free, cfree)
1373#else
1374void
1375cfree (ptr)
1376 __ptr_t ptr;
1377{
1378 free (ptr);
1379}
1380#endif
1381/* Change the size of a block allocated by `malloc'.
1382 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1383 Written May 1989 by Mike Haertel.
1384
1385This library is free software; you can redistribute it and/or
423a1f3c 1386modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1387published by the Free Software Foundation; either version 2 of the
1388License, or (at your option) any later version.
1389
1390This library is distributed in the hope that it will be useful,
1391but WITHOUT ANY WARRANTY; without even the implied warranty of
1392MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1393General Public License for more details.
74ad5c7f 1394
423a1f3c
JB
1395You should have received a copy of the GNU General Public
1396License along with this library; see the file COPYING. If
3ef97fb6
LK
1397not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1398Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
1399
1400 The author may be reached (Email) at the address mike@ai.mit.edu,
1401 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1402
1403#ifndef _MALLOC_INTERNAL
1404#define _MALLOC_INTERNAL
1405#include <malloc.h>
1406#endif
1407
1408
1409
1410/* Cope with systems lacking `memmove'. */
4624371d 1411#if (!defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
74ad5c7f
KH
1412
1413#ifdef emacs
1414#undef __malloc_safe_bcopy
1415#define __malloc_safe_bcopy safe_bcopy
1416#else
1417
1418/* Snarfed directly from Emacs src/dispnew.c:
1419 XXX Should use system bcopy if it handles overlap. */
1420
1421/* Like bcopy except never gets confused by overlap. */
1422
1423void
1424__malloc_safe_bcopy (afrom, ato, size)
1425 __ptr_t afrom;
1426 __ptr_t ato;
1427 __malloc_size_t size;
1428{
1429 char *from = afrom, *to = ato;
1430
1431 if (size <= 0 || from == to)
1432 return;
1433
1434 /* If the source and destination don't overlap, then bcopy can
1435 handle it. If they do overlap, but the destination is lower in
1436 memory than the source, we'll assume bcopy can handle that. */
1437 if (to < from || from + size <= to)
1438 bcopy (from, to, size);
1439
1440 /* Otherwise, we'll copy from the end. */
1441 else
1442 {
1443 register char *endf = from + size;
1444 register char *endt = to + size;
1445
1446 /* If TO - FROM is large, then we should break the copy into
1447 nonoverlapping chunks of TO - FROM bytes each. However, if
1448 TO - FROM is small, then the bcopy function call overhead
1449 makes this not worth it. The crossover point could be about
1450 anywhere. Since I don't think the obvious copy loop is too
1451 bad, I'm trying to err in its favor. */
1452 if (to - from < 64)
1453 {
1454 do
1455 *--endt = *--endf;
1456 while (endf != from);
1457 }
1458 else
1459 {
1460 for (;;)
1461 {
1462 endt -= (to - from);
1463 endf -= (to - from);
1464
1465 if (endt < to)
1466 break;
1467
1468 bcopy (endf, endt, to - from);
1469 }
1470
1471 /* If SIZE wasn't a multiple of TO - FROM, there will be a
1472 little left over. The amount left over is
1473 (endt + (to - from)) - to, which is endt - from. */
1474 bcopy (from, to, endt - from);
1475 }
1476 }
1477}
1478#endif /* emacs */
1479
1480#ifndef memmove
0a27e8ed 1481extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
74ad5c7f
KH
1482#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
1483#endif
1484
1485#endif
1486
1487
1488#define min(A, B) ((A) < (B) ? (A) : (B))
1489
1490/* Debugging hook for realloc. */
0a27e8ed 1491__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
74ad5c7f
KH
1492
1493/* Resize the given region to the new size, returning a pointer
1494 to the (possibly moved) region. This is optimized for speed;
1495 some benchmarks seem to indicate that greater compactness is
1496 achieved by unconditionally allocating and copying to a
1497 new region. This module has incestuous knowledge of the
1498 internals of both free and malloc. */
1499__ptr_t
8d0d84d2 1500_realloc_internal_nolock (ptr, size)
74ad5c7f
KH
1501 __ptr_t ptr;
1502 __malloc_size_t size;
1503{
1504 __ptr_t result;
1505 int type;
1506 __malloc_size_t block, blocks, oldlimit;
1507
1508 if (size == 0)
1509 {
8d0d84d2
YM
1510 _free_internal_nolock (ptr);
1511 return _malloc_internal_nolock (0);
74ad5c7f
KH
1512 }
1513 else if (ptr == NULL)
8d0d84d2 1514 return _malloc_internal_nolock (size);
74ad5c7f
KH
1515
1516 block = BLOCK (ptr);
1517
5dcab13e 1518 PROTECT_MALLOC_STATE (0);
177c0ea7 1519
74ad5c7f
KH
1520 type = _heapinfo[block].busy.type;
1521 switch (type)
1522 {
1523 case 0:
1524 /* Maybe reallocate a large block to a small fragment. */
1525 if (size <= BLOCKSIZE / 2)
1526 {
8d0d84d2 1527 result = _malloc_internal_nolock (size);
74ad5c7f
KH
1528 if (result != NULL)
1529 {
1530 memcpy (result, ptr, size);
8d0d84d2 1531 _free_internal_nolock (ptr);
2f213514 1532 goto out;
74ad5c7f
KH
1533 }
1534 }
1535
1536 /* The new size is a large allocation as well;
1537 see if we can hold it in place. */
1538 blocks = BLOCKIFY (size);
1539 if (blocks < _heapinfo[block].busy.info.size)
1540 {
1541 /* The new size is smaller; return
1542 excess memory to the free list. */
1543 _heapinfo[block + blocks].busy.type = 0;
1544 _heapinfo[block + blocks].busy.info.size
1545 = _heapinfo[block].busy.info.size - blocks;
1546 _heapinfo[block].busy.info.size = blocks;
1547 /* We have just created a new chunk by splitting a chunk in two.
1548 Now we will free this chunk; increment the statistics counter
1549 so it doesn't become wrong when _free_internal decrements it. */
1550 ++_chunks_used;
8d0d84d2 1551 _free_internal_nolock (ADDRESS (block + blocks));
74ad5c7f
KH
1552 result = ptr;
1553 }
1554 else if (blocks == _heapinfo[block].busy.info.size)
1555 /* No size change necessary. */
1556 result = ptr;
1557 else
1558 {
1559 /* Won't fit, so allocate a new region that will.
1560 Free the old region first in case there is sufficient
1561 adjacent free space to grow without moving. */
1562 blocks = _heapinfo[block].busy.info.size;
1563 /* Prevent free from actually returning memory to the system. */
1564 oldlimit = _heaplimit;
1565 _heaplimit = 0;
8d0d84d2
YM
1566 _free_internal_nolock (ptr);
1567 result = _malloc_internal_nolock (size);
5dcab13e 1568 PROTECT_MALLOC_STATE (0);
74ad5c7f
KH
1569 if (_heaplimit == 0)
1570 _heaplimit = oldlimit;
1571 if (result == NULL)
1572 {
1573 /* Now we're really in trouble. We have to unfree
1574 the thing we just freed. Unfortunately it might
1575 have been coalesced with its neighbors. */
1576 if (_heapindex == block)
8d0d84d2 1577 (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
74ad5c7f
KH
1578 else
1579 {
1580 __ptr_t previous
8d0d84d2
YM
1581 = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
1582 (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
1583 _free_internal_nolock (previous);
74ad5c7f 1584 }
2f213514 1585 goto out;
74ad5c7f
KH
1586 }
1587 if (ptr != result)
1588 memmove (result, ptr, blocks * BLOCKSIZE);
1589 }
1590 break;
1591
1592 default:
1593 /* Old size is a fragment; type is logarithm
1594 to base two of the fragment size. */
1595 if (size > (__malloc_size_t) (1 << (type - 1)) &&
1596 size <= (__malloc_size_t) (1 << type))
1597 /* The new size is the same kind of fragment. */
1598 result = ptr;
1599 else
1600 {
1601 /* The new size is different; allocate a new space,
1602 and copy the lesser of the new size and the old. */
8d0d84d2 1603 result = _malloc_internal_nolock (size);
74ad5c7f 1604 if (result == NULL)
2f213514 1605 goto out;
74ad5c7f 1606 memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
8d0d84d2 1607 _free_internal_nolock (ptr);
74ad5c7f
KH
1608 }
1609 break;
1610 }
1611
5dcab13e 1612 PROTECT_MALLOC_STATE (1);
2f213514 1613 out:
8d0d84d2
YM
1614 return result;
1615}
1616
1617__ptr_t
1618_realloc_internal (ptr, size)
1619 __ptr_t ptr;
1620 __malloc_size_t size;
1621{
1622 __ptr_t result;
1623
1624 LOCK();
1625 result = _realloc_internal_nolock (ptr, size);
2f213514 1626 UNLOCK ();
8d0d84d2 1627
74ad5c7f
KH
1628 return result;
1629}
1630
1631__ptr_t
1632realloc (ptr, size)
1633 __ptr_t ptr;
1634 __malloc_size_t size;
1635{
8d0d84d2
YM
1636 __ptr_t (*hook) (__ptr_t, __malloc_size_t);
1637
74ad5c7f
KH
1638 if (!__malloc_initialized && !__malloc_initialize ())
1639 return NULL;
1640
8d0d84d2
YM
1641 hook = __realloc_hook;
1642 return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
74ad5c7f
KH
1643}
1644/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1645
1646This library is free software; you can redistribute it and/or
423a1f3c 1647modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1648published by the Free Software Foundation; either version 2 of the
1649License, or (at your option) any later version.
1650
1651This library is distributed in the hope that it will be useful,
1652but WITHOUT ANY WARRANTY; without even the implied warranty of
1653MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1654General Public License for more details.
74ad5c7f 1655
423a1f3c
JB
1656You should have received a copy of the GNU General Public
1657License along with this library; see the file COPYING. If
3ef97fb6
LK
1658not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1659Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
1660
1661 The author may be reached (Email) at the address mike@ai.mit.edu,
1662 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1663
1664#ifndef _MALLOC_INTERNAL
1665#define _MALLOC_INTERNAL
1666#include <malloc.h>
1667#endif
1668
1669/* Allocate an array of NMEMB elements each SIZE bytes long.
1670 The entire array is initialized to zeros. */
1671__ptr_t
1672calloc (nmemb, size)
1673 register __malloc_size_t nmemb;
1674 register __malloc_size_t size;
1675{
1676 register __ptr_t result = malloc (nmemb * size);
1677
1678 if (result != NULL)
1679 (void) memset (result, 0, nmemb * size);
1680
1681 return result;
1682}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined(__GNU_LIBRARY__) || defined(__UCLIBC__)
#define __sbrk sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined(CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
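
/* Illustrative sketch, not part of this file: the allocator reaches the
   system through the `__morecore' function pointer, which starts out
   holding __default_morecore above, so an embedder can instead carve
   heap space from a static arena.  The names `arena', `arena_used', and
   `arena_morecore' below are made up for the example, and negative
   increments (shrinking) are simply refused:

     static char arena[1 << 20];
     static __malloc_size_t arena_used;

     static __ptr_t
     arena_morecore (__malloc_ptrdiff_t increment)
     {
       if (increment < 0
	   || arena_used + increment > sizeof arena)
	 return NULL;
       arena_used += increment;
       return arena + arena_used - increment;
     }

     ...
     __morecore = arena_morecore;
*/
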
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				__malloc_size_t __alignment));

__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
  if (adj != 0)
    adj = alignment - adj;

  if (adj != alignment - 1)
    {
      do
	{
	  /* Reallocate the block with only as much excess as it needs.  */
	  free (result);
	  result = malloc (size + adj);
	  if (result == NULL)	/* Impossible unless interrupted.  */
	    return NULL;

	  lastadj = adj;
	  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
	  if (adj != 0)
	    adj = alignment - adj;
	  /* It's conceivable we might have been so unlucky as to get a
	     different block with weaker alignment.  If so, this block is too
	     short to contain SIZE after alignment correction.  So we must
	     try again and get another block, slightly larger.  */
	} while (adj > lastadj);
    }

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
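
/* Worked example (illustrative): with ALIGNMENT == 64 and a first block
   at address 0x100028, the residue is 0x28 (40), so ADJ becomes
   64 - 40 == 24.  The block is then reallocated with 24 bytes of slack,
   the returned pointer is advanced by 24 bytes to the 64-byte boundary
   0x100040, and the (exact, aligned) pair is recorded in
   `_aligned_blocks' so that `free' can map the interior pointer back to
   the true block start.  */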

#ifndef ENOMEM
#define ENOMEM 12
#endif

#ifndef EINVAL
#define EINVAL 22
#endif

int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
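
/* Usage sketch (illustrative): posix_memalign reports failure through
   its return value rather than errno, so the result is checked directly.

     __ptr_t buf;
     if (posix_memalign (&buf, 64, 4096) == 0)
       {
	 ... use BUF, which is 64-byte aligned ...
	 free (buf);
       }

   An alignment that is zero, not a multiple of sizeof (__ptr_t), or not
   a power of two yields EINVAL without allocating.  */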

/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int.  */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
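
/* Usage sketch (illustrative): a page-aligned buffer, e.g. for code
   that wants to mprotect a region, could be obtained as

     char *page = valloc (__getpagesize ());

   The first call latches the page size in `pagesize'; everything else
   is delegated to memalign above.  */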

#endif /* Not ELIDE_VALLOC. */

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef emacs
#include <stdio.h>
#else
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#include <stdio.h>
#endif
#endif

/* Old hook values.  */
static void (*old_free_hook) (__ptr_t ptr);
static __ptr_t (*old_malloc_hook) (__malloc_size_t size);
static __ptr_t (*old_realloc_hook) (__ptr_t ptr, __malloc_size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
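
/* Illustrative layout of a block while these hooks are installed (the
   picture is implied by mallochook below, not spelled out elsewhere):

     +-------------------+---------------------------+-----------+
     | struct hdr        | user data (hdr->size)     | MAGICBYTE |
     | size, MAGICWORD   | flooded with MALLOCFLOOD  |           |
     +-------------------+---------------------------+-----------+
     ^                   ^
     block start         pointer handed to the user

   checkhdr detects a clobbered header (MCHECK_HEAD), a double free via
   MAGICFREE (MCHECK_FREE), and an overwritten trailing MAGICBYTE
   (MCHECK_TAIL).  */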

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#define flood memset
#else
static void flood (__ptr_t, int, __malloc_size_t);
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;
  while (size--)
    *cp++ = val;
}
#endif

static enum mcheck_status checkhdr (const struct hdr *);
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void freehook (__ptr_t);
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static __ptr_t mallochook (__malloc_size_t);
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}

static __ptr_t reallochook (__ptr_t, __malloc_size_t);
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}

static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (func)
     void (*func) (enum mcheck_status);
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (__ptr_t ptr)
{
  /* PTR is the user pointer handed out by mallochook, so step back over
     the header before checking, as freehook and reallochook do.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}
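
/* Usage sketch (illustrative): mcheck must be armed before the first
   allocation, after which any live block can be probed for corruption.

     if (mcheck (NULL) == 0)	    (NULL selects the default mabort)
       {
	 char *p = malloc (16);
	 p[16] = 0;		    (clobbers the trailing MAGICBYTE)
	 mprobe (p);		    (reports MCHECK_TAIL and aborts)
       }
*/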

#endif /* GC_MCHECK */