/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_GTK_AND_PTHREAD
#define USE_PTHREAD
#endif

#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES) \
     && ! defined (BROKEN_PROTOTYPES))
#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef PP
#define PP(args) ()
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define memset(s, zero, n) bzero ((s), (n))
#endif
#ifndef memcpy
#define memcpy(d, s, n) bcopy ((s), (d), (n))
#endif
#endif

#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#endif /* _MALLOC_INTERNAL.  */


#ifdef __cplusplus
extern "C"
{
#endif

#ifdef STDC_HEADERS
#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t
#else
#ifdef __GNUC__
#include <stddef.h>
#ifdef __SIZE_TYPE__
#define __malloc_size_t __SIZE_TYPE__
#endif
#endif
#ifndef __malloc_size_t
#define __malloc_size_t unsigned int
#endif
#define __malloc_ptrdiff_t int
#endif

#ifndef NULL
#define NULL 0
#endif


/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free PP ((__ptr_t __ptr));

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#if !defined (_MALLOC_INTERNAL) || defined (MSDOS) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
			     __malloc_size_t __size));
extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
			       __malloc_size_t size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#endif

#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT (CHAR_BIT * sizeof (int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
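
/* Illustrative sketch (not part of the allocator, compiled out): the
   size arithmetic above, assuming INT_BIT > 16 so that BLOCKLOG is 12
   and BLOCKSIZE is 4096.  Large requests round up to whole blocks;
   small requests are served from power-of-two fragments.  */
#if 0
#include <assert.h>
static void
gmalloc_size_arithmetic_example (void)
{
  assert (BLOCKSIZE == 4096);	  /* Assuming INT_BIT > 16.  */
  assert (BLOCKIFY (1) == 1);	  /* Partial blocks round up.  */
  assert (BLOCKIFY (4096) == 1);  /* An exact block stays one block.  */
  assert (BLOCKIFY (10000) == 3); /* 10000 bytes span three blocks.  */
  /* A 100-byte request would instead get a 128-byte fragment, the
     smallest power of two that fits.  */
}
#endif /* 0 */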

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
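
/* Illustrative sketch (not part of the allocator, compiled out):
   BLOCK and ADDRESS are inverses.  Block numbers are 1-based, so the
   block starting at _heapbase is block 1, and any address inside a
   block maps to that block's number.  */
#if 0
#include <assert.h>
static void
gmalloc_block_mapping_example (void)
{
  char *p = _heapbase + 5 * BLOCKSIZE + 123;	/* Somewhere in block 6.  */
  assert (BLOCK (p) == 6);
  assert ((char *) ADDRESS (6) == _heapbase + 5 * BLOCKSIZE);
}
#endif /* 0 */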

/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memalign returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif
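
/* Usage sketch (illustrative only, compiled out): every entry point
   that touches shared allocator state brackets its critical section
   with these macros.  They are no-ops until malloc_enable_thread has
   set _malloc_thread_enabled_p.  */
#if 0
static void
gmalloc_locking_example (void)
{
  LOCK ();
  /* ... inspect or modify _heapinfo, the free lists, and the
     statistics counters here ...  */
  UNLOCK ();
}
#endif /* 0 */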

#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				       __malloc_size_t __alignment));

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	 /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	 /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
				 void (*__warnfun) PP ((const char *))));


/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));


#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* How to really get more memory.  */
#if defined(CYGWIN)
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
#endif
__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif


/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t) size < 0)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
						  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      new = (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
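
/* Worked example of the adjustment arithmetic above (illustrative
   only): suppose BLOCKSIZE is 4096 (0x1000) and __morecore hands back
   core at 0x1010.  Then adj is 0x10, so align requests another
   0x1000 - 0x10 = 0xff0 bytes and returns 0x1010 + 0xff0 = 0x2000,
   the next block boundary.  The 0x10 bytes at the front are simply
   left unused.  */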

/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}
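
/* Usage sketch (illustrative only, compiled out): the caller in
   _malloc_internal_nolock uses this to try to grow the final free
   cluster in place, falling back to a fresh region when the new core
   is not contiguous.  This is only a paraphrase of that caller's
   control flow.  Note that `(*__morecore) (0)' is an sbrk-style probe
   returning the current break without moving it; the address granted
   is checked again afterwards because __morecore need not grow the
   heap contiguously.  */
#if 0
static void
grow_in_place_example (__malloc_size_t extra_blocks)
{
  __ptr_t at_end = ADDRESS (_heaplimit);
  if (get_contiguous_space (extra_blocks * BLOCKSIZE, at_end))
    {
      /* The trailing free cluster just got EXTRA_BLOCKS bigger.  */
    }
  else
    {
      /* Not contiguous: allocate a separate region instead.  */
    }
}
#endif /* 0 */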


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void register_heapinfo PP ((void));
#ifdef __GNUC__
__inline__
#endif
static void
register_heapinfo ()
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
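
/* Illustrative note: for a 3-block object starting at block B, the
   loop above leaves busy.info.size values of 3, -1, -2 in blocks B,
   B+1, B+2.  Given any block of the object, adding its non-positive
   size to its block number recovers the first block: (B+2) + (-2)
   == B.  */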

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif
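
/* Usage sketch (illustrative only, compiled out): a host program is
   expected to call malloc_enable_thread once, on the main thread,
   before creating any other thread that may allocate.  */
#if 0
#include <pthread.h>
static void *
worker (void *arg)
{
  void *p = malloc (128);	/* Now guarded by _malloc_mutex.  */
  free (p);
  return arg;
}

static void
startup_example (void)
{
  pthread_t tid;
  malloc_enable_thread ();	/* Before any concurrent allocation.  */
  pthread_create (&tid, NULL, worker, NULL);
  pthread_join (tid, NULL);
}
#endif /* 0 */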

static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal_nolock
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
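
/* Worked example of the table growth above (illustrative only): the
   info table doubles until it covers the new break.  With BLOCKSIZE
   == 4096 and heapsize == 1024, the table describes 4 MB of heap; a
   request whose end falls in block 1500 forces one doubling, to
   newsize == 2048 entries, covering 8 MB.  */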

/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not a right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
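
/* Usage sketch (illustrative only, compiled out): how a debugging
   layer might interpose on allocation through __malloc_hook.  The
   hook must fall back to _malloc_internal rather than malloc, or it
   would invoke itself recursively.  The hook name here is
   hypothetical.  */
#if 0
#include <stdio.h>
static __ptr_t
logging_malloc_hook (__malloc_size_t size)
{
  __ptr_t p = _malloc_internal (size);
  fprintf (stderr, "malloc (%lu) -> %p\n", (unsigned long) size, p);
  return p;
}

static void
install_hook_example (void)
{
  __malloc_hook = logging_malloc_hook;
}
#endif /* 0 */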

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Cope with systems lacking `memmove'.  */
#ifndef memmove
#if (!defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#endif
/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif
#endif


/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
								 * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
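
/* Illustrative note on the coalescing above: freeing block B first
   merges it with a free predecessor P when P + P->size == B, then
   with a free successor S when B + B->size == S, so adjacent free
   clusters always collapse into a single free-list entry.  Core is
   actually handed back through (*__morecore) (-bytes) only when the
   resulting cluster abuts the break and reaches lesscore_threshold
   blocks.  */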

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Cope with systems lacking `memmove'.  */
#if (!defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else

/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
	 nonoverlapping chunks of TO - FROM bytes each.  However, if
	 TO - FROM is small, then the bcopy function call overhead
	 makes this not worth it.  The crossover point could be about
	 anywhere.  Since I don't think the obvious copy loop is too
	 bad, I'm trying to err in its favor.  */
      if (to - from < 64)
	{
	  do
	    *--endt = *--endf;
	  while (endf != from);
	}
      else
	{
	  for (;;)
	    {
	      endt -= (to - from);
	      endf -= (to - from);

	      if (endt < to)
		break;

	      bcopy (endf, endt, to - from);
	    }

	  /* If SIZE wasn't a multiple of TO - FROM, there will be a
	     little left over.  The amount left over is
	     (endt + (to - from)) - to, which is endt - from.  */
	  bcopy (from, to, endt - from);
	}
    }
}
#endif /* emacs */

#ifndef memmove
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif

#endif


#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
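
/* Usage sketch (illustrative only, compiled out): the standard
   realloc growth idiom.  Keeping the old pointer until the call is
   known to have succeeded matters, because on failure realloc
   returns NULL and the original block remains valid.  */
#if 0
static char *
grow_buffer_example (char *buf, __malloc_size_t newsize)
{
  char *p = realloc (buf, newsize);
  if (p == NULL)
    {
      free (buf);	/* Old block is still valid; release it.  */
      return NULL;
    }
  return p;
}
#endif /* 0 */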
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result;

  /* Guard against overflow in NMEMB * SIZE; a product that wraps
     around would quietly allocate too little.  */
  if (size != 0 && nmemb > (__malloc_size_t) -1 / size)
    return NULL;

  result = malloc (nmemb * size);
  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined(__GNU_LIBRARY__) || defined(__UCLIBC__)
#define __sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined(CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
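
/* Illustrative sketch (not compiled into this file): the core
   allocator's failure convention differs from sbrk's.  sbrk reports
   failure as (void *) -1, which the wrapper above converts to NULL;
   an increment of zero simply reads the current break.  The extern
   declaration repeats the definition above.  */
#if 0
#include <stdio.h>

extern __ptr_t __default_morecore (__malloc_ptrdiff_t);

int
main (void)
{
  __ptr_t brk0 = __default_morecore (0);	/* Current break.  */
  __ptr_t more = __default_morecore (4096);	/* Grow by 4096 bytes.  */
  printf ("break at %p, new space at %p\n", brk0, more);
  __default_morecore (-4096);			/* Shrink it back.  */
  return 0;
}
#endif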
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

__ptr_t (*__memalign_hook) PP ((__malloc_size_t __alignment,
				__malloc_size_t __size));

__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  ADJ must be the number of
     padding bytes, not the address modulo the alignment: computing
     the remainder and then allocating ADJ + SIZE bytes below would
     leave the block too short whenever the remainder is less than
     half the alignment.  */
  adj = alignment
    - (unsigned long int) ((char *) result - (char *) NULL) % alignment;
  if (adj == alignment)
    adj = 0;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (size + adj);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = alignment
	- (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      if (adj == alignment)
	adj = 0;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
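
/* Usage sketch (not compiled into this file): the returned address is
   a multiple of ALIGNMENT, and the _aligned_blocks list above is what
   lets free accept that address even though it may point into the
   middle of the underlying block.  */
#if 0
#include <stdio.h>

int
main (void)
{
  __ptr_t p = memalign (64, 100);
  if (p != NULL)
    {
      printf ("64-byte aligned: %s\n",
	      (unsigned long int) (char *) p % 64 == 0 ? "yes" : "no");
      free (p);			/* Translated via _aligned_blocks.  */
    }
  return 0;
}
#endif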

#ifndef ENOMEM
#define ENOMEM 12
#endif

#ifndef EINVAL
#define EINVAL 22
#endif

int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}

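/* Usage sketch (not compiled into this file): unlike memalign,
   posix_memalign reports failure through its return value and
   additionally requires the alignment to be a power of two and a
   multiple of sizeof (__ptr_t).  */
#if 0
#include <stdio.h>

int
main (void)
{
  __ptr_t buf;

  if (posix_memalign (&buf, 4096, 8192) == 0)
    {
      printf ("page-aligned buffer at %p\n", buf);
      free (buf);
    }

  /* 24 is a multiple of sizeof (__ptr_t) on common 64-bit hosts but
     not a power of two, so this returns EINVAL.  */
  printf ("%d\n", posix_memalign (&buf, 24, 100));
  return 0;
}
#endif
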
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int.  */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}

#endif /* Not ELIDE_VALLOC.  */
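
/* Usage sketch (not compiled into this file): valloc is memalign with
   the system page size, so the result is suitable for page-granular
   operations such as mprotect.  */
#if 0
#include <stdio.h>

int
main (void)
{
  __ptr_t page = valloc (1);	/* One byte, but page-aligned.  */
  if (page != NULL)
    {
      printf ("page-aligned allocation at %p\n", page);
      free (page);
    }
  return 0;
}
#endif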

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef emacs
#include <stdio.h>
#else
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#include <stdio.h>
#endif
#endif

/* Old hook values.  */
static void (*old_free_hook) PP ((__ptr_t ptr));
static __ptr_t (*old_malloc_hook) PP ((__malloc_size_t size));
static __ptr_t (*old_realloc_hook) PP ((__ptr_t ptr, __malloc_size_t size));

/* Function to call when something awful happens.  */
static void (*abortfunc) PP ((enum mcheck_status));

/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#define flood memset
#else
static void flood PP ((__ptr_t, int, __malloc_size_t));
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;
  while (size--)
    *cp++ = val;
}
#endif

static enum mcheck_status checkhdr PP ((const struct hdr *));
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void freehook PP ((__ptr_t));
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static __ptr_t mallochook PP ((__malloc_size_t));
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}

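/* Block layout produced by mallochook: the pointer handed to the
   caller is HDR + 1, the SIZE user bytes are flooded with
   MALLOCFLOOD, and the single MAGICBYTE trailer is what lets
   checkhdr detect a write one byte past the end of the block.

     | struct hdr (size, MAGICWORD) | SIZE user bytes | MAGICBYTE |
     ^                              ^
     block returned by malloc       pointer given to the caller  */
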
static __ptr_t reallochook PP ((__ptr_t, __malloc_size_t));
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}

static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (func)
     void (*func) PP ((enum mcheck_status));
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (__ptr_t ptr)
{
  /* PTR is the user pointer, which sits just past the header, so step
     back to the header before checking it; passing PTR itself would
     read user data as if it were the header.  */
  return mcheck_used ? checkhdr ((struct hdr *) ptr - 1) : MCHECK_DISABLED;
}

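/* Usage sketch (not compiled into this file): mcheck must run before
   the first allocation so the hooks can be installed; mprobe then
   classifies a live block, and an off-by-one write is caught by the
   MAGICBYTE trailer check when the block is freed.  */
#if 0
#include <stdio.h>

int
main (void)
{
  char *p;

  if (mcheck (NULL) != 0)	/* NULL selects the default mabort.  */
    return 1;

  p = malloc (16);
  printf ("mprobe says %d\n", (int) mprobe (p));  /* MCHECK_OK */

  p[16] = 'x';			/* Clobber the MAGICBYTE trailer...  */
  free (p);			/* ...and mcheck aborts: MCHECK_TAIL.  */
  return 0;
}
#endif
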
#endif /* GC_MCHECK */

/* arch-tag: 93dce5c0-f49a-41b5-86b1-f91c4169c02e
   (do not change this comment) */