* lib-src/fakemail.c (action): Convert function definitions to standard C.
[bpt/emacs.git] / src / gmalloc.c
CommitLineData
74ad5c7f
KH
1/* This file is no longer automatically generated from libc. */
2
3#define _MALLOC_INTERNAL
4
5/* The malloc headers and source files from the C library follow here. */
6
7/* Declarations for `malloc' and friends.
0b5538bd 8 Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
4e6835db 9 2005, 2006, 2007 Free Software Foundation, Inc.
74ad5c7f
KH
10 Written May 1989 by Mike Haertel.
11
12This library is free software; you can redistribute it and/or
423a1f3c 13modify it under the terms of the GNU General Public License as
74ad5c7f
KH
14published by the Free Software Foundation; either version 2 of the
15License, or (at your option) any later version.
16
17This library is distributed in the hope that it will be useful,
18but WITHOUT ANY WARRANTY; without even the implied warranty of
19MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 20General Public License for more details.
74ad5c7f 21
423a1f3c
JB
22You should have received a copy of the GNU General Public
23License along with this library; see the file COPYING. If
3ef97fb6
LK
24not, write to the Free Software Foundation, Inc., 51 Franklin Street,
25Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
26
27 The author may be reached (Email) at the address mike@ai.mit.edu,
28 or (US mail) as Mike Haertel c/o Free Software Foundation. */
29
30#ifndef _MALLOC_H
31
32#define _MALLOC_H 1
33
34#ifdef _MALLOC_INTERNAL
35
36#ifdef HAVE_CONFIG_H
37#include <config.h>
38#endif
39
8d0d84d2
YM
40#ifdef HAVE_GTK_AND_PTHREAD
41#define USE_PTHREAD
42#endif
43
b2e92d3e 44#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
75934b1d 45 || defined STDC_HEADERS || defined PROTOTYPES))
0a27e8ed
RS
46#undef PP
47#define PP(args) args
74ad5c7f
KH
48#undef __ptr_t
49#define __ptr_t void *
50#else /* Not C++ or ANSI C. */
0a27e8ed
RS
51#undef PP
52#define PP(args) ()
74ad5c7f
KH
53#undef __ptr_t
54#define __ptr_t char *
55#endif /* C++ or ANSI C. */
56
57#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
58#include <string.h>
59#else
60#ifndef memset
61#define memset(s, zero, n) bzero ((s), (n))
62#endif
63#ifndef memcpy
64#define memcpy(d, s, n) bcopy ((s), (d), (n))
65#endif
66#endif
67
ca9c0567 68#ifdef HAVE_LIMITS_H
74ad5c7f 69#include <limits.h>
ca9c0567 70#endif
74ad5c7f
KH
71#ifndef CHAR_BIT
72#define CHAR_BIT 8
73#endif
74ad5c7f
KH
74
75#ifdef HAVE_UNISTD_H
76#include <unistd.h>
77#endif
78
2f213514
YM
79#ifdef USE_PTHREAD
80#include <pthread.h>
81#endif
82
74ad5c7f
KH
83#endif /* _MALLOC_INTERNAL. */
84
85
86#ifdef __cplusplus
87extern "C"
88{
89#endif
90
ca9c0567 91#ifdef STDC_HEADERS
74ad5c7f
KH
92#include <stddef.h>
93#define __malloc_size_t size_t
94#define __malloc_ptrdiff_t ptrdiff_t
95#else
eec2d1de
EZ
96#ifdef __GNUC__
97#include <stddef.h>
98#ifdef __SIZE_TYPE__
99#define __malloc_size_t __SIZE_TYPE__
100#endif
101#endif
102#ifndef __malloc_size_t
74ad5c7f 103#define __malloc_size_t unsigned int
eec2d1de 104#endif
74ad5c7f
KH
105#define __malloc_ptrdiff_t int
106#endif
107
108#ifndef NULL
109#define NULL 0
110#endif
111
112
113/* Allocate SIZE bytes of memory. */
0a27e8ed 114extern __ptr_t malloc PP ((__malloc_size_t __size));
74ad5c7f
KH
115/* Re-allocate the previously allocated block
116 in __ptr_t, making the new block SIZE bytes long. */
0a27e8ed 117extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
74ad5c7f 118/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
0a27e8ed 119extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
74ad5c7f 120/* Free a block allocated by `malloc', `realloc' or `calloc'. */
4624371d 121extern void free PP ((__ptr_t __ptr));
74ad5c7f
KH
122
123/* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
ed68db4d 124#if !defined (_MALLOC_INTERNAL) || defined (MSDOS) /* Avoid conflict. */
0a27e8ed
RS
125extern __ptr_t memalign PP ((__malloc_size_t __alignment,
126 __malloc_size_t __size));
72359c32
YM
127extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
128 __malloc_size_t size));
74ad5c7f
KH
129#endif
130
131/* Allocate SIZE bytes on a page boundary. */
132#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
0a27e8ed 133extern __ptr_t valloc PP ((__malloc_size_t __size));
74ad5c7f
KH
134#endif
135
3ceeb306
YM
136#ifdef USE_PTHREAD
137/* Set up mutexes and make malloc etc. thread-safe. */
138extern void malloc_enable_thread PP ((void));
139#endif
74ad5c7f
KH
140
141#ifdef _MALLOC_INTERNAL
142
143/* The allocator divides the heap into blocks of fixed size; large
144 requests receive one or more whole blocks, and small requests
145 receive a fragment of a block. Fragment sizes are powers of two,
146 and all fragments of a block are the same size. When all the
147 fragments in a block have been freed, the block itself is freed. */
148#define INT_BIT (CHAR_BIT * sizeof(int))
149#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
150#define BLOCKSIZE (1 << BLOCKLOG)
151#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
152
153/* Determine the amount of memory spanned by the initial heap table
154 (not an absolute limit). */
155#define HEAP (INT_BIT > 16 ? 4194304 : 65536)
156
157/* Number of contiguous free blocks allowed to build up at the end of
158 memory before they will be returned to the system. */
159#define FINAL_FREE_BLOCKS 8
160
161/* Data structure giving per-block information. */
162typedef union
163 {
164 /* Heap information for a busy block. */
165 struct
166 {
167 /* Zero for a large (multiblock) object, or positive giving the
168 logarithm to the base two of the fragment size. */
169 int type;
170 union
171 {
172 struct
173 {
174 __malloc_size_t nfree; /* Free frags in a fragmented block. */
175 __malloc_size_t first; /* First free fragment of the block. */
176 } frag;
177 /* For a large object, in its first block, this has the number
178 of blocks in the object. In the other blocks, this has a
179 negative number which says how far back the first block is. */
180 __malloc_ptrdiff_t size;
181 } info;
182 } busy;
183 /* Heap information for a free block
184 (that may be the first of a free cluster). */
185 struct
186 {
187 __malloc_size_t size; /* Size (in blocks) of a free cluster. */
188 __malloc_size_t next; /* Index of next free cluster. */
189 __malloc_size_t prev; /* Index of previous free cluster. */
190 } free;
191 } malloc_info;
192
193/* Pointer to first block of the heap. */
194extern char *_heapbase;
195
196/* Table indexed by block number giving per-block information. */
197extern malloc_info *_heapinfo;
198
199/* Address to block number and vice versa. */
200#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
201#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
202
203/* Current search index for the heap table. */
204extern __malloc_size_t _heapindex;
205
206/* Limit of valid info table indices. */
207extern __malloc_size_t _heaplimit;
208
209/* Doubly linked lists of free fragments. */
210struct list
211 {
212 struct list *next;
213 struct list *prev;
214 };
215
216/* Free list headers for each fragment size. */
217extern struct list _fraghead[];
218
219/* List of blocks allocated with `memalign' (or `valloc'). */
220struct alignlist
221 {
222 struct alignlist *next;
223 __ptr_t aligned; /* The address that memaligned returned. */
224 __ptr_t exact; /* The address that malloc returned. */
225 };
226extern struct alignlist *_aligned_blocks;
227
228/* Instrumentation. */
229extern __malloc_size_t _chunks_used;
230extern __malloc_size_t _bytes_used;
231extern __malloc_size_t _chunks_free;
232extern __malloc_size_t _bytes_free;
233
234/* Internal versions of `malloc', `realloc', and `free'
235 used when these functions need to call each other.
236 They are the same but don't call the hooks. */
0a27e8ed
RS
237extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
238extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
239extern void _free_internal PP ((__ptr_t __ptr));
8d0d84d2
YM
240extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
241extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
242extern void _free_internal_nolock PP ((__ptr_t __ptr));
74ad5c7f 243
2f213514 244#ifdef USE_PTHREAD
8d0d84d2 245extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
3ceeb306
YM
246extern int _malloc_thread_enabled_p;
247#define LOCK() \
248 do { \
249 if (_malloc_thread_enabled_p) \
250 pthread_mutex_lock (&_malloc_mutex); \
251 } while (0)
252#define UNLOCK() \
253 do { \
254 if (_malloc_thread_enabled_p) \
255 pthread_mutex_unlock (&_malloc_mutex); \
256 } while (0)
257#define LOCK_ALIGNED_BLOCKS() \
258 do { \
259 if (_malloc_thread_enabled_p) \
260 pthread_mutex_lock (&_aligned_blocks_mutex); \
261 } while (0)
262#define UNLOCK_ALIGNED_BLOCKS() \
263 do { \
264 if (_malloc_thread_enabled_p) \
265 pthread_mutex_unlock (&_aligned_blocks_mutex); \
266 } while (0)
2f213514
YM
267#else
268#define LOCK()
269#define UNLOCK()
8d0d84d2
YM
270#define LOCK_ALIGNED_BLOCKS()
271#define UNLOCK_ALIGNED_BLOCKS()
2f213514
YM
272#endif
273
74ad5c7f
KH
274#endif /* _MALLOC_INTERNAL. */
275
276/* Given an address in the middle of a malloc'd object,
277 return the address of the beginning of the object. */
0a27e8ed 278extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));
74ad5c7f
KH
279
280/* Underlying allocation function; successive calls should
281 return contiguous pieces of memory. */
0a27e8ed 282extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));
74ad5c7f
KH
283
284/* Default value of `__morecore'. */
0a27e8ed 285extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));
74ad5c7f
KH
286
287/* If not NULL, this function is called after each time
288 `__morecore' is called to increase the data size. */
0a27e8ed 289extern void (*__after_morecore_hook) PP ((void));
74ad5c7f
KH
290
291/* Number of extra blocks to get each time we ask for more core.
292 This reduces the frequency of calling `(*__morecore)'. */
293extern __malloc_size_t __malloc_extra_blocks;
294
295/* Nonzero if `malloc' has been called and done its initialization. */
296extern int __malloc_initialized;
297/* Function called to initialize malloc data structures. */
0a27e8ed 298extern int __malloc_initialize PP ((void));
74ad5c7f
KH
299
300/* Hooks for debugging versions. */
0a27e8ed
RS
301extern void (*__malloc_initialize_hook) PP ((void));
302extern void (*__free_hook) PP ((__ptr_t __ptr));
303extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
304extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
305extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
306 __malloc_size_t __alignment));
74ad5c7f
KH
307
308/* Return values for `mprobe': these are the kinds of inconsistencies that
309 `mcheck' enables detection of. */
310enum mcheck_status
311 {
312 MCHECK_DISABLED = -1, /* Consistency checking is not turned on. */
313 MCHECK_OK, /* Block is fine. */
314 MCHECK_FREE, /* Block freed twice. */
315 MCHECK_HEAD, /* Memory before the block was clobbered. */
316 MCHECK_TAIL /* Memory after the block was clobbered. */
317 };
318
319/* Activate a standard collection of debugging hooks. This must be called
320 before `malloc' is ever called. ABORTFUNC is called with an error code
321 (see enum above) when an inconsistency is detected. If ABORTFUNC is
322 null, the standard function prints on stderr and then calls `abort'. */
0a27e8ed 323extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));
74ad5c7f
KH
324
325/* Check for aberrations in a particular malloc'd block. You must have
326 called `mcheck' already. These are the same checks that `mcheck' does
327 when you free or reallocate a block. */
0a27e8ed 328extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
74ad5c7f
KH
329
330/* Activate a standard collection of tracing hooks. */
0a27e8ed
RS
331extern void mtrace PP ((void));
332extern void muntrace PP ((void));
74ad5c7f
KH
333
334/* Statistics available to the user. */
335struct mstats
336 {
337 __malloc_size_t bytes_total; /* Total size of the heap. */
338 __malloc_size_t chunks_used; /* Chunks allocated by the user. */
339 __malloc_size_t bytes_used; /* Byte total of user-allocated chunks. */
340 __malloc_size_t chunks_free; /* Chunks in the free list. */
341 __malloc_size_t bytes_free; /* Byte total of chunks in the free list. */
342 };
343
344/* Pick up the current statistics. */
0a27e8ed 345extern struct mstats mstats PP ((void));
74ad5c7f
KH
346
347/* Call WARNFUN with a warning message when memory usage is high. */
0a27e8ed
RS
348extern void memory_warnings PP ((__ptr_t __start,
349 void (*__warnfun) PP ((const char *))));
74ad5c7f
KH
350
351
352/* Relocating allocator. */
353
354/* Allocate SIZE bytes, and store the address in *HANDLEPTR. */
0a27e8ed 355extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
74ad5c7f
KH
356
357/* Free the storage allocated in HANDLEPTR. */
0a27e8ed 358extern void r_alloc_free PP ((__ptr_t *__handleptr));
74ad5c7f
KH
359
360/* Adjust the block at HANDLEPTR to be SIZE bytes long. */
0a27e8ed 361extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
74ad5c7f
KH
362
363
364#ifdef __cplusplus
365}
366#endif
367
368#endif /* malloc.h */
369/* Memory allocator `malloc'.
370 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
371 Written May 1989 by Mike Haertel.
372
373This library is free software; you can redistribute it and/or
423a1f3c 374modify it under the terms of the GNU General Public License as
74ad5c7f
KH
375published by the Free Software Foundation; either version 2 of the
376License, or (at your option) any later version.
377
378This library is distributed in the hope that it will be useful,
379but WITHOUT ANY WARRANTY; without even the implied warranty of
380MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 381General Public License for more details.
74ad5c7f 382
423a1f3c
JB
383You should have received a copy of the GNU General Public
384License along with this library; see the file COPYING. If
3ef97fb6
LK
385not, write to the Free Software Foundation, Inc., 51 Franklin Street,
386Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
387
388 The author may be reached (Email) at the address mike@ai.mit.edu,
389 or (US mail) as Mike Haertel c/o Free Software Foundation. */
390
391#ifndef _MALLOC_INTERNAL
392#define _MALLOC_INTERNAL
393#include <malloc.h>
394#endif
395#include <errno.h>
396
397/* How to really get more memory. */
ef6d1039
SM
398#if defined(CYGWIN)
399extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
400extern int bss_sbrk_did_unexec;
401#endif
3cacba85 402__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;
74ad5c7f
KH
403
404/* Debugging hook for `malloc'. */
0a27e8ed 405__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
74ad5c7f
KH
406
407/* Pointer to the base of the first block. */
408char *_heapbase;
409
410/* Block information table. Allocated with align/__free (not malloc/free). */
411malloc_info *_heapinfo;
412
413/* Number of info entries. */
414static __malloc_size_t heapsize;
415
416/* Search index in the info table. */
417__malloc_size_t _heapindex;
418
419/* Limit of valid info table indices. */
420__malloc_size_t _heaplimit;
421
422/* Free lists for each fragment size. */
423struct list _fraghead[BLOCKLOG];
424
425/* Instrumentation. */
426__malloc_size_t _chunks_used;
427__malloc_size_t _bytes_used;
428__malloc_size_t _chunks_free;
429__malloc_size_t _bytes_free;
430
431/* Are you experienced? */
432int __malloc_initialized;
433
434__malloc_size_t __malloc_extra_blocks;
435
0a27e8ed
RS
436void (*__malloc_initialize_hook) PP ((void));
437void (*__after_morecore_hook) PP ((void));
74ad5c7f 438
5dcab13e
GM
439#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE
440
441/* Some code for hunting a bug writing into _heapinfo.
442
443 Call this macro with argument PROT non-zero to protect internal
444 malloc state against writing to it, call it with a zero argument to
445 make it readable and writable.
446
447 Note that this only works if BLOCKSIZE == page size, which is
448 the case on the i386. */
449
450#include <sys/types.h>
451#include <sys/mman.h>
452
453static int state_protected_p;
454static __malloc_size_t last_state_size;
455static malloc_info *last_heapinfo;
456
457void
458protect_malloc_state (protect_p)
459 int protect_p;
460{
461 /* If _heapinfo has been relocated, make sure its old location
462 isn't left read-only; it will be reused by malloc. */
463 if (_heapinfo != last_heapinfo
464 && last_heapinfo
465 && state_protected_p)
466 mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);
467
468 last_state_size = _heaplimit * sizeof *_heapinfo;
469 last_heapinfo = _heapinfo;
177c0ea7 470
5dcab13e
GM
471 if (protect_p != state_protected_p)
472 {
473 state_protected_p = protect_p;
474 if (mprotect (_heapinfo, last_state_size,
475 protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
476 abort ();
477 }
478}
479
480#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)
481
482#else
483#define PROTECT_MALLOC_STATE(PROT) /* empty */
484#endif
485
74ad5c7f
KH
486
487/* Aligned allocation. */
0a27e8ed 488static __ptr_t align PP ((__malloc_size_t));
74ad5c7f
KH
489static __ptr_t
490align (size)
491 __malloc_size_t size;
492{
493 __ptr_t result;
494 unsigned long int adj;
495
ceeb3d7d
EZ
496 /* align accepts an unsigned argument, but __morecore accepts a
497 signed one. This could lead to trouble if SIZE overflows a
498 signed int type accepted by __morecore. We just punt in that
499 case, since they are requesting a ludicrous amount anyway. */
500 if ((__malloc_ptrdiff_t)size < 0)
501 result = 0;
502 else
503 result = (*__morecore) (size);
74ad5c7f
KH
504 adj = (unsigned long int) ((unsigned long int) ((char *) result -
505 (char *) NULL)) % BLOCKSIZE;
506 if (adj != 0)
507 {
508 __ptr_t new;
509 adj = BLOCKSIZE - adj;
510 new = (*__morecore) (adj);
511 result = (char *) result + adj;
512 }
513
514 if (__after_morecore_hook)
515 (*__after_morecore_hook) ();
516
517 return result;
518}
519
520/* Get SIZE bytes, if we can get them starting at END.
521 Return the address of the space we got.
522 If we cannot get space at END, fail and return 0. */
0a27e8ed 523static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
74ad5c7f
KH
524static __ptr_t
525get_contiguous_space (size, position)
526 __malloc_ptrdiff_t size;
527 __ptr_t position;
528{
529 __ptr_t before;
530 __ptr_t after;
531
532 before = (*__morecore) (0);
533 /* If we can tell in advance that the break is at the wrong place,
534 fail now. */
535 if (before != position)
536 return 0;
537
538 /* Allocate SIZE bytes and get the address of them. */
539 after = (*__morecore) (size);
540 if (!after)
541 return 0;
542
543 /* It was not contiguous--reject it. */
544 if (after != position)
545 {
546 (*__morecore) (- size);
547 return 0;
548 }
549
550 return after;
551}
552
553
554/* This is called when `_heapinfo' and `heapsize' have just
555 been set to describe a new info table. Set up the table
556 to describe itself and account for it in the statistics. */
0a27e8ed 557static void register_heapinfo PP ((void));
74ad5c7f
KH
558#ifdef __GNUC__
559__inline__
560#endif
561static void
562register_heapinfo ()
563{
564 __malloc_size_t block, blocks;
565
566 block = BLOCK (_heapinfo);
567 blocks = BLOCKIFY (heapsize * sizeof (malloc_info));
568
569 /* Account for the _heapinfo block itself in the statistics. */
570 _bytes_used += blocks * BLOCKSIZE;
571 ++_chunks_used;
572
573 /* Describe the heapinfo block itself in the heapinfo. */
574 _heapinfo[block].busy.type = 0;
575 _heapinfo[block].busy.info.size = blocks;
576 /* Leave back-pointers for malloc_find_address. */
577 while (--blocks > 0)
578 _heapinfo[block + blocks].busy.info.size = -blocks;
579}
580
2f213514 581#ifdef USE_PTHREAD
8d0d84d2
YM
582pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
583pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
3ceeb306
YM
584int _malloc_thread_enabled_p;
585
/* pthread_atfork prepare handler: take all malloc locks before fork so
   the child does not inherit a mutex held by some other thread.  */
static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}
592
/* pthread_atfork parent handler: release the locks taken in the
   prepare handler, in reverse order.  */
static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}
599
/* pthread_atfork child handler: release the locks taken in the prepare
   handler so the child's malloc is usable.  */
static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}
606
607/* Set up mutexes and make malloc etc. thread-safe. */
608void
609malloc_enable_thread ()
610{
611 if (_malloc_thread_enabled_p)
612 return;
613
614 /* Some pthread implementations call malloc for statically
615 initialized mutexes when they are used first. To avoid such a
616 situation, we initialize mutexes here while their use is
617 disabled in malloc etc. */
618 pthread_mutex_init (&_malloc_mutex, NULL);
619 pthread_mutex_init (&_aligned_blocks_mutex, NULL);
620 pthread_atfork (malloc_atfork_handler_prepare,
621 malloc_atfork_handler_parent,
622 malloc_atfork_handler_child);
623 _malloc_thread_enabled_p = 1;
624}
2f213514 625#endif
74ad5c7f 626
2f213514
YM
627static void
628malloc_initialize_1 ()
629{
a3ba27da
GM
630#ifdef GC_MCHECK
631 mcheck (NULL);
632#endif
633
74ad5c7f
KH
634 if (__malloc_initialize_hook)
635 (*__malloc_initialize_hook) ();
636
637 heapsize = HEAP / BLOCKSIZE;
638 _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
639 if (_heapinfo == NULL)
2f213514 640 return;
74ad5c7f
KH
641 memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
642 _heapinfo[0].free.size = 0;
643 _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
644 _heapindex = 0;
645 _heapbase = (char *) _heapinfo;
646 _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));
647
648 register_heapinfo ();
649
650 __malloc_initialized = 1;
5dcab13e 651 PROTECT_MALLOC_STATE (1);
2f213514
YM
652 return;
653}
654
784c1472
JD
655/* Set everything up and remember that we have.
656 main will call malloc which calls this function. That is before any threads
657 or signal handlers has been set up, so we don't need thread protection. */
2f213514
YM
658int
659__malloc_initialize ()
660{
2f213514
YM
661 if (__malloc_initialized)
662 return 0;
663
664 malloc_initialize_1 ();
2f213514
YM
665
666 return __malloc_initialized;
74ad5c7f
KH
667}
668
669static int morecore_recursing;
670
671/* Get neatly aligned memory, initializing or
672 growing the heap info table as necessary. */
8d0d84d2 673static __ptr_t morecore_nolock PP ((__malloc_size_t));
74ad5c7f 674static __ptr_t
8d0d84d2 675morecore_nolock (size)
74ad5c7f
KH
676 __malloc_size_t size;
677{
678 __ptr_t result;
679 malloc_info *newinfo, *oldinfo;
680 __malloc_size_t newsize;
681
682 if (morecore_recursing)
683 /* Avoid recursion. The caller will know how to handle a null return. */
684 return NULL;
685
686 result = align (size);
687 if (result == NULL)
688 return NULL;
689
5dcab13e
GM
690 PROTECT_MALLOC_STATE (0);
691
74ad5c7f
KH
692 /* Check if we need to grow the info table. */
693 if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
694 {
695 /* Calculate the new _heapinfo table size. We do not account for the
696 added blocks in the table itself, as we hope to place them in
697 existing free space, which is already covered by part of the
698 existing table. */
699 newsize = heapsize;
700 do
701 newsize *= 2;
702 while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);
703
704 /* We must not reuse existing core for the new info table when called
705 from realloc in the case of growing a large block, because the
706 block being grown is momentarily marked as free. In this case
707 _heaplimit is zero so we know not to reuse space for internal
708 allocation. */
709 if (_heaplimit != 0)
710 {
711 /* First try to allocate the new info table in core we already
712 have, in the usual way using realloc. If realloc cannot
713 extend it in place or relocate it to existing sufficient core,
714 we will get called again, and the code above will notice the
715 `morecore_recursing' flag and return null. */
716 int save = errno; /* Don't want to clobber errno with ENOMEM. */
717 morecore_recursing = 1;
8d0d84d2 718 newinfo = (malloc_info *) _realloc_internal_nolock
74ad5c7f
KH
719 (_heapinfo, newsize * sizeof (malloc_info));
720 morecore_recursing = 0;
721 if (newinfo == NULL)
722 errno = save;
723 else
724 {
725 /* We found some space in core, and realloc has put the old
726 table's blocks on the free list. Now zero the new part
727 of the table and install the new table location. */
728 memset (&newinfo[heapsize], 0,
729 (newsize - heapsize) * sizeof (malloc_info));
730 _heapinfo = newinfo;
731 heapsize = newsize;
732 goto got_heap;
733 }
734 }
735
736 /* Allocate new space for the malloc info table. */
737 while (1)
738 {
739 newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
740
741 /* Did it fail? */
742 if (newinfo == NULL)
743 {
744 (*__morecore) (-size);
745 return NULL;
746 }
747
748 /* Is it big enough to record status for its own space?
749 If so, we win. */
750 if ((__malloc_size_t) BLOCK ((char *) newinfo
751 + newsize * sizeof (malloc_info))
752 < newsize)
753 break;
754
755 /* Must try again. First give back most of what we just got. */
756 (*__morecore) (- newsize * sizeof (malloc_info));
757 newsize *= 2;
758 }
759
760 /* Copy the old table to the beginning of the new,
761 and zero the rest of the new table. */
762 memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
763 memset (&newinfo[heapsize], 0,
764 (newsize - heapsize) * sizeof (malloc_info));
765 oldinfo = _heapinfo;
766 _heapinfo = newinfo;
767 heapsize = newsize;
768
769 register_heapinfo ();
770
771 /* Reset _heaplimit so _free_internal never decides
772 it can relocate or resize the info table. */
773 _heaplimit = 0;
8d0d84d2 774 _free_internal_nolock (oldinfo);
5dcab13e 775 PROTECT_MALLOC_STATE (0);
74ad5c7f
KH
776
777 /* The new heap limit includes the new table just allocated. */
778 _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
779 return result;
780 }
781
782 got_heap:
783 _heaplimit = BLOCK ((char *) result + size);
784 return result;
785}
786
787/* Allocate memory from the heap. */
788__ptr_t
8d0d84d2 789_malloc_internal_nolock (size)
74ad5c7f
KH
790 __malloc_size_t size;
791{
792 __ptr_t result;
793 __malloc_size_t block, blocks, lastblocks, start;
794 register __malloc_size_t i;
795 struct list *next;
796
797 /* ANSI C allows `malloc (0)' to either return NULL, or to return a
798 valid address you can realloc and free (though not dereference).
799
800 It turns out that some extant code (sunrpc, at least Ultrix's version)
801 expects `malloc (0)' to return non-NULL and breaks otherwise.
802 Be compatible. */
803
804#if 0
805 if (size == 0)
806 return NULL;
807#endif
808
5dcab13e
GM
809 PROTECT_MALLOC_STATE (0);
810
74ad5c7f
KH
811 if (size < sizeof (struct list))
812 size = sizeof (struct list);
813
74ad5c7f
KH
814 /* Determine the allocation policy based on the request size. */
815 if (size <= BLOCKSIZE / 2)
816 {
817 /* Small allocation to receive a fragment of a block.
818 Determine the logarithm to base two of the fragment size. */
819 register __malloc_size_t log = 1;
820 --size;
821 while ((size /= 2) != 0)
822 ++log;
823
824 /* Look in the fragment lists for a
825 free fragment of the desired size. */
826 next = _fraghead[log].next;
827 if (next != NULL)
828 {
829 /* There are free fragments of this size.
830 Pop a fragment out of the fragment list and return it.
831 Update the block's nfree and first counters. */
832 result = (__ptr_t) next;
833 next->prev->next = next->next;
834 if (next->next != NULL)
835 next->next->prev = next->prev;
836 block = BLOCK (result);
837 if (--_heapinfo[block].busy.info.frag.nfree != 0)
838 _heapinfo[block].busy.info.frag.first = (unsigned long int)
839 ((unsigned long int) ((char *) next->next - (char *) NULL)
840 % BLOCKSIZE) >> log;
841
842 /* Update the statistics. */
843 ++_chunks_used;
844 _bytes_used += 1 << log;
845 --_chunks_free;
846 _bytes_free -= 1 << log;
847 }
848 else
849 {
850 /* No free fragments of the desired size, so get a new block
851 and break it into fragments, returning the first. */
8094989b 852#ifdef GC_MALLOC_CHECK
8d0d84d2 853 result = _malloc_internal_nolock (BLOCKSIZE);
5dcab13e 854 PROTECT_MALLOC_STATE (0);
8d0d84d2
YM
855#elif defined (USE_PTHREAD)
856 result = _malloc_internal_nolock (BLOCKSIZE);
8094989b 857#else
74ad5c7f 858 result = malloc (BLOCKSIZE);
8094989b 859#endif
74ad5c7f 860 if (result == NULL)
5dcab13e
GM
861 {
862 PROTECT_MALLOC_STATE (1);
2f213514 863 goto out;
5dcab13e 864 }
74ad5c7f
KH
865
866 /* Link all fragments but the first into the free list. */
867 next = (struct list *) ((char *) result + (1 << log));
868 next->next = NULL;
869 next->prev = &_fraghead[log];
870 _fraghead[log].next = next;
871
872 for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
873 {
874 next = (struct list *) ((char *) result + (i << log));
875 next->next = _fraghead[log].next;
876 next->prev = &_fraghead[log];
877 next->prev->next = next;
878 next->next->prev = next;
879 }
880
881 /* Initialize the nfree and first counters for this block. */
882 block = BLOCK (result);
883 _heapinfo[block].busy.type = log;
884 _heapinfo[block].busy.info.frag.nfree = i - 1;
885 _heapinfo[block].busy.info.frag.first = i - 1;
886
887 _chunks_free += (BLOCKSIZE >> log) - 1;
888 _bytes_free += BLOCKSIZE - (1 << log);
889 _bytes_used -= BLOCKSIZE - (1 << log);
890 }
891 }
892 else
893 {
894 /* Large allocation to receive one or more blocks.
895 Search the free list in a circle starting at the last place visited.
896 If we loop completely around without finding a large enough
897 space we will have to get more memory from the system. */
898 blocks = BLOCKIFY (size);
899 start = block = _heapindex;
900 while (_heapinfo[block].free.size < blocks)
901 {
902 block = _heapinfo[block].free.next;
903 if (block == start)
904 {
905 /* Need to get more from the system. Get a little extra. */
906 __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
907 block = _heapinfo[0].free.prev;
908 lastblocks = _heapinfo[block].free.size;
909 /* Check to see if the new core will be contiguous with the
910 final free block; if so we don't need to get as much. */
911 if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
912 /* We can't do this if we will have to make the heap info
cc4a96c6 913 table bigger to accommodate the new space. */
74ad5c7f
KH
914 block + wantblocks <= heapsize &&
915 get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
916 ADDRESS (block + lastblocks)))
917 {
918 /* We got it contiguously. Which block we are extending
919 (the `final free block' referred to above) might have
920 changed, if it got combined with a freed info table. */
921 block = _heapinfo[0].free.prev;
922 _heapinfo[block].free.size += (wantblocks - lastblocks);
923 _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
924 _heaplimit += wantblocks - lastblocks;
925 continue;
926 }
8d0d84d2 927 result = morecore_nolock (wantblocks * BLOCKSIZE);
74ad5c7f 928 if (result == NULL)
2f213514 929 goto out;
74ad5c7f
KH
930 block = BLOCK (result);
931 /* Put the new block at the end of the free list. */
932 _heapinfo[block].free.size = wantblocks;
933 _heapinfo[block].free.prev = _heapinfo[0].free.prev;
934 _heapinfo[block].free.next = 0;
935 _heapinfo[0].free.prev = block;
936 _heapinfo[_heapinfo[block].free.prev].free.next = block;
937 ++_chunks_free;
938 /* Now loop to use some of that block for this allocation. */
939 }
940 }
941
942 /* At this point we have found a suitable free list entry.
943 Figure out how to remove what we need from the list. */
944 result = ADDRESS (block);
945 if (_heapinfo[block].free.size > blocks)
946 {
947 /* The block we found has a bit left over,
948 so relink the tail end back into the free list. */
949 _heapinfo[block + blocks].free.size
950 = _heapinfo[block].free.size - blocks;
951 _heapinfo[block + blocks].free.next
952 = _heapinfo[block].free.next;
953 _heapinfo[block + blocks].free.prev
954 = _heapinfo[block].free.prev;
955 _heapinfo[_heapinfo[block].free.prev].free.next
956 = _heapinfo[_heapinfo[block].free.next].free.prev
957 = _heapindex = block + blocks;
958 }
959 else
960 {
961 /* The block exactly matches our requirements,
962 so just remove it from the list. */
963 _heapinfo[_heapinfo[block].free.next].free.prev
964 = _heapinfo[block].free.prev;
965 _heapinfo[_heapinfo[block].free.prev].free.next
966 = _heapindex = _heapinfo[block].free.next;
967 --_chunks_free;
968 }
969
970 _heapinfo[block].busy.type = 0;
971 _heapinfo[block].busy.info.size = blocks;
972 ++_chunks_used;
973 _bytes_used += blocks * BLOCKSIZE;
974 _bytes_free -= blocks * BLOCKSIZE;
975
976 /* Mark all the blocks of the object just allocated except for the
977 first with a negative number so you can find the first block by
978 adding that adjustment. */
979 while (--blocks > 0)
980 _heapinfo[block + blocks].busy.info.size = -blocks;
981 }
982
5dcab13e 983 PROTECT_MALLOC_STATE (1);
2f213514 984 out:
8d0d84d2
YM
985 return result;
986}
987
988__ptr_t
989_malloc_internal (size)
990 __malloc_size_t size;
991{
992 __ptr_t result;
993
994 LOCK ();
995 result = _malloc_internal_nolock (size);
2f213514 996 UNLOCK ();
8d0d84d2 997
74ad5c7f
KH
998 return result;
999}
1000
1001__ptr_t
1002malloc (size)
1003 __malloc_size_t size;
1004{
8d0d84d2
YM
1005 __ptr_t (*hook) (__malloc_size_t);
1006
74ad5c7f
KH
1007 if (!__malloc_initialized && !__malloc_initialize ())
1008 return NULL;
1009
8d0d84d2
YM
1010 /* Copy the value of __malloc_hook to an automatic variable in case
1011 __malloc_hook is modified in another thread between its
1012 NULL-check and the use.
1013
1014 Note: Strictly speaking, this is not a right solution. We should
1015 use mutexes to access non-read-only variables that are shared
1016 among multiple threads. We just leave it for compatibility with
1017 glibc malloc (i.e., assignments to __malloc_hook) for now. */
1018 hook = __malloc_hook;
1019 return (hook != NULL ? *hook : _malloc_internal) (size);
74ad5c7f
KH
1020}
1021\f
1022#ifndef _LIBC
1023
1024/* On some ANSI C systems, some libc functions call _malloc, _free
1025 and _realloc. Make them use the GNU functions. */
1026
1027__ptr_t
1028_malloc (size)
1029 __malloc_size_t size;
1030{
1031 return malloc (size);
1032}
1033
1034void
1035_free (ptr)
1036 __ptr_t ptr;
1037{
1038 free (ptr);
1039}
1040
1041__ptr_t
1042_realloc (ptr, size)
1043 __ptr_t ptr;
1044 __malloc_size_t size;
1045{
1046 return realloc (ptr, size);
1047}
1048
1049#endif
1050/* Free a block of memory allocated by `malloc'.
1051 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
1052 Written May 1989 by Mike Haertel.
1053
1054This library is free software; you can redistribute it and/or
423a1f3c 1055modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1056published by the Free Software Foundation; either version 2 of the
1057License, or (at your option) any later version.
1058
1059This library is distributed in the hope that it will be useful,
1060but WITHOUT ANY WARRANTY; without even the implied warranty of
1061MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1062General Public License for more details.
74ad5c7f 1063
423a1f3c
JB
1064You should have received a copy of the GNU General Public
1065License along with this library; see the file COPYING. If
3ef97fb6
LK
1066not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1067Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
1068
1069 The author may be reached (Email) at the address mike@ai.mit.edu,
1070 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1071
1072#ifndef _MALLOC_INTERNAL
1073#define _MALLOC_INTERNAL
1074#include <malloc.h>
1075#endif
1076
1077
1078/* Cope with systems lacking `memmove'. */
1079#ifndef memmove
4624371d 1080#if (!defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
74ad5c7f
KH
1081#ifdef emacs
1082#undef __malloc_safe_bcopy
1083#define __malloc_safe_bcopy safe_bcopy
1084#endif
1085/* This function is defined in realloc.c. */
0a27e8ed 1086extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
74ad5c7f
KH
1087#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
1088#endif
1089#endif
1090
1091
1092/* Debugging hook for free. */
0a27e8ed 1093void (*__free_hook) PP ((__ptr_t __ptr));
74ad5c7f
KH
1094
1095/* List of blocks allocated by memalign. */
1096struct alignlist *_aligned_blocks = NULL;
1097
/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.
   PTR may be NULL (no-op) or a pointer previously returned by the
   allocator, including a `memalign'ed pointer, which is translated
   back to the exact allocation via the _aligned_blocks list.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  /* If PTR was handed out by memalign, map it back to the start of the
     block malloc actually allocated and release the alignlist slot.  */
  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  /* Type 0 means a whole-block (large) allocation; any other value is
     the log2 fragment size of a fragmented block.  */
  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
								 * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      /* Shrink the data segment; the freed run is unlinked from
		 the free list below.  */
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Freeing a fragment of 2**TYPE bytes.  Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
1346
1347/* Return memory to the heap.
1348 Like `free' but don't call a __free_hook if there is one. */
1349void
1350_free_internal (ptr)
1351 __ptr_t ptr;
1352{
1353 LOCK ();
1354 _free_internal_nolock (ptr);
2f213514 1355 UNLOCK ();
74ad5c7f
KH
1356}
1357
1358/* Return memory to the heap. */
ca9c0567 1359
4624371d 1360void
74ad5c7f
KH
1361free (ptr)
1362 __ptr_t ptr;
1363{
8d0d84d2
YM
1364 void (*hook) (__ptr_t) = __free_hook;
1365
1366 if (hook != NULL)
1367 (*hook) (ptr);
74ad5c7f
KH
1368 else
1369 _free_internal (ptr);
1370}
1371
1372/* Define the `cfree' alias for `free'. */
1373#ifdef weak_alias
1374weak_alias (free, cfree)
1375#else
1376void
1377cfree (ptr)
1378 __ptr_t ptr;
1379{
1380 free (ptr);
1381}
1382#endif
1383/* Change the size of a block allocated by `malloc'.
1384 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1385 Written May 1989 by Mike Haertel.
1386
1387This library is free software; you can redistribute it and/or
423a1f3c 1388modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1389published by the Free Software Foundation; either version 2 of the
1390License, or (at your option) any later version.
1391
1392This library is distributed in the hope that it will be useful,
1393but WITHOUT ANY WARRANTY; without even the implied warranty of
1394MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1395General Public License for more details.
74ad5c7f 1396
423a1f3c
JB
1397You should have received a copy of the GNU General Public
1398License along with this library; see the file COPYING. If
3ef97fb6
LK
1399not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1400Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
1401
1402 The author may be reached (Email) at the address mike@ai.mit.edu,
1403 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1404
1405#ifndef _MALLOC_INTERNAL
1406#define _MALLOC_INTERNAL
1407#include <malloc.h>
1408#endif
1409
1410
1411
1412/* Cope with systems lacking `memmove'. */
4624371d 1413#if (!defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
74ad5c7f
KH
1414
1415#ifdef emacs
1416#undef __malloc_safe_bcopy
1417#define __malloc_safe_bcopy safe_bcopy
1418#else
1419
1420/* Snarfed directly from Emacs src/dispnew.c:
1421 XXX Should use system bcopy if it handles overlap. */
1422
1423/* Like bcopy except never gets confused by overlap. */
1424
1425void
1426__malloc_safe_bcopy (afrom, ato, size)
1427 __ptr_t afrom;
1428 __ptr_t ato;
1429 __malloc_size_t size;
1430{
1431 char *from = afrom, *to = ato;
1432
1433 if (size <= 0 || from == to)
1434 return;
1435
1436 /* If the source and destination don't overlap, then bcopy can
1437 handle it. If they do overlap, but the destination is lower in
1438 memory than the source, we'll assume bcopy can handle that. */
1439 if (to < from || from + size <= to)
1440 bcopy (from, to, size);
1441
1442 /* Otherwise, we'll copy from the end. */
1443 else
1444 {
1445 register char *endf = from + size;
1446 register char *endt = to + size;
1447
1448 /* If TO - FROM is large, then we should break the copy into
1449 nonoverlapping chunks of TO - FROM bytes each. However, if
1450 TO - FROM is small, then the bcopy function call overhead
1451 makes this not worth it. The crossover point could be about
1452 anywhere. Since I don't think the obvious copy loop is too
1453 bad, I'm trying to err in its favor. */
1454 if (to - from < 64)
1455 {
1456 do
1457 *--endt = *--endf;
1458 while (endf != from);
1459 }
1460 else
1461 {
1462 for (;;)
1463 {
1464 endt -= (to - from);
1465 endf -= (to - from);
1466
1467 if (endt < to)
1468 break;
1469
1470 bcopy (endf, endt, to - from);
1471 }
1472
1473 /* If SIZE wasn't a multiple of TO - FROM, there will be a
1474 little left over. The amount left over is
1475 (endt + (to - from)) - to, which is endt - from. */
1476 bcopy (from, to, endt - from);
1477 }
1478 }
1479}
1480#endif /* emacs */
1481
1482#ifndef memmove
0a27e8ed 1483extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
74ad5c7f
KH
1484#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
1485#endif
1486
1487#endif
1488
1489
1490#define min(A, B) ((A) < (B) ? (A) : (B))
1491
1492/* Debugging hook for realloc. */
0a27e8ed 1493__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
74ad5c7f
KH
1494
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.
   Special cases: SIZE == 0 frees PTR and returns a minimal
   allocation; PTR == NULL behaves like malloc.  Returns NULL on
   failure, in which case the original block is left intact.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  /* Type 0 means PTR is a whole-block allocation; otherwise TYPE is
     the log2 size of the fragment PTR lives in.  */
  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place. */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list. */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it. */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving. */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
	        (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  /* Re-claim the gap up to BLOCK, re-claim the old
		     region itself, then release the gap again.  */
		  __ptr_t previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old. */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
1618
1619__ptr_t
1620_realloc_internal (ptr, size)
1621 __ptr_t ptr;
1622 __malloc_size_t size;
1623{
1624 __ptr_t result;
1625
1626 LOCK();
1627 result = _realloc_internal_nolock (ptr, size);
2f213514 1628 UNLOCK ();
8d0d84d2 1629
74ad5c7f
KH
1630 return result;
1631}
1632
1633__ptr_t
1634realloc (ptr, size)
1635 __ptr_t ptr;
1636 __malloc_size_t size;
1637{
8d0d84d2
YM
1638 __ptr_t (*hook) (__ptr_t, __malloc_size_t);
1639
74ad5c7f
KH
1640 if (!__malloc_initialized && !__malloc_initialize ())
1641 return NULL;
1642
8d0d84d2
YM
1643 hook = __realloc_hook;
1644 return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
74ad5c7f
KH
1645}
1646/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1647
1648This library is free software; you can redistribute it and/or
423a1f3c 1649modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1650published by the Free Software Foundation; either version 2 of the
1651License, or (at your option) any later version.
1652
1653This library is distributed in the hope that it will be useful,
1654but WITHOUT ANY WARRANTY; without even the implied warranty of
1655MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1656General Public License for more details.
74ad5c7f 1657
423a1f3c
JB
1658You should have received a copy of the GNU General Public
1659License along with this library; see the file COPYING. If
3ef97fb6
LK
1660not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1661Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
1662
1663 The author may be reached (Email) at the address mike@ai.mit.edu,
1664 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1665
1666#ifndef _MALLOC_INTERNAL
1667#define _MALLOC_INTERNAL
1668#include <malloc.h>
1669#endif
1670
1671/* Allocate an array of NMEMB elements each SIZE bytes long.
1672 The entire array is initialized to zeros. */
1673__ptr_t
1674calloc (nmemb, size)
1675 register __malloc_size_t nmemb;
1676 register __malloc_size_t size;
1677{
1678 register __ptr_t result = malloc (nmemb * size);
1679
1680 if (result != NULL)
1681 (void) memset (result, 0, nmemb * size);
1682
1683 return result;
1684}
1685/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1686This file is part of the GNU C Library.
1687
1688The GNU C Library is free software; you can redistribute it and/or modify
1689it under the terms of the GNU General Public License as published by
1690the Free Software Foundation; either version 2, or (at your option)
1691any later version.
1692
1693The GNU C Library is distributed in the hope that it will be useful,
1694but WITHOUT ANY WARRANTY; without even the implied warranty of
1695MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1696GNU General Public License for more details.
1697
1698You should have received a copy of the GNU General Public License
1699along with the GNU C Library; see the file COPYING. If not, write to
3ef97fb6
LK
1700the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
1701MA 02110-1301, USA. */
74ad5c7f
KH
1702
1703#ifndef _MALLOC_INTERNAL
1704#define _MALLOC_INTERNAL
1705#include <malloc.h>
1706#endif
1707
65f451d0
DN
1708/* uClibc defines __GNU_LIBRARY__, but it is not completely
1709 compatible. */
1710#if !defined(__GNU_LIBRARY__) || defined(__UCLIBC__)
74ad5c7f 1711#define __sbrk sbrk
65f451d0 1712#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
74ad5c7f
KH
1713/* It is best not to declare this and cast its result on foreign operating
1714 systems with potentially hostile include files. */
1715
1716#include <stddef.h>
0a27e8ed 1717extern __ptr_t __sbrk PP ((ptrdiff_t increment));
65f451d0 1718#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
74ad5c7f
KH
1719
1720#ifndef NULL
1721#define NULL 0
1722#endif
1723
1724/* Allocate INCREMENT more bytes of data space,
1725 and return the start of data space, or NULL on errors.
1726 If INCREMENT is negative, shrink data space. */
1727__ptr_t
1728__default_morecore (increment)
1729 __malloc_ptrdiff_t increment;
1730{
ef6d1039
SM
1731 __ptr_t result;
1732#if defined(CYGWIN)
1733 if (!bss_sbrk_did_unexec)
1734 {
1735 return bss_sbrk (increment);
1736 }
1737#endif
1738 result = (__ptr_t) __sbrk (increment);
74ad5c7f
KH
1739 if (result == (__ptr_t) -1)
1740 return NULL;
1741 return result;
1742}
1743/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1744
1745This library is free software; you can redistribute it and/or
423a1f3c 1746modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1747published by the Free Software Foundation; either version 2 of the
1748License, or (at your option) any later version.
1749
1750This library is distributed in the hope that it will be useful,
1751but WITHOUT ANY WARRANTY; without even the implied warranty of
1752MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1753General Public License for more details.
74ad5c7f 1754
423a1f3c
JB
1755You should have received a copy of the GNU General Public
1756License along with this library; see the file COPYING. If
3ef97fb6
LK
1757not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1758Fifth Floor, Boston, MA 02110-1301, USA. */
74ad5c7f
KH
1759
1760#ifndef _MALLOC_INTERNAL
1761#define _MALLOC_INTERNAL
1762#include <malloc.h>
1763#endif
1764
eec2d1de
EZ
1765__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
1766 __malloc_size_t __alignment));
74ad5c7f
KH
1767
1768__ptr_t
1769memalign (alignment, size)
1770 __malloc_size_t alignment;
1771 __malloc_size_t size;
1772{
1773 __ptr_t result;
1774 unsigned long int adj, lastadj;
8d0d84d2 1775 __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;
74ad5c7f 1776
8d0d84d2
YM
1777 if (hook)
1778 return (*hook) (alignment, size);
74ad5c7f
KH
1779
1780 /* Allocate a block with enough extra space to pad the block with up to
1781 (ALIGNMENT - 1) bytes if necessary. */
1782 result = malloc (size + alignment - 1);
1783 if (result == NULL)
1784 return NULL;
1785
1786 /* Figure out how much we will need to pad this particular block
1787 to achieve the required alignment. */
1788 adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
1789
1790 do
1791 {
1792 /* Reallocate the block with only as much excess as it needs. */
1793 free (result);
1794 result = malloc (adj + size);
1795 if (result == NULL) /* Impossible unless interrupted. */
1796 return NULL;
1797
1798 lastadj = adj;
1799 adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
1800 /* It's conceivable we might have been so unlucky as to get a
1801 different block with weaker alignment. If so, this block is too
1802 short to contain SIZE after alignment correction. So we must
1803 try again and get another block, slightly larger. */
1804 } while (adj > lastadj);
1805
1806 if (adj != 0)
1807 {
1808 /* Record this block in the list of aligned blocks, so that `free'
1809 can identify the pointer it is passed, which will be in the middle
1810 of an allocated block. */
1811
1812 struct alignlist *l;
8d0d84d2 1813 LOCK_ALIGNED_BLOCKS ();
74ad5c7f
KH
1814 for (l = _aligned_blocks; l != NULL; l = l->next)
1815 if (l->aligned == NULL)
1816 /* This slot is free. Use it. */
1817 break;
1818 if (l == NULL)
1819 {
1820 l = (struct alignlist *) malloc (sizeof (struct alignlist));
8d0d84d2 1821 if (l != NULL)
74ad5c7f 1822 {
8d0d84d2
YM
1823 l->next = _aligned_blocks;
1824 _aligned_blocks = l;
74ad5c7f 1825 }
74ad5c7f 1826 }
8d0d84d2
YM
1827 if (l != NULL)
1828 {
1829 l->exact = result;
1830 result = l->aligned = (char *) result + alignment - adj;
1831 }
1832 UNLOCK_ALIGNED_BLOCKS ();
1833 if (l == NULL)
1834 {
1835 free (result);
1836 result = NULL;
1837 }
74ad5c7f
KH
1838 }
1839
1840 return result;
1841}
1842
72359c32
YM
1843#ifndef ENOMEM
1844#define ENOMEM 12
1845#endif
1846
1847#ifndef EINVAL
1848#define EINVAL 22
1849#endif
1850
1851int
1852posix_memalign (memptr, alignment, size)
1853 __ptr_t *memptr;
1854 __malloc_size_t alignment;
1855 __malloc_size_t size;
1856{
1857 __ptr_t mem;
1858
1859 if (alignment == 0
1860 || alignment % sizeof (__ptr_t) != 0
1861 || (alignment & (alignment - 1)) != 0)
1862 return EINVAL;
1863
1864 mem = memalign (alignment, size);
1865 if (mem == NULL)
1866 return ENOMEM;
1867
1868 *memptr = mem;
1869
1870 return 0;
1871}
1872
74ad5c7f
KH
1873/* Allocate memory on a page boundary.
1874 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1875
1876This library is free software; you can redistribute it and/or
423a1f3c 1877modify it under the terms of the GNU General Public License as
74ad5c7f
KH
1878published by the Free Software Foundation; either version 2 of the
1879License, or (at your option) any later version.
1880
1881This library is distributed in the hope that it will be useful,
1882but WITHOUT ANY WARRANTY; without even the implied warranty of
1883MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1884General Public License for more details.
74ad5c7f 1885
423a1f3c
JB
1886You should have received a copy of the GNU General Public
1887License along with this library; see the file COPYING. If
3ef97fb6
LK
1888not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1889Fifth Floor, Boston, MA 02110-1301, USA.
74ad5c7f
KH
1890
1891 The author may be reached (Email) at the address mike@ai.mit.edu,
1892 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1893
1894#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)
1895
1896/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
1897 on MSDOS, where it conflicts with a system header file. */
1898
1899#define ELIDE_VALLOC
1900
1901#endif
1902
1903#ifndef ELIDE_VALLOC
1904
1905#if defined (__GNU_LIBRARY__) || defined (_LIBC)
1906#include <stddef.h>
1907#include <sys/cdefs.h>
47582ab3
KH
1908#if defined (__GLIBC__) && __GLIBC__ >= 2
1909/* __getpagesize is already declared in <unistd.h> with return type int */
1910#else
0a27e8ed 1911extern size_t __getpagesize PP ((void));
47582ab3 1912#endif
74ad5c7f
KH
1913#else
1914#include "getpagesize.h"
1915#define __getpagesize() getpagesize()
1916#endif
1917
1918#ifndef _MALLOC_INTERNAL
1919#define _MALLOC_INTERNAL
1920#include <malloc.h>
1921#endif
1922
1923static __malloc_size_t pagesize;
1924
1925__ptr_t
1926valloc (size)
1927 __malloc_size_t size;
1928{
1929 if (pagesize == 0)
1930 pagesize = __getpagesize ();
1931
1932 return memalign (pagesize, size);
1933}
1934
1935#endif /* Not ELIDE_VALLOC. */
a3ba27da
GM
1936
1937#ifdef GC_MCHECK
1938
1939/* Standard debugging hooks for `malloc'.
1940 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1941 Written May 1989 by Mike Haertel.
1942
1943This library is free software; you can redistribute it and/or
423a1f3c 1944modify it under the terms of the GNU General Public License as
a3ba27da
GM
1945published by the Free Software Foundation; either version 2 of the
1946License, or (at your option) any later version.
1947
1948This library is distributed in the hope that it will be useful,
1949but WITHOUT ANY WARRANTY; without even the implied warranty of
1950MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
423a1f3c 1951General Public License for more details.
a3ba27da 1952
423a1f3c
JB
1953You should have received a copy of the GNU General Public
1954License along with this library; see the file COPYING. If
3ef97fb6
LK
1955not, write to the Free Software Foundation, Inc., 51 Franklin Street,
1956Fifth Floor, Boston, MA 02110-1301, USA.
a3ba27da
GM
1957
1958 The author may be reached (Email) at the address mike@ai.mit.edu,
1959 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1960
1961#ifdef emacs
1962#include <stdio.h>
1963#else
1964#ifndef _MALLOC_INTERNAL
1965#define _MALLOC_INTERNAL
1966#include <malloc.h>
1967#include <stdio.h>
1968#endif
1969#endif
1970
1971/* Old hook values. */
f57e2426
J
1972static void (*old_free_hook) (__ptr_t ptr);
1973static __ptr_t (*old_malloc_hook) (__malloc_size_t size);
1974static __ptr_t (*old_realloc_hook) (__ptr_t ptr, __malloc_size_t size);
a3ba27da
GM
1975
1976/* Function to call when something awful happens. */
f57e2426 1977static void (*abortfunc) (enum mcheck_status);
a3ba27da
GM
1978
1979/* Arbitrary magical numbers. */
1980#define MAGICWORD 0xfedabeeb
1981#define MAGICFREE 0xd8675309
1982#define MAGICBYTE ((char) 0xd7)
1983#define MALLOCFLOOD ((char) 0x93)
1984#define FREEFLOOD ((char) 0x95)
1985
/* Bookkeeping header placed immediately before each user block.
   A single MAGICBYTE guard is also written just past the user's data
   (see mallochook/reallochook), so the real allocation is
   sizeof (struct hdr) + size + 1 bytes.  */
struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
1991
1992#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
1993#define flood memset
1994#else
f57e2426 1995static void flood (__ptr_t, int, __malloc_size_t);
a3ba27da
GM
1996static void
1997flood (ptr, val, size)
1998 __ptr_t ptr;
1999 int val;
2000 __malloc_size_t size;
2001{
2002 char *cp = ptr;
2003 while (size--)
2004 *cp++ = val;
2005}
2006#endif
2007
f57e2426 2008static enum mcheck_status checkhdr (const struct hdr *);
a3ba27da
GM
2009static enum mcheck_status
2010checkhdr (hdr)
2011 const struct hdr *hdr;
2012{
2013 enum mcheck_status status;
2014 switch (hdr->magic)
2015 {
2016 default:
2017 status = MCHECK_HEAD;
2018 break;
2019 case MAGICFREE:
2020 status = MCHECK_FREE;
2021 break;
2022 case MAGICWORD:
2023 if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
2024 status = MCHECK_TAIL;
2025 else
2026 status = MCHECK_OK;
2027 break;
2028 }
2029 if (status != MCHECK_OK)
2030 (*abortfunc) (status);
2031 return status;
2032}
2033
f57e2426 2034static void freehook (__ptr_t);
a3ba27da
GM
2035static void
2036freehook (ptr)
2037 __ptr_t ptr;
2038{
2039 struct hdr *hdr;
177c0ea7 2040
a3ba27da
GM
2041 if (ptr)
2042 {
2043 hdr = ((struct hdr *) ptr) - 1;
2044 checkhdr (hdr);
2045 hdr->magic = MAGICFREE;
2046 flood (ptr, FREEFLOOD, hdr->size);
2047 }
2048 else
2049 hdr = NULL;
177c0ea7 2050
a3ba27da
GM
2051 __free_hook = old_free_hook;
2052 free (hdr);
2053 __free_hook = freehook;
2054}
2055
f57e2426 2056static __ptr_t mallochook (__malloc_size_t);
a3ba27da
GM
2057static __ptr_t
2058mallochook (size)
2059 __malloc_size_t size;
2060{
2061 struct hdr *hdr;
2062
2063 __malloc_hook = old_malloc_hook;
2064 hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
2065 __malloc_hook = mallochook;
2066 if (hdr == NULL)
2067 return NULL;
2068
2069 hdr->size = size;
2070 hdr->magic = MAGICWORD;
2071 ((char *) &hdr[1])[size] = MAGICBYTE;
2072 flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
2073 return (__ptr_t) (hdr + 1);
2074}
2075
f57e2426 2076static __ptr_t reallochook (__ptr_t, __malloc_size_t);
a3ba27da
GM
2077static __ptr_t
2078reallochook (ptr, size)
2079 __ptr_t ptr;
2080 __malloc_size_t size;
2081{
2082 struct hdr *hdr = NULL;
2083 __malloc_size_t osize = 0;
177c0ea7 2084
a3ba27da
GM
2085 if (ptr)
2086 {
2087 hdr = ((struct hdr *) ptr) - 1;
2088 osize = hdr->size;
2089
2090 checkhdr (hdr);
2091 if (size < osize)
2092 flood ((char *) ptr + size, FREEFLOOD, osize - size);
2093 }
177c0ea7 2094
a3ba27da
GM
2095 __free_hook = old_free_hook;
2096 __malloc_hook = old_malloc_hook;
2097 __realloc_hook = old_realloc_hook;
2098 hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
2099 __free_hook = freehook;
2100 __malloc_hook = mallochook;
2101 __realloc_hook = reallochook;
2102 if (hdr == NULL)
2103 return NULL;
2104
2105 hdr->size = size;
2106 hdr->magic = MAGICWORD;
2107 ((char *) &hdr[1])[size] = MAGICBYTE;
2108 if (size > osize)
2109 flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
2110 return (__ptr_t) (hdr + 1);
2111}
2112
2113static void
2114mabort (status)
2115 enum mcheck_status status;
2116{
2117 const char *msg;
2118 switch (status)
2119 {
2120 case MCHECK_OK:
2121 msg = "memory is consistent, library is buggy";
2122 break;
2123 case MCHECK_HEAD:
2124 msg = "memory clobbered before allocated block";
2125 break;
2126 case MCHECK_TAIL:
2127 msg = "memory clobbered past end of allocated block";
2128 break;
2129 case MCHECK_FREE:
2130 msg = "block freed twice";
2131 break;
2132 default:
2133 msg = "bogus mcheck_status, library is buggy";
2134 break;
2135 }
2136#ifdef __GNU_LIBRARY__
2137 __libc_fatal (msg);
2138#else
2139 fprintf (stderr, "mcheck: %s\n", msg);
2140 fflush (stderr);
2141 abort ();
2142#endif
2143}
2144
2145static int mcheck_used = 0;
2146
2147int
2148mcheck (func)
f57e2426 2149 void (*func) (enum mcheck_status);
a3ba27da
GM
2150{
2151 abortfunc = (func != NULL) ? func : &mabort;
2152
2153 /* These hooks may not be safely inserted if malloc is already in use. */
2154 if (!__malloc_initialized && !mcheck_used)
2155 {
2156 old_free_hook = __free_hook;
2157 __free_hook = freehook;
2158 old_malloc_hook = __malloc_hook;
2159 __malloc_hook = mallochook;
2160 old_realloc_hook = __realloc_hook;
2161 __realloc_hook = reallochook;
2162 mcheck_used = 1;
2163 }
2164
2165 return mcheck_used ? 0 : -1;
2166}
2167
/* Report the consistency status of the allocated block PTR, or
   MCHECK_DISABLED if the mcheck hooks were never installed.
   NOTE(review): checkhdr takes a pointer to the struct hdr that
   precedes the user data, yet PTR is forwarded unadjusted here
   (unlike freehook, which subtracts 1).  This mirrors the historical
   libc code this file derives from — verify against glibc's mcheck.c
   before "fixing".  */
enum mcheck_status
mprobe (__ptr_t ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}
2173
2174#endif /* GC_MCHECK */
ab5796a9
MB
2175
2176/* arch-tag: 93dce5c0-f49a-41b5-86b1-f91c4169c02e
2177 (do not change this comment) */