/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_GTK_AND_PTHREAD
#define USE_PTHREAD
#endif

#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES) \
     && ! defined (BROKEN_PROTOTYPES))
#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef PP
#define PP(args) ()
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define memset(s, zero, n) bzero ((s), (n))
#endif
#ifndef memcpy
#define memcpy(d, s, n) bcopy ((s), (d), (n))
#endif
#endif

#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#endif /* _MALLOC_INTERNAL.  */


#ifdef __cplusplus
extern "C"
{
#endif

#ifdef STDC_HEADERS
#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t
#else
#ifdef __GNUC__
#include <stddef.h>
#ifdef __SIZE_TYPE__
#define __malloc_size_t __SIZE_TYPE__
#endif
#endif
#ifndef __malloc_size_t
#define __malloc_size_t unsigned int
#endif
#define __malloc_ptrdiff_t int
#endif

#ifndef NULL
#define NULL 0
#endif

#ifndef FREE_RETURN_TYPE
#define FREE_RETURN_TYPE void
#endif


/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern FREE_RETURN_TYPE free PP ((__ptr_t __ptr));

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
                             __malloc_size_t __size));
extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
                               __malloc_size_t size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread PP ((void));
#endif
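
/* Illustrative sketch (not part of this header's API): a program that
   creates threads calls malloc_enable_thread once, early, before any
   other thread can enter the allocator.  app_main is hypothetical.  */
#if 0
int
main (int argc, char **argv)
{
#ifdef USE_PTHREAD
  malloc_enable_thread ();	/* Switch the internal locks to real mutexes.  */
#endif
  return app_main (argc, argv);
}
#endif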

#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof(int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP		(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8
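
/* A worked example of the policy above, assuming a 32-bit int (so
   BLOCKLOG is 12 and BLOCKSIZE is 4096): a request for 100 bytes is
   "small" (<= BLOCKSIZE / 2) and is rounded up to a 128-byte
   fragment, 32 of which fit in one block, while a request for 10000
   bytes is "large" and takes BLOCKIFY (10000) = 3 whole blocks.  */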

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a large (multiblock) object, or positive giving the
           logarithm to the base two of the fragment size.  */
        int type;
        union
          {
            struct
              {
                __malloc_size_t nfree; /* Free frags in a fragmented block.  */
                __malloc_size_t first; /* First free fragment of the block.  */
              } frag;
            /* For a large object, in its first block, this has the number
               of blocks in the object.  In the other blocks, this has a
               negative number which says how far back the first block is.  */
            __malloc_ptrdiff_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        __malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
        __malloc_size_t next;	/* Index of next free cluster.  */
        __malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
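
/* A worked example of the mapping (illustrative): blocks are numbered
   from 1, so with BLOCKSIZE 4096 a pointer 100 bytes past _heapbase
   lies in block 100 / 4096 + 1 = 1, and ADDRESS (1) is _heapbase
   itself.  For any heap pointer P, ADDRESS (BLOCK (P)) is the start
   of the block containing P.  */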

/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memalign returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK() \
  do { \
    if (_malloc_thread_enabled_p) \
      pthread_mutex_lock (&_malloc_mutex); \
  } while (0)
#define UNLOCK() \
  do { \
    if (_malloc_thread_enabled_p) \
      pthread_mutex_unlock (&_malloc_mutex); \
  } while (0)
#define LOCK_ALIGNED_BLOCKS() \
  do { \
    if (_malloc_thread_enabled_p) \
      pthread_mutex_lock (&_aligned_blocks_mutex); \
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS() \
  do { \
    if (_malloc_thread_enabled_p) \
      pthread_mutex_unlock (&_aligned_blocks_mutex); \
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));
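
/* Illustrative sketch: `__morecore' is a plain function pointer, so an
   embedder can redirect the allocator into a private arena before the
   first allocation.  Everything below except __morecore itself is
   hypothetical.  */
#if 0
static char arena[1 << 20];		/* Hypothetical fixed arena.  */
static __malloc_size_t arena_used;

static __ptr_t
arena_morecore (__malloc_ptrdiff_t increment)
{
  /* Like sbrk, an increment of 0 queries the current break.  This
     sketch does not support shrinking.  */
  if (increment < 0 || arena_used + increment > sizeof arena)
    return NULL;
  arena_used += increment;
  return arena + arena_used - increment;
}

/* Before the first malloc:  __morecore = arena_morecore;  */
#endif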

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
                                       __malloc_size_t __alignment));
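
/* Illustrative sketch: the hooks are ordinary function pointers, so a
   debugging build can interpose on free.  logging_free_hook is
   hypothetical and would need <stdio.h>.  */
#if 0
static void
logging_free_hook (__ptr_t ptr)
{
  fprintf (stderr, "free (%p)\n", ptr);
  __free_hook = NULL;		/* Avoid recursing through free.  */
  free (ptr);
  __free_hook = logging_free_hook;
}
#endif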

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
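
/* Usage sketch (illustrative): enable mcheck before the first call to
   malloc, then mprobe any live block on demand.  check_failed is a
   hypothetical handler.  */
#if 0
static void
check_failed (enum mcheck_status status)
{
  /* MCHECK_HEAD/MCHECK_TAIL mean the bytes around a block were
     clobbered; MCHECK_FREE means a double free.  */
  abort ();
}

/* Early in startup:  mcheck (check_failed);
   Later, on a live block BUF:  if (mprobe (buf) != MCHECK_OK) ...  */
#endif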

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	 /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	 /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
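
/* Usage sketch (illustrative, not part of this header): mstats returns
   the struct by value, so a caller can snapshot heap occupancy at any
   time.  report_heap is hypothetical and would need <stdio.h>.  */
#if 0
static void
report_heap (void)
{
  struct mstats m = mstats ();
  fprintf (stderr, "heap: %lu bytes total, %lu used in %lu chunks\n",
           (unsigned long) m.bytes_total, (unsigned long) m.bytes_used,
           (unsigned long) m.chunks_used);
}
#endif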

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
                                 void (*__warnfun) PP ((const char *))));


/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
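
/* Usage sketch (illustrative): a relocating allocator may move the
   underlying block on a later call, so the data must always be
   re-fetched through the handle rather than through a saved copy of
   the pointer.  */
#if 0
__ptr_t handle = NULL;
/* r_alloc (&handle, 1024);      handle now points at 1024 bytes.
   r_re_alloc (&handle, 4096);   the block may have moved; use handle.
   r_alloc_free (&handle);  */
#endif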


#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* How to really get more memory.  */
#if defined(CYGWIN)
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
#endif
__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
                    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
        abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif


/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t)size < 0)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
                                                  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      new = (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}

/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void register_heapinfo PP ((void));
#ifdef __GNUC__
__inline__
#endif
static void
register_heapinfo ()
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare ()
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child ()
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread ()
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
                  malloc_atfork_handler_parent,
                  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif

static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
         added blocks in the table itself, as we hope to place them in
         existing free space, which is already covered by part of the
         existing table.  */
      newsize = heapsize;
      do
        newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
         from realloc in the case of growing a large block, because the
         block being grown is momentarily marked as free.  In this case
         _heaplimit is zero so we know not to reuse space for internal
         allocation.  */
      if (_heaplimit != 0)
        {
          /* First try to allocate the new info table in core we already
             have, in the usual way using realloc.  If realloc cannot
             extend it in place or relocate it to existing sufficient core,
             we will get called again, and the code above will notice the
             `morecore_recursing' flag and return null.  */
          int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
          newinfo = (malloc_info *) _realloc_internal_nolock
            (_heapinfo, newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
            errno = save;
          else
            {
              /* We found some space in core, and realloc has put the old
                 table's blocks on the free list.  Now zero the new part
                 of the table and install the new table location.  */
              memset (&newinfo[heapsize], 0,
                      (newsize - heapsize) * sizeof (malloc_info));
              _heapinfo = newinfo;
              heapsize = newsize;
              goto got_heap;
            }
        }

      /* Allocate new space for the malloc info table.  */
      while (1)
        {
          newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

          /* Did it fail?  */
          if (newinfo == NULL)
            {
              (*__morecore) (-size);
              return NULL;
            }

          /* Is it big enough to record status for its own space?
             If so, we win.  */
          if ((__malloc_size_t) BLOCK ((char *) newinfo
                                       + newsize * sizeof (malloc_info))
              < newsize)
            break;

          /* Must try again.  First give back most of what we just got.  */
          (*__morecore) (- newsize * sizeof (malloc_info));
          newsize *= 2;
        }

      /* Copy the old table to the beginning of the new,
         and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;

      /* Look in the fragment lists for a
         free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters.  */
          result = (__ptr_t) next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first = (unsigned long int)
              ((unsigned long int) ((char *) next->next - (char *) NULL)
               % BLOCKSIZE) >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
          result = _malloc_internal_nolock (BLOCKSIZE);
          PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
          result = _malloc_internal_nolock (BLOCKSIZE);
#else
          result = malloc (BLOCKSIZE);
#endif
          if (result == NULL)
            {
              PROTECT_MALLOC_STATE (1);
              goto out;
            }

          /* Link all fragments but the first into the free list.  */
          next = (struct list *) ((char *) result + (1 << log));
          next->next = NULL;
          next->prev = &_fraghead[log];
          _fraghead[log].next = next;

          for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Get a little extra.  */
              __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              /* Check to see if the new core will be contiguous with the
                 final free block; if so we don't need to get as much.  */
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space.  */
                  block + wantblocks <= heapsize &&
                  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
                                        ADDRESS (block + lastblocks)))
                {
                  /* We got it contiguously.  Which block we are extending
                     (the `final free block' referred to above) might have
                     changed, if it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (wantblocks - lastblocks);
                  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
              result = morecore_nolock (wantblocks * BLOCKSIZE);
              if (result == NULL)
                goto out;
              block = BLOCK (result);
              /* Put the new block at the end of the free list.  */
              _heapinfo[block].free.size = wantblocks;
              _heapinfo[block].free.prev = _heapinfo[0].free.prev;
              _heapinfo[block].free.next = 0;
              _heapinfo[0].free.prev = block;
              _heapinfo[_heapinfo[block].free.prev].free.next = block;
              ++_chunks_free;
              /* Now loop to use some of that block for this allocation.  */
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list.  */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list.  */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment.  */
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not a right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Cope with systems lacking `memmove'.  */
#ifndef memmove
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#endif
/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif
#endif


/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;	/* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          __malloc_size_t info_block = BLOCK (_heapinfo);
          __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
          __malloc_size_t prev_block = _heapinfo[block].free.prev;
          __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
          __malloc_size_t next_block = _heapinfo[block].free.next;
          __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold)))
              )
            {
              malloc_info *newinfo;
              __malloc_size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal_nolock (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
                                                                 * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register __malloc_size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
          _free_internal_nolock (ADDRESS (block));
#else
          free (ADDRESS (block));
#endif
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = (struct list *) ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = (struct list *) ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first = (unsigned long int)
            ((unsigned long int) ((char *) ptr - (char *) NULL)
             % BLOCKSIZE >> type);
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

FREE_RETURN_TYPE
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Cope with systems lacking `memmove'.  */
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else

/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
         nonoverlapping chunks of TO - FROM bytes each.  However, if
         TO - FROM is small, then the bcopy function call overhead
         makes this not worth it.  The crossover point could be about
         anywhere.  Since I don't think the obvious copy loop is too
         bad, I'm trying to err in its favor.  */
      if (to - from < 64)
        {
          do
            *--endt = *--endf;
          while (endf != from);
        }
      else
        {
          for (;;)
            {
              endt -= (to - from);
              endf -= (to - from);

              if (endt < to)
                break;

              bcopy (endf, endt, to - from);
            }

          /* If SIZE wasn't a multiple of TO - FROM, there will be a
             little left over.  The amount left over is
             (endt + (to - from)) - to, which is endt - from.  */
          bcopy (from, to, endt - from);
        }
    }
}
#endif /* emacs */

#ifndef memmove
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif

#endif


#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal_nolock (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal_nolock (ptr);
              goto out;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list.  */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal_nolock (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal_nolock (ptr);
          result = _malloc_internal_nolock (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
              else
                {
                  __ptr_t previous
                    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
                  _free_internal_nolock (previous);
                }
              goto out;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
          size <= (__malloc_size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old.  */
          result = _malloc_internal_nolock (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
          _free_internal_nolock (ptr);
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#ifndef __GNU_LIBRARY__
#define __sbrk sbrk
#endif

#ifdef __GNU_LIBRARY__
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined(CYGWIN)
  /* On Cygwin, take core from the static bss buffer until unexec has
     been run (see bss_sbrk).  */
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
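/* Illustrative sketch (not part of the original source, never compiled):
   all core comes in through the __morecore function pointer, which
   defaults to __default_morecore above; pointing it somewhere else
   redirects the whole allocator, e.g. onto a fixed static arena.
   GMALLOC_EXAMPLES is a hypothetical guard macro.  */
#ifdef GMALLOC_EXAMPLES
static char example_arena[1024 * 1024];
static __malloc_size_t example_arena_used;

static __ptr_t
arena_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result = example_arena + example_arena_used;
  /* For simplicity this sketch refuses shrink requests and, like sbrk,
     reports exhaustion by failing (returning NULL).  */
  if (increment < 0
      || example_arena_used + increment > sizeof example_arena)
    return NULL;
  example_arena_used += increment;
  return result;
}

/* Install before the first allocation:  __morecore = arena_morecore;  */
#endif /* GMALLOC_EXAMPLES */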
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#if __DJGPP__ - 0 == 1

/* There is some problem with memalign in DJGPP v1, and we are supposed
   to omit it.  No one told me why; they just told me to do it.  */

#else

__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				__malloc_size_t __alignment));

__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;
  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + alignment - adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
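/* Illustrative sketch (not part of the original source, never compiled):
   a typical memalign call.  When padding was needed, the pointer returned
   above lies in the middle of a malloc block, which is exactly why `free'
   consults the _aligned_blocks list recorded here.  GMALLOC_EXAMPLES is a
   hypothetical guard macro.  */
#ifdef GMALLOC_EXAMPLES
static __ptr_t
example_cacheline_buffer ()
{
  /* Request 4096 bytes aligned on a (presumed) 64-byte cache line.  */
  __ptr_t buf = memalign (64, 4096);
  /* On success, (unsigned long int) buf % 64 == 0, and the buffer is
     released with an ordinary free (buf).  */
  return buf;
}
#endif /* GMALLOC_EXAMPLES */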

#ifndef ENOMEM
#define ENOMEM 12
#endif

#ifndef EINVAL
#define EINVAL 22
#endif

int
posix_memalign (memptr, alignment, size)
     __ptr_t *memptr;
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t mem;

  /* The alignment must be a nonzero power of two that is
     a multiple of sizeof (__ptr_t).  */
  if (alignment == 0
      || alignment % sizeof (__ptr_t) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = memalign (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
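/* Illustrative sketch (not part of the original source, never compiled):
   posix_memalign reports failure through its return value and touches
   *MEMPTR only on success, so callers test the error code, not the
   pointer.  GMALLOC_EXAMPLES is a hypothetical guard macro.  */
#ifdef GMALLOC_EXAMPLES
static int
example_posix_memalign_use ()
{
  __ptr_t mem;
  /* The alignment must be a power of two and a multiple of
     sizeof (__ptr_t), or EINVAL comes back.  */
  int err = posix_memalign (&mem, 4 * sizeof (__ptr_t), 1000);
  if (err != 0)
    return err;			/* EINVAL or ENOMEM.  */
  /* ... use mem ...  */
  free (mem);
  return 0;
}
#endif /* GMALLOC_EXAMPLES */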

#endif /* Not DJGPP v1 */
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
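/* Illustrative sketch (not part of the original source, never compiled):
   valloc is just memalign with the page size, handy for buffers passed
   to page-granular interfaces.  GMALLOC_EXAMPLES is a hypothetical
   guard macro.  */
#ifdef GMALLOC_EXAMPLES
static __ptr_t
example_page_buffer ()
{
  /* One page, starting on a page boundary; release with free ().  */
  return valloc ((__malloc_size_t) __getpagesize ());
}
#endif /* GMALLOC_EXAMPLES */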

#endif /* Not ELIDE_VALLOC.  */

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef emacs
#include <stdio.h>
#else
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#include <stdio.h>
#endif
#endif

/* Old hook values.  */
static void (*old_free_hook) PP ((__ptr_t ptr));
static __ptr_t (*old_malloc_hook) PP ((__malloc_size_t size));
static __ptr_t (*old_realloc_hook) PP ((__ptr_t ptr, __malloc_size_t size));

/* Function to call when something awful happens.  */
static void (*abortfunc) PP ((enum mcheck_status));

/* Arbitrary magical numbers.  */
#define MAGICWORD 0xfedabeeb
#define MAGICFREE 0xd8675309
#define MAGICBYTE ((char) 0xd7)
#define MALLOCFLOOD ((char) 0x93)
#define FREEFLOOD ((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
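/* Illustrative sketch (not part of the original source, never compiled):
   every block handed out by the hooks below is laid out as

       [struct hdr][SIZE user bytes][MAGICBYTE]

   so the user pointer is hdr + 1 and the guard byte sits SIZE bytes
   past it.  GMALLOC_EXAMPLES is a hypothetical guard macro.  */
#ifdef GMALLOC_EXAMPLES
static int
example_block_is_intact (ptr)
     __ptr_t ptr;
{
  struct hdr *h = ((struct hdr *) ptr) - 1;	/* Step back over header.  */
  return h->magic == MAGICWORD
    && ((char *) ptr)[h->size] == MAGICBYTE;
}
#endif /* GMALLOC_EXAMPLES */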

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#define flood memset
#else
static void flood PP ((__ptr_t, int, __malloc_size_t));
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;
  while (size--)
    *cp++ = val;
}
#endif

static enum mcheck_status checkhdr PP ((const struct hdr *));
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void freehook PP ((__ptr_t));
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static __ptr_t mallochook PP ((__malloc_size_t));
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  /* Room for the header, the user data, and the trailing guard byte.  */
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}

static __ptr_t reallochook PP ((__ptr_t, __malloc_size_t));
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}

static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (func)
     void (*func) PP ((enum mcheck_status));
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (__ptr_t ptr)
{
  /* PTR is the user pointer returned by malloc; step back over the
     header before checking it, as freehook does.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}
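/* Illustrative sketch (not part of the original source, never compiled):
   mcheck must be installed before the first allocation; afterwards every
   free and realloc is verified, and mprobe spot-checks a live block.
   GMALLOC_EXAMPLES is a hypothetical guard macro.  */
#ifdef GMALLOC_EXAMPLES
static void
example_mcheck_session ()
{
  char *p;
  if (mcheck (NULL) != 0)	/* NULL selects the default mabort.  */
    return;			/* Too late: malloc was already used.  */
  p = (char *) malloc (16);
  if (p == NULL)
    return;
  if (mprobe ((__ptr_t) p) == MCHECK_OK)
    free (p);			/* freehook re-verifies and floods.  */
}
#endif /* GMALLOC_EXAMPLES */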

#endif /* GC_MCHECK */

/* arch-tag: 93dce5c0-f49a-41b5-86b1-f91c4169c02e
   (do not change this comment) */