Update FSF's address.
[bpt/emacs.git] / src / gmalloc.c
CommitLineData
74ad5c7f
KH
1/* This file is no longer automatically generated from libc. */
2
3#define _MALLOC_INTERNAL
4
5/* The malloc headers and source files from the C library follow here. */
6
7/* Declarations for `malloc' and friends.
68c45bf0 8 Copyright 1990, 91, 92, 93, 95, 96, 99 Free Software Foundation, Inc.
74ad5c7f
KH
9 Written May 1989 by Mike Haertel.
10
11This library is free software; you can redistribute it and/or
12modify it under the terms of the GNU Library General Public License as
13published by the Free Software Foundation; either version 2 of the
14License, or (at your option) any later version.
15
16This library is distributed in the hope that it will be useful,
17but WITHOUT ANY WARRANTY; without even the implied warranty of
18MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19Library General Public License for more details.
20
21You should have received a copy of the GNU Library General Public
22License along with this library; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.
25
26 The author may be reached (Email) at the address mike@ai.mit.edu,
27 or (US mail) as Mike Haertel c/o Free Software Foundation. */
28
29#ifndef _MALLOC_H
30
31#define _MALLOC_H 1
32
33#ifdef _MALLOC_INTERNAL
34
35#ifdef HAVE_CONFIG_H
36#include <config.h>
37#endif
38
b2e92d3e
RS
39#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
40 || defined STDC_HEADERS || defined PROTOTYPES) \
41 && ! defined (BROKEN_PROTOTYPES))
0a27e8ed
RS
42#undef PP
43#define PP(args) args
74ad5c7f
KH
44#undef __ptr_t
45#define __ptr_t void *
46#else /* Not C++ or ANSI C. */
0a27e8ed
RS
47#undef PP
48#define PP(args) ()
74ad5c7f
KH
49#undef __ptr_t
50#define __ptr_t char *
51#endif /* C++ or ANSI C. */
52
53#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
54#include <string.h>
55#else
56#ifndef memset
57#define memset(s, zero, n) bzero ((s), (n))
58#endif
59#ifndef memcpy
60#define memcpy(d, s, n) bcopy ((s), (d), (n))
61#endif
62#endif
63
ca9c0567 64#ifdef HAVE_LIMITS_H
74ad5c7f 65#include <limits.h>
ca9c0567 66#endif
74ad5c7f
KH
67#ifndef CHAR_BIT
68#define CHAR_BIT 8
69#endif
74ad5c7f
KH
70
71#ifdef HAVE_UNISTD_H
72#include <unistd.h>
73#endif
74
75#endif /* _MALLOC_INTERNAL. */
76
77
78#ifdef __cplusplus
79extern "C"
80{
81#endif
82
ca9c0567 83#ifdef STDC_HEADERS
74ad5c7f
KH
84#include <stddef.h>
85#define __malloc_size_t size_t
86#define __malloc_ptrdiff_t ptrdiff_t
87#else
eec2d1de
EZ
88#ifdef __GNUC__
89#include <stddef.h>
90#ifdef __SIZE_TYPE__
91#define __malloc_size_t __SIZE_TYPE__
92#endif
93#endif
94#ifndef __malloc_size_t
74ad5c7f 95#define __malloc_size_t unsigned int
eec2d1de 96#endif
74ad5c7f
KH
97#define __malloc_ptrdiff_t int
98#endif
99
100#ifndef NULL
101#define NULL 0
102#endif
103
ca9c0567
PE
104#ifndef FREE_RETURN_TYPE
105#define FREE_RETURN_TYPE void
106#endif
107
74ad5c7f
KH
108
109/* Allocate SIZE bytes of memory. */
0a27e8ed 110extern __ptr_t malloc PP ((__malloc_size_t __size));
74ad5c7f
KH
111/* Re-allocate the previously allocated block
112 in __ptr_t, making the new block SIZE bytes long. */
0a27e8ed 113extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
74ad5c7f 114/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
0a27e8ed 115extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
74ad5c7f 116/* Free a block allocated by `malloc', `realloc' or `calloc'. */
ca9c0567 117extern FREE_RETURN_TYPE free PP ((__ptr_t __ptr));
74ad5c7f
KH
118
119/* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
120#if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict. */
0a27e8ed
RS
121extern __ptr_t memalign PP ((__malloc_size_t __alignment,
122 __malloc_size_t __size));
74ad5c7f
KH
123#endif
124
125/* Allocate SIZE bytes on a page boundary. */
126#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
0a27e8ed 127extern __ptr_t valloc PP ((__malloc_size_t __size));
74ad5c7f
KH
128#endif
129
130
131#ifdef _MALLOC_INTERNAL
132
133/* The allocator divides the heap into blocks of fixed size; large
134 requests receive one or more whole blocks, and small requests
135 receive a fragment of a block. Fragment sizes are powers of two,
136 and all fragments of a block are the same size. When all the
137 fragments in a block have been freed, the block itself is freed. */
138#define INT_BIT (CHAR_BIT * sizeof(int))
139#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
140#define BLOCKSIZE (1 << BLOCKLOG)
141#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
142
143/* Determine the amount of memory spanned by the initial heap table
144 (not an absolute limit). */
145#define HEAP (INT_BIT > 16 ? 4194304 : 65536)
146
147/* Number of contiguous free blocks allowed to build up at the end of
148 memory before they will be returned to the system. */
149#define FINAL_FREE_BLOCKS 8
150
/* Data structure giving per-block information.  The table `_heapinfo'
   below holds one of these per heap block; it is indexed by block
   number (see the BLOCK and ADDRESS macros).  */
typedef union
  {
    /* Heap information for a busy (allocated) block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
182
183/* Pointer to first block of the heap. */
184extern char *_heapbase;
185
186/* Table indexed by block number giving per-block information. */
187extern malloc_info *_heapinfo;
188
189/* Address to block number and vice versa. */
190#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
191#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
192
193/* Current search index for the heap table. */
194extern __malloc_size_t _heapindex;
195
196/* Limit of valid info table indices. */
197extern __malloc_size_t _heaplimit;
198
/* Doubly linked lists of free fragments.  Each free fragment of a
   fragmented block is threaded onto the list for its fragment size
   (see `_fraghead' below).  */
struct list
  {
    struct list *next;
    struct list *prev;
  };
205
206/* Free list headers for each fragment size. */
207extern struct list _fraghead[];
208
/* List of blocks allocated with `memalign' (or `valloc').  Each entry
   maps the aligned pointer handed to the caller back to the exact
   pointer `malloc' returned, so `free' can recover the real block.  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memaligned returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
216extern struct alignlist *_aligned_blocks;
217
218/* Instrumentation. */
219extern __malloc_size_t _chunks_used;
220extern __malloc_size_t _bytes_used;
221extern __malloc_size_t _chunks_free;
222extern __malloc_size_t _bytes_free;
223
224/* Internal versions of `malloc', `realloc', and `free'
225 used when these functions need to call each other.
226 They are the same but don't call the hooks. */
0a27e8ed
RS
227extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
228extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
229extern void _free_internal PP ((__ptr_t __ptr));
74ad5c7f
KH
230
231#endif /* _MALLOC_INTERNAL. */
232
233/* Given an address in the middle of a malloc'd object,
234 return the address of the beginning of the object. */
0a27e8ed 235extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));
74ad5c7f
KH
236
237/* Underlying allocation function; successive calls should
238 return contiguous pieces of memory. */
0a27e8ed 239extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));
74ad5c7f
KH
240
241/* Default value of `__morecore'. */
0a27e8ed 242extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));
74ad5c7f
KH
243
244/* If not NULL, this function is called after each time
245 `__morecore' is called to increase the data size. */
0a27e8ed 246extern void (*__after_morecore_hook) PP ((void));
74ad5c7f
KH
247
248/* Number of extra blocks to get each time we ask for more core.
249 This reduces the frequency of calling `(*__morecore)'. */
250extern __malloc_size_t __malloc_extra_blocks;
251
252/* Nonzero if `malloc' has been called and done its initialization. */
253extern int __malloc_initialized;
254/* Function called to initialize malloc data structures. */
0a27e8ed 255extern int __malloc_initialize PP ((void));
74ad5c7f
KH
256
257/* Hooks for debugging versions. */
0a27e8ed
RS
258extern void (*__malloc_initialize_hook) PP ((void));
259extern void (*__free_hook) PP ((__ptr_t __ptr));
260extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
261extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
262extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
263 __malloc_size_t __alignment));
74ad5c7f
KH
264
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };
275
276/* Activate a standard collection of debugging hooks. This must be called
277 before `malloc' is ever called. ABORTFUNC is called with an error code
278 (see enum above) when an inconsistency is detected. If ABORTFUNC is
279 null, the standard function prints on stderr and then calls `abort'. */
0a27e8ed 280extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));
74ad5c7f
KH
281
282/* Check for aberrations in a particular malloc'd block. You must have
283 called `mcheck' already. These are the same checks that `mcheck' does
284 when you free or reallocate a block. */
0a27e8ed 285extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
74ad5c7f
KH
286
287/* Activate a standard collection of tracing hooks. */
0a27e8ed
RS
288extern void mtrace PP ((void));
289extern void muntrace PP ((void));
74ad5c7f
KH
290
/* Statistics available to the user; a snapshot of the internal
   `_chunks_*' / `_bytes_*' counters, returned by the `mstats'
   function declared below.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };
300
301/* Pick up the current statistics. */
0a27e8ed 302extern struct mstats mstats PP ((void));
74ad5c7f
KH
303
304/* Call WARNFUN with a warning message when memory usage is high. */
0a27e8ed
RS
305extern void memory_warnings PP ((__ptr_t __start,
306 void (*__warnfun) PP ((const char *))));
74ad5c7f
KH
307
308
309/* Relocating allocator. */
310
311/* Allocate SIZE bytes, and store the address in *HANDLEPTR. */
0a27e8ed 312extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
74ad5c7f
KH
313
314/* Free the storage allocated in HANDLEPTR. */
0a27e8ed 315extern void r_alloc_free PP ((__ptr_t *__handleptr));
74ad5c7f
KH
316
317/* Adjust the block at HANDLEPTR to be SIZE bytes long. */
0a27e8ed 318extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
74ad5c7f
KH
319
320
321#ifdef __cplusplus
322}
323#endif
324
325#endif /* malloc.h */
326/* Memory allocator `malloc'.
327 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
328 Written May 1989 by Mike Haertel.
329
330This library is free software; you can redistribute it and/or
331modify it under the terms of the GNU Library General Public License as
332published by the Free Software Foundation; either version 2 of the
333License, or (at your option) any later version.
334
335This library is distributed in the hope that it will be useful,
336but WITHOUT ANY WARRANTY; without even the implied warranty of
337MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
338Library General Public License for more details.
339
340You should have received a copy of the GNU Library General Public
341License along with this library; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.
344
345 The author may be reached (Email) at the address mike@ai.mit.edu,
346 or (US mail) as Mike Haertel c/o Free Software Foundation. */
347
348#ifndef _MALLOC_INTERNAL
349#define _MALLOC_INTERNAL
350#include <malloc.h>
351#endif
352#include <errno.h>
353
354/* How to really get more memory. */
ef6d1039
SM
355#if defined(CYGWIN)
356extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
357extern int bss_sbrk_did_unexec;
358#endif
0a27e8ed 359__ptr_t (*__morecore) PP ((ptrdiff_t __size)) = __default_morecore;
74ad5c7f
KH
360
361/* Debugging hook for `malloc'. */
0a27e8ed 362__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
74ad5c7f
KH
363
364/* Pointer to the base of the first block. */
365char *_heapbase;
366
367/* Block information table. Allocated with align/__free (not malloc/free). */
368malloc_info *_heapinfo;
369
370/* Number of info entries. */
371static __malloc_size_t heapsize;
372
373/* Search index in the info table. */
374__malloc_size_t _heapindex;
375
376/* Limit of valid info table indices. */
377__malloc_size_t _heaplimit;
378
379/* Free lists for each fragment size. */
380struct list _fraghead[BLOCKLOG];
381
382/* Instrumentation. */
383__malloc_size_t _chunks_used;
384__malloc_size_t _bytes_used;
385__malloc_size_t _chunks_free;
386__malloc_size_t _bytes_free;
387
388/* Are you experienced? */
389int __malloc_initialized;
390
391__malloc_size_t __malloc_extra_blocks;
392
0a27e8ed
RS
393void (*__malloc_initialize_hook) PP ((void));
394void (*__after_morecore_hook) PP ((void));
74ad5c7f 395
5dcab13e
GM
396#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE
397
398/* Some code for hunting a bug writing into _heapinfo.
399
400 Call this macro with argument PROT non-zero to protect internal
401 malloc state against writing to it, call it with a zero argument to
402 make it readable and writable.
403
404 Note that this only works if BLOCKSIZE == page size, which is
405 the case on the i386. */
406
407#include <sys/types.h>
408#include <sys/mman.h>
409
410static int state_protected_p;
411static __malloc_size_t last_state_size;
412static malloc_info *last_heapinfo;
413
/* Make the internal malloc state (the `_heapinfo' table) read-only
   when PROTECT_P is non-zero, read-write when it is zero.  Used to
   catch stray writes into the allocator's bookkeeping; only reliable
   when BLOCKSIZE equals the page size (see comment above).  */
void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  /* Remember what we protected, so the check above can undo it after
     a relocation.  */
  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}
436
437#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)
438
439#else
440#define PROTECT_MALLOC_STATE(PROT) /* empty */
441#endif
442
74ad5c7f
KH
443
444/* Aligned allocation. */
0a27e8ed 445static __ptr_t align PP ((__malloc_size_t));
74ad5c7f
KH
446static __ptr_t
447align (size)
448 __malloc_size_t size;
449{
450 __ptr_t result;
451 unsigned long int adj;
452
ceeb3d7d
EZ
453 /* align accepts an unsigned argument, but __morecore accepts a
454 signed one. This could lead to trouble if SIZE overflows a
455 signed int type accepted by __morecore. We just punt in that
456 case, since they are requesting a ludicrous amount anyway. */
457 if ((__malloc_ptrdiff_t)size < 0)
458 result = 0;
459 else
460 result = (*__morecore) (size);
74ad5c7f
KH
461 adj = (unsigned long int) ((unsigned long int) ((char *) result -
462 (char *) NULL)) % BLOCKSIZE;
463 if (adj != 0)
464 {
465 __ptr_t new;
466 adj = BLOCKSIZE - adj;
467 new = (*__morecore) (adj);
468 result = (char *) result + adj;
469 }
470
471 if (__after_morecore_hook)
472 (*__after_morecore_hook) ();
473
474 return result;
475}
476
477/* Get SIZE bytes, if we can get them starting at END.
478 Return the address of the space we got.
479 If we cannot get space at END, fail and return 0. */
0a27e8ed 480static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
74ad5c7f
KH
481static __ptr_t
482get_contiguous_space (size, position)
483 __malloc_ptrdiff_t size;
484 __ptr_t position;
485{
486 __ptr_t before;
487 __ptr_t after;
488
489 before = (*__morecore) (0);
490 /* If we can tell in advance that the break is at the wrong place,
491 fail now. */
492 if (before != position)
493 return 0;
494
495 /* Allocate SIZE bytes and get the address of them. */
496 after = (*__morecore) (size);
497 if (!after)
498 return 0;
499
500 /* It was not contiguous--reject it. */
501 if (after != position)
502 {
503 (*__morecore) (- size);
504 return 0;
505 }
506
507 return after;
508}
509
510
511/* This is called when `_heapinfo' and `heapsize' have just
512 been set to describe a new info table. Set up the table
513 to describe itself and account for it in the statistics. */
0a27e8ed 514static void register_heapinfo PP ((void));
74ad5c7f
KH
515#ifdef __GNUC__
516__inline__
517#endif
518static void
519register_heapinfo ()
520{
521 __malloc_size_t block, blocks;
522
523 block = BLOCK (_heapinfo);
524 blocks = BLOCKIFY (heapsize * sizeof (malloc_info));
525
526 /* Account for the _heapinfo block itself in the statistics. */
527 _bytes_used += blocks * BLOCKSIZE;
528 ++_chunks_used;
529
530 /* Describe the heapinfo block itself in the heapinfo. */
531 _heapinfo[block].busy.type = 0;
532 _heapinfo[block].busy.info.size = blocks;
533 /* Leave back-pointers for malloc_find_address. */
534 while (--blocks > 0)
535 _heapinfo[block + blocks].busy.info.size = -blocks;
536}
537
/* Set everything up and remember that we have.  Returns 1 on success
   (including when initialization was already done), 0 if the initial
   info table cannot be allocated.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  /* Allocate and clear the initial info table, sized to span HEAP
     bytes of heap.  */
  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return 0;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  /* Entry 0 is the anchor of the circular free list; it stays empty.  */
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  /* The heap begins at the table itself; block numbers are computed
     relative to _heapbase.  */
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return 1;
}
569
/* Nonzero while morecore is using _realloc_internal to grow the info
   table, so that a reentrant call fails instead of recursing.  */
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.
   Returns the new core, or NULL on failure.  */
static __ptr_t morecore PP ((__malloc_size_t));
static __ptr_t
morecore (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      /* Give back the core obtained for the caller too, since
		 we cannot track it without a big-enough table.  */
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
687
/* Allocate memory from the heap.  Internal version of `malloc':
   same behavior, but does not go through the `__malloc_hook', so the
   allocator can call it on itself (see the declarations above).  */
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if	0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  /* Every returned chunk must be able to hold a struct list when it
     is later freed onto a fragment list.  */
  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  /* `first' caches the offset (in fragments) of a remaining
	     free fragment within the block.  */
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      return NULL;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		return NULL;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
  return result;
}
890
891__ptr_t
892malloc (size)
893 __malloc_size_t size;
894{
895 if (!__malloc_initialized && !__malloc_initialize ())
896 return NULL;
897
898 return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size);
899}
900\f
901#ifndef _LIBC
902
903/* On some ANSI C systems, some libc functions call _malloc, _free
904 and _realloc. Make them use the GNU functions. */
905
906__ptr_t
907_malloc (size)
908 __malloc_size_t size;
909{
910 return malloc (size);
911}
912
913void
914_free (ptr)
915 __ptr_t ptr;
916{
917 free (ptr);
918}
919
920__ptr_t
921_realloc (ptr, size)
922 __ptr_t ptr;
923 __malloc_size_t size;
924{
925 return realloc (ptr, size);
926}
927
928#endif
929/* Free a block of memory allocated by `malloc'.
930 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
931 Written May 1989 by Mike Haertel.
932
933This library is free software; you can redistribute it and/or
934modify it under the terms of the GNU Library General Public License as
935published by the Free Software Foundation; either version 2 of the
936License, or (at your option) any later version.
937
938This library is distributed in the hope that it will be useful,
939but WITHOUT ANY WARRANTY; without even the implied warranty of
940MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
941Library General Public License for more details.
942
943You should have received a copy of the GNU Library General Public
944License along with this library; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.
947
948 The author may be reached (Email) at the address mike@ai.mit.edu,
949 or (US mail) as Mike Haertel c/o Free Software Foundation. */
950
951#ifndef _MALLOC_INTERNAL
952#define _MALLOC_INTERNAL
953#include <malloc.h>
954#endif
955
956
957/* Cope with systems lacking `memmove'. */
958#ifndef memmove
959#if (defined (MEMMOVE_MISSING) || \
960 !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
961#ifdef emacs
962#undef __malloc_safe_bcopy
963#define __malloc_safe_bcopy safe_bcopy
964#endif
965/* This function is defined in realloc.c. */
0a27e8ed 966extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
74ad5c7f
KH
967#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
968#endif
969#endif
970
971
972/* Debugging hook for free. */
0a27e8ed 973void (*__free_hook) PP ((__ptr_t __ptr));
74ad5c7f
KH
974
975/* List of blocks allocated by memalign. */
976struct alignlist *_aligned_blocks = NULL;
977
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.
   PTR may be NULL (no-op).  Pointers produced by `memalign' are first
   mapped back to the exact block start via _aligned_blocks.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  /* Unprotect malloc's internal state while we mutate it.  */
  PROTECT_MALLOC_STATE (0);

  /* If PTR was handed out by memalign, recover the exact pointer that
     malloc originally returned.  */
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }

  block = BLOCK (ptr);

  /* busy.type 0 means a whole-block (possibly multi-block) object;
     a positive type is the log2 size of a fragment within one block.  */
  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?
	 (__morecore with a zero increment just reports the break.)  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal (info_blocks
							  * BLOCKSIZE);
	      /* _malloc_internal may have re-protected the state.  */
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      /* Shrink the recorded heap, give the core back via a
		 negative increment, and unlink the block.  */
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* PTR is a fragment of (1 << type) bytes within BLOCK.  */
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  /* Reclassify the block as a whole-block object of size 1 so
	     the recursive free below takes the case-0 path.  */
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#ifdef GC_MALLOC_CHECK
	  _free_internal (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  /* Re-protect malloc's internal state.  */
  PROTECT_MALLOC_STATE (1);
}
1224
1225/* Return memory to the heap. */
ca9c0567
PE
1226
1227FREE_RETURN_TYPE
74ad5c7f
KH
1228free (ptr)
1229 __ptr_t ptr;
1230{
1231 if (__free_hook != NULL)
1232 (*__free_hook) (ptr);
1233 else
1234 _free_internal (ptr);
1235}
1236
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
/* No weak-alias support available: provide `cfree' as a real
   function that simply forwards to `free'.  */
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
1248/* Change the size of a block allocated by `malloc'.
1249 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1250 Written May 1989 by Mike Haertel.
1251
1252This library is free software; you can redistribute it and/or
1253modify it under the terms of the GNU Library General Public License as
1254published by the Free Software Foundation; either version 2 of the
1255License, or (at your option) any later version.
1256
1257This library is distributed in the hope that it will be useful,
1258but WITHOUT ANY WARRANTY; without even the implied warranty of
1259MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1260Library General Public License for more details.
1261
1262You should have received a copy of the GNU Library General Public
1263License along with this library; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.
1266
1267 The author may be reached (Email) at the address mike@ai.mit.edu,
1268 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1269
1270#ifndef _MALLOC_INTERNAL
1271#define _MALLOC_INTERNAL
1272#include <malloc.h>
1273#endif
1274
1275
1276
1277/* Cope with systems lacking `memmove'. */
1278#if (defined (MEMMOVE_MISSING) || \
1279 !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
1280
1281#ifdef emacs
1282#undef __malloc_safe_bcopy
1283#define __malloc_safe_bcopy safe_bcopy
1284#else
1285
1286/* Snarfed directly from Emacs src/dispnew.c:
1287 XXX Should use system bcopy if it handles overlap. */
1288
/* Like bcopy except never gets confused by overlap.
   Copies SIZE bytes from AFROM to ATO; the regions may overlap in
   either direction.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  /* Nothing to do for an empty copy or identical regions.  */
  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
	 nonoverlapping chunks of TO - FROM bytes each.  However, if
	 TO - FROM is small, then the bcopy function call overhead
	 makes this not worth it.  The crossover point could be about
	 anywhere.  Since I don't think the obvious copy loop is too
	 bad, I'm trying to err in its favor.  */
      if (to - from < 64)
	{
	  /* Small offset: simple byte-at-a-time backward copy.  */
	  do
	    *--endt = *--endf;
	  while (endf != from);
	}
      else
	{
	  /* Copy backward in chunks of (TO - FROM) bytes; successive
	     chunks never overlap, so each individual bcopy is safe.  */
	  for (;;)
	    {
	      endt -= (to - from);
	      endf -= (to - from);

	      if (endt < to)
		break;

	      bcopy (endf, endt, to - from);
	    }

	  /* If SIZE wasn't a multiple of TO - FROM, there will be a
	     little left over.  The amount left over is
	     (endt + (to - from)) - to, which is endt - from.  */
	  bcopy (from, to, endt - from);
	}
    }
}
1346#endif /* emacs */
1347
1348#ifndef memmove
0a27e8ed 1349extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
74ad5c7f
KH
1350#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
1351#endif
1352
1353#endif
1354
1355
1356#define min(A, B) ((A) < (B) ? (A) : (B))
1357
1358/* Debugging hook for realloc. */
0a27e8ed 1359__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
74ad5c7f
KH
1360
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.
   Like `realloc' but does not consult __realloc_hook.
   SIZE == 0 frees PTR and returns a minimal allocation;
   PTR == NULL degenerates to _malloc_internal (size).  */
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal (ptr);
      return _malloc_internal (0);
    }
  else if (ptr == NULL)
    return _malloc_internal (size);

  block = BLOCK (ptr);

  /* Unprotect malloc's internal state while we mutate it.  */
  PROTECT_MALLOC_STATE (0);

  /* busy.type 0 = whole-block object; positive = log2 fragment size.  */
  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal (ptr);
	      return result;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal (ptr);
	  result = _malloc_internal (size);
	  /* _malloc_internal may have re-protected the state.  */
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
	        (void) _malloc_internal (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal (blocks * BLOCKSIZE);
		  _free_internal (previous);
		}
	      return NULL;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal (size);
	  if (result == NULL)
	    return NULL;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal (ptr);
	}
      break;
    }

  /* Re-protect malloc's internal state.  */
  PROTECT_MALLOC_STATE (1);
  return result;
}
1483
1484__ptr_t
1485realloc (ptr, size)
1486 __ptr_t ptr;
1487 __malloc_size_t size;
1488{
1489 if (!__malloc_initialized && !__malloc_initialize ())
1490 return NULL;
1491
1492 return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal)
1493 (ptr, size);
1494}
1495/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1496
1497This library is free software; you can redistribute it and/or
1498modify it under the terms of the GNU Library General Public License as
1499published by the Free Software Foundation; either version 2 of the
1500License, or (at your option) any later version.
1501
1502This library is distributed in the hope that it will be useful,
1503but WITHOUT ANY WARRANTY; without even the implied warranty of
1504MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1505Library General Public License for more details.
1506
1507You should have received a copy of the GNU Library General Public
1508License along with this library; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.
1511
1512 The author may be reached (Email) at the address mike@ai.mit.edu,
1513 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1514
1515#ifndef _MALLOC_INTERNAL
1516#define _MALLOC_INTERNAL
1517#include <malloc.h>
1518#endif
1519
1520/* Allocate an array of NMEMB elements each SIZE bytes long.
1521 The entire array is initialized to zeros. */
1522__ptr_t
1523calloc (nmemb, size)
1524 register __malloc_size_t nmemb;
1525 register __malloc_size_t size;
1526{
1527 register __ptr_t result = malloc (nmemb * size);
1528
1529 if (result != NULL)
1530 (void) memset (result, 0, nmemb * size);
1531
1532 return result;
1533}
1534/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1535This file is part of the GNU C Library.
1536
1537The GNU C Library is free software; you can redistribute it and/or modify
1538it under the terms of the GNU General Public License as published by
1539the Free Software Foundation; either version 2, or (at your option)
1540any later version.
1541
1542The GNU C Library is distributed in the hope that it will be useful,
1543but WITHOUT ANY WARRANTY; without even the implied warranty of
1544MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1545GNU General Public License for more details.
1546
1547You should have received a copy of the GNU General Public License
1548along with the GNU C Library; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
1550
1551#ifndef _MALLOC_INTERNAL
1552#define _MALLOC_INTERNAL
1553#include <malloc.h>
1554#endif
1555
1556#ifndef __GNU_LIBRARY__
1557#define __sbrk sbrk
1558#endif
1559
1560#ifdef __GNU_LIBRARY__
1561/* It is best not to declare this and cast its result on foreign operating
1562 systems with potentially hostile include files. */
1563
1564#include <stddef.h>
0a27e8ed 1565extern __ptr_t __sbrk PP ((ptrdiff_t increment));
74ad5c7f
KH
1566#endif
1567
1568#ifndef NULL
1569#define NULL 0
1570#endif
1571
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined(CYGWIN)
  /* NOTE(review): on Cygwin, requests are served from a static bss
     arena via bss_sbrk until bss_sbrk_did_unexec is set -- presumably
     after the dumped Emacs has been unexec'd; confirm against the
     definitions of bss_sbrk/bss_sbrk_did_unexec elsewhere.  */
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  /* sbrk reports failure as (void *) -1; map that to NULL.  */
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
1591/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1592
1593This library is free software; you can redistribute it and/or
1594modify it under the terms of the GNU Library General Public License as
1595published by the Free Software Foundation; either version 2 of the
1596License, or (at your option) any later version.
1597
1598This library is distributed in the hope that it will be useful,
1599but WITHOUT ANY WARRANTY; without even the implied warranty of
1600MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1601Library General Public License for more details.
1602
1603You should have received a copy of the GNU Library General Public
1604License along with this library; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.  */
1607
1608#ifndef _MALLOC_INTERNAL
1609#define _MALLOC_INTERNAL
1610#include <malloc.h>
1611#endif
1612
1613#if __DJGPP__ - 0 == 1
1614
1615/* There is some problem with memalign in DJGPP v1 and we are supposed
   to omit it.  No one told me why, they just told me to do it.  */
1617
1618#else
1619
eec2d1de
EZ
1620__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
1621 __malloc_size_t __alignment));
74ad5c7f
KH
1622
/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT.  Works by
   over-allocating, measuring the misalignment, then re-allocating
   with exactly the padding needed.  Adjusted pointers are recorded in
   _aligned_blocks so that `free' can recover the exact block.  */
__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;

  /* Let an installed debugging hook take over entirely.  */
  if (__memalign_hook)
    return (*__memalign_hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      /* Reuse a vacated slot if one exists...  */
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  /* ...otherwise prepend a freshly malloc'd node.  */
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l == NULL)
	    {
	      free (result);
	      return NULL;
	    }
	  l->next = _aligned_blocks;
	  _aligned_blocks = l;
	}
      l->exact = result;
      /* Hand back the adjusted (aligned) address; `free' maps it back
	 to l->exact through _aligned_blocks.  */
      result = l->aligned = (char *) result + alignment - adj;
    }

  return result;
}
1688
1689#endif /* Not DJGPP v1 */
1690/* Allocate memory on a page boundary.
1691 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1692
1693This library is free software; you can redistribute it and/or
1694modify it under the terms of the GNU Library General Public License as
1695published by the Free Software Foundation; either version 2 of the
1696License, or (at your option) any later version.
1697
1698This library is distributed in the hope that it will be useful,
1699but WITHOUT ANY WARRANTY; without even the implied warranty of
1700MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1701Library General Public License for more details.
1702
1703You should have received a copy of the GNU Library General Public
1704License along with this library; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.
1707
1708 The author may be reached (Email) at the address mike@ai.mit.edu,
1709 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1710
1711#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)
1712
1713/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
1714 on MSDOS, where it conflicts with a system header file. */
1715
1716#define ELIDE_VALLOC
1717
1718#endif
1719
1720#ifndef ELIDE_VALLOC
1721
1722#if defined (__GNU_LIBRARY__) || defined (_LIBC)
1723#include <stddef.h>
1724#include <sys/cdefs.h>
47582ab3
KH
1725#if defined (__GLIBC__) && __GLIBC__ >= 2
1726/* __getpagesize is already declared in <unistd.h> with return type int */
1727#else
0a27e8ed 1728extern size_t __getpagesize PP ((void));
47582ab3 1729#endif
74ad5c7f
KH
1730#else
1731#include "getpagesize.h"
1732#define __getpagesize() getpagesize()
1733#endif
1734
1735#ifndef _MALLOC_INTERNAL
1736#define _MALLOC_INTERNAL
1737#include <malloc.h>
1738#endif
1739
1740static __malloc_size_t pagesize;
1741
1742__ptr_t
1743valloc (size)
1744 __malloc_size_t size;
1745{
1746 if (pagesize == 0)
1747 pagesize = __getpagesize ();
1748
1749 return memalign (pagesize, size);
1750}
1751
1752#endif /* Not ELIDE_VALLOC. */
a3ba27da
GM
1753
1754#ifdef GC_MCHECK
1755
1756/* Standard debugging hooks for `malloc'.
1757 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1758 Written May 1989 by Mike Haertel.
1759
1760This library is free software; you can redistribute it and/or
1761modify it under the terms of the GNU Library General Public License as
1762published by the Free Software Foundation; either version 2 of the
1763License, or (at your option) any later version.
1764
1765This library is distributed in the hope that it will be useful,
1766but WITHOUT ANY WARRANTY; without even the implied warranty of
1767MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1768Library General Public License for more details.
1769
1770You should have received a copy of the GNU Library General Public
1771License along with this library; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.
1774
1775 The author may be reached (Email) at the address mike@ai.mit.edu,
1776 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1777
1778#ifdef emacs
1779#include <stdio.h>
1780#else
1781#ifndef _MALLOC_INTERNAL
1782#define _MALLOC_INTERNAL
1783#include <malloc.h>
1784#include <stdio.h>
1785#endif
1786#endif
1787
1788/* Old hook values. */
1789static void (*old_free_hook) __P ((__ptr_t ptr));
1790static __ptr_t (*old_malloc_hook) __P ((__malloc_size_t size));
1791static __ptr_t (*old_realloc_hook) __P ((__ptr_t ptr, __malloc_size_t size));
1792
1793/* Function to call when something awful happens. */
1794static void (*abortfunc) __P ((enum mcheck_status));
1795
1796/* Arbitrary magical numbers. */
1797#define MAGICWORD 0xfedabeeb
1798#define MAGICFREE 0xd8675309
1799#define MAGICBYTE ((char) 0xd7)
1800#define MALLOCFLOOD ((char) 0x93)
1801#define FREEFLOOD ((char) 0x95)
1802
1803struct hdr
1804 {
1805 __malloc_size_t size; /* Exact size requested by user. */
1806 unsigned long int magic; /* Magic number to check header integrity. */
1807 };
1808
1809#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
1810#define flood memset
1811#else
1812static void flood __P ((__ptr_t, int, __malloc_size_t));
1813static void
1814flood (ptr, val, size)
1815 __ptr_t ptr;
1816 int val;
1817 __malloc_size_t size;
1818{
1819 char *cp = ptr;
1820 while (size--)
1821 *cp++ = val;
1822}
1823#endif
1824
1825static enum mcheck_status checkhdr __P ((const struct hdr *));
1826static enum mcheck_status
1827checkhdr (hdr)
1828 const struct hdr *hdr;
1829{
1830 enum mcheck_status status;
1831 switch (hdr->magic)
1832 {
1833 default:
1834 status = MCHECK_HEAD;
1835 break;
1836 case MAGICFREE:
1837 status = MCHECK_FREE;
1838 break;
1839 case MAGICWORD:
1840 if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
1841 status = MCHECK_TAIL;
1842 else
1843 status = MCHECK_OK;
1844 break;
1845 }
1846 if (status != MCHECK_OK)
1847 (*abortfunc) (status);
1848 return status;
1849}
1850
/* Debugging replacement for `free': validates the block's guard
   values, floods the freed memory, then frees header and all.  */
static void freehook __P ((__ptr_t));
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      /* Step back over the bookkeeping header, verify it, then mark
	 the block freed and flood the user area so use-after-free
	 reads are recognizable.  */
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  /* Temporarily restore the old hook so the `free' call below does
     not recurse back into freehook.  */
  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
1872
/* Debugging replacement for `malloc': allocates room for a header
   and a trailing guard byte around the user's SIZE bytes.  */
static __ptr_t mallochook __P ((__malloc_size_t));
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  /* Unhook while calling the real malloc to avoid recursion.  */
  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  /* Install the guard values and flood the user area so reads of
     uninitialized memory are recognizable.  */
  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}
1892
/* Debugging replacement for `realloc': validates the old block,
   resizes it (header and guard byte included), and refreshes the
   guard values for the new size.  */
static __ptr_t reallochook __P ((__ptr_t, __malloc_size_t));
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      /* Locate and validate the existing header; when shrinking,
	 flood the tail that is about to be discarded.  */
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  /* Unhook all three hooks while calling the real realloc, which may
     itself allocate and free.  */
  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  /* Refresh the guards for the new size; flood any newly grown area
     so uninitialized reads are recognizable.  */
  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}
1929
1930static void
1931mabort (status)
1932 enum mcheck_status status;
1933{
1934 const char *msg;
1935 switch (status)
1936 {
1937 case MCHECK_OK:
1938 msg = "memory is consistent, library is buggy";
1939 break;
1940 case MCHECK_HEAD:
1941 msg = "memory clobbered before allocated block";
1942 break;
1943 case MCHECK_TAIL:
1944 msg = "memory clobbered past end of allocated block";
1945 break;
1946 case MCHECK_FREE:
1947 msg = "block freed twice";
1948 break;
1949 default:
1950 msg = "bogus mcheck_status, library is buggy";
1951 break;
1952 }
1953#ifdef __GNU_LIBRARY__
1954 __libc_fatal (msg);
1955#else
1956 fprintf (stderr, "mcheck: %s\n", msg);
1957 fflush (stderr);
1958 abort ();
1959#endif
1960}
1961
/* Nonzero once the mcheck hooks have been installed.  */
static int mcheck_used = 0;

/* Install the consistency-checking hooks around malloc, realloc and
   free.  FUNC (or mabort when FUNC is NULL) is called whenever
   corruption is detected.  Returns 0 on success, -1 if malloc was
   already initialized and the hooks could not be installed.  */
int
mcheck (func)
     void (*func) __P ((enum mcheck_status));
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      /* Save the current hooks so the replacements can call through
	 to them without recursing.  */
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}
1984
1985enum mcheck_status
1986mprobe (__ptr_t ptr)
1987{
1988 return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
1989}
1990
1991#endif /* GC_MCHECK */
ab5796a9
MB
1992
1993/* arch-tag: 93dce5c0-f49a-41b5-86b1-f91c4169c02e
1994 (do not change this comment) */