/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright 1990, 91, 92, 93, 95, 96, 99 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#if defined __cplusplus || (defined (__STDC__) && __STDC__) || \
    defined STDC_HEADERS || defined PROTOTYPES
#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef PP
#define PP(args) ()
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define memset(s, zero, n) bzero ((s), (n))
#endif
#ifndef memcpy
#define memcpy(d, s, n) bcopy ((s), (d), (n))
#endif
#endif

#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#endif /* _MALLOC_INTERNAL.  */


#ifdef __cplusplus
extern "C"
{
#endif

#ifdef STDC_HEADERS
#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t
#else
#define __malloc_size_t unsigned int
#define __malloc_ptrdiff_t int
#endif

#ifndef NULL
#define NULL 0
#endif

#ifndef FREE_RETURN_TYPE
#define FREE_RETURN_TYPE void
#endif

/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern FREE_RETURN_TYPE free PP ((__ptr_t __ptr));

/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT bytes.  */
#if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
                             __malloc_size_t __size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif


#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT         (CHAR_BIT * sizeof (int))
#define BLOCKLOG        (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE       (1 << BLOCKLOG)
#define BLOCKIFY(SIZE)  (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
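
/* Worked example (illustrative, assuming a 32-bit int): INT_BIT is
   32, so BLOCKLOG is 12 and BLOCKSIZE is 1 << 12 == 4096 bytes.
   BLOCKIFY rounds a byte count up to whole blocks:
   BLOCKIFY (1) == 1, BLOCKIFY (4096) == 1, BLOCKIFY (4097) == 2.  */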

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a large (multiblock) object, or positive giving the
           logarithm to the base two of the fragment size.  */
        int type;
        union
          {
            struct
              {
                __malloc_size_t nfree; /* Free frags in a fragmented block.  */
                __malloc_size_t first; /* First free fragment of the block.  */
              } frag;
            /* For a large object, in its first block, this has the number
               of blocks in the object.  In the other blocks, this has a
               negative number which says how far back the first block is.  */
            __malloc_ptrdiff_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        __malloc_size_t size;   /* Size (in blocks) of a free cluster.  */
        __malloc_size_t next;   /* Index of next free cluster.  */
        __malloc_size_t prev;   /* Index of previous free cluster.  */
      } free;
  } malloc_info;
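
/* Illustration (not used by the code): a busy large object spanning
   three blocks starting at block B is recorded as
     _heapinfo[B].busy.info.size     ==  3
     _heapinfo[B + 1].busy.info.size == -1
     _heapinfo[B + 2].busy.info.size == -2
   so the first block is reachable from any interior block by adding
   the stored negative size to the block index.  */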

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)   (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
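
/* Note (illustrative): block numbers are 1-based, so BLOCK (_heapbase)
   == 1 and ADDRESS (1) == _heapbase; the two macros are inverses for
   block-aligned addresses, and index 0 of _heapinfo is left free to
   serve as the anchor of the circular free list.  */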

/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;            /* The address that `memalign' returned.  */
    __ptr_t exact;              /* The address that `malloc' returned.  */
  };
extern struct alignlist *_aligned_blocks;
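
/* Sketch of the memalign/free handshake (illustrative): memalign
   records the pair (exact, aligned), where `exact' is what malloc
   really returned and `aligned' is the adjusted pointer handed to
   the caller.  When free is later passed `aligned', it scans this
   list, substitutes `exact', and frees that instead.  */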

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));

#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
                                       __malloc_size_t __alignment));

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,       /* Consistency checking is not turned on.  */
    MCHECK_OK,                  /* Block is fine.  */
    MCHECK_FREE,                /* Block freed twice.  */
    MCHECK_HEAD,                /* Memory before the block was clobbered.  */
    MCHECK_TAIL                 /* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;  /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;  /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
                                 void (*__warnfun) PP ((const char *))));


/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));


#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* How to really get more memory.  */
__ptr_t (*__morecore) PP ((ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
                    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
        abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT) /* empty */
#endif


/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
                                                  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      new = (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
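
/* Worked example for align (illustrative, BLOCKSIZE == 4096): if
   __morecore returns 0x1234, then adj == 0x234, so another
   0x1000 - 0x234 == 0xdcc bytes are requested and the caller gets
   0x1234 + 0xdcc == 0x2000, the next block boundary; the core below
   the boundary is simply sacrificed.  */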

/* Get SIZE bytes, if we can get them starting at POSITION.
   Return the address of the space we got.
   If we cannot get space at POSITION, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void register_heapinfo PP ((void));
#ifdef __GNUC__
__inline__
#endif
static void
register_heapinfo ()
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

/* Set everything up and remember that we have.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return 0;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return 1;
}
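
/* Post-initialization picture (illustrative, 32-bit int): heapsize
   is HEAP / BLOCKSIZE == 4194304 / 4096 == 1024 info entries; the
   table sits at the very start of the heap (_heapbase == (char *)
   _heapinfo) and describes itself via register_heapinfo; the free
   list anchored at _heapinfo[0] is still empty.  */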

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore PP ((__malloc_size_t));
static __ptr_t
morecore (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
         added blocks in the table itself, as we hope to place them in
         existing free space, which is already covered by part of the
         existing table.  */
      newsize = heapsize;
      do
        newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
         from realloc in the case of growing a large block, because the
         block being grown is momentarily marked as free.  In this case
         _heaplimit is zero so we know not to reuse space for internal
         allocation.  */
      if (_heaplimit != 0)
        {
          /* First try to allocate the new info table in core we already
             have, in the usual way using realloc.  If realloc cannot
             extend it in place or relocate it to existing sufficient core,
             we will get called again, and the code above will notice the
             `morecore_recursing' flag and return null.  */
          int save = errno;     /* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
          newinfo = (malloc_info *) _realloc_internal
            (_heapinfo, newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
            errno = save;
          else
            {
              /* We found some space in core, and realloc has put the old
                 table's blocks on the free list.  Now zero the new part
                 of the table and install the new table location.  */
              memset (&newinfo[heapsize], 0,
                      (newsize - heapsize) * sizeof (malloc_info));
              _heapinfo = newinfo;
              heapsize = newsize;
              goto got_heap;
            }
        }

      /* Allocate new space for the malloc info table.  */
      while (1)
        {
          newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

          /* Did it fail?  */
          if (newinfo == NULL)
            {
              (*__morecore) (-size);
              return NULL;
            }

          /* Is it big enough to record status for its own space?
             If so, we win.  */
          if ((__malloc_size_t) BLOCK ((char *) newinfo
                                       + newsize * sizeof (malloc_info))
              < newsize)
            break;

          /* Must try again.  First give back most of what we just got.  */
          (*__morecore) (- newsize * sizeof (malloc_info));
          newsize *= 2;
        }

      /* Copy the old table to the beginning of the new,
         and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;
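
      /* Example (illustrative): a request for 100 bytes leaves
         size == 99 after the decrement; halving 99 -> 49 -> 24 -> 12
         -> 6 -> 3 -> 1 -> 0 raises log to 7, i.e. a 1 << 7 == 128
         byte fragment, the smallest power of two holding the
         request.  */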

      /* Look in the fragment lists for a
         free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters.  */
          result = (__ptr_t) next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first = (unsigned long int)
              ((unsigned long int) ((char *) next->next - (char *) NULL)
               % BLOCKSIZE) >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
          result = _malloc_internal (BLOCKSIZE);
          PROTECT_MALLOC_STATE (0);
#else
          result = malloc (BLOCKSIZE);
#endif
          if (result == NULL)
            {
              PROTECT_MALLOC_STATE (1);
              return NULL;
            }

          /* Link all fragments but the first into the free list.  */
          next = (struct list *) ((char *) result + (1 << log));
          next->next = NULL;
          next->prev = &_fraghead[log];
          _fraghead[log].next = next;

          for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Get a little extra.  */
              __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              /* Check to see if the new core will be contiguous with the
                 final free block; if so we don't need to get as much.  */
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space.  */
                  block + wantblocks <= heapsize &&
                  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
                                        ADDRESS (block + lastblocks)))
                {
                  /* We got it contiguously.  Which block we are extending
                     (the `final free block' referred to above) might have
                     changed, if it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (wantblocks - lastblocks);
                  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
              result = morecore (wantblocks * BLOCKSIZE);
              if (result == NULL)
                return NULL;
              block = BLOCK (result);
              /* Put the new block at the end of the free list.  */
              _heapinfo[block].free.size = wantblocks;
              _heapinfo[block].free.prev = _heapinfo[0].free.prev;
              _heapinfo[block].free.next = 0;
              _heapinfo[0].free.prev = block;
              _heapinfo[_heapinfo[block].free.prev].free.next = block;
              ++_chunks_free;
              /* Now loop to use some of that block for this allocation.  */
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list.  */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list.  */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment.  */
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size);
}
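
/* Hook sketch (illustrative, not part of this file): a tracing layer
   can save __malloc_hook, install its own function in its place, and
   from inside it call _malloc_internal (or the saved hook) before
   recording the result; the mcheck and mtrace facilities declared in
   the header above layer their checks through these hook variables
   in just this way.  */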

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Cope with systems lacking `memmove'.  */
#ifndef memmove
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#endif
/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif
#endif


/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;      /* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          __malloc_size_t info_block = BLOCK (_heapinfo);
          __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
          __malloc_size_t prev_block = _heapinfo[block].free.prev;
          __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
          __malloc_size_t next_block = _heapinfo[block].free.next;
          __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold))))
            {
              malloc_info *newinfo;
              __malloc_size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              newinfo = (malloc_info *) _malloc_internal (info_blocks
                                                          * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register __malloc_size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

#ifdef GC_MALLOC_CHECK
          _free_internal (ADDRESS (block));
#else
          free (ADDRESS (block));
#endif
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = (struct list *) ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = (struct list *) ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first = (unsigned long int)
            ((unsigned long int) ((char *) ptr - (char *) NULL)
             % BLOCKSIZE >> type);
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.  */
FREE_RETURN_TYPE
free (ptr)
     __ptr_t ptr;
{
  if (__free_hook != NULL)
    (*__free_hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Cope with systems lacking `memmove'.  */
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else

/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
         nonoverlapping chunks of TO - FROM bytes each.  However, if
         TO - FROM is small, then the bcopy function call overhead
         makes this not worth it.  The crossover point could be about
         anywhere.  Since I don't think the obvious copy loop is too
         bad, I'm trying to err in its favor.  */
      if (to - from < 64)
        {
          do
            *--endt = *--endf;
          while (endf != from);
        }
      else
        {
          for (;;)
            {
              endt -= (to - from);
              endf -= (to - from);

              if (endt < to)
                break;

              bcopy (endf, endt, to - from);
            }

          /* If SIZE wasn't a multiple of TO - FROM, there will be a
             little left over.  The amount left over is
             (endt + (to - from)) - to, which is endt - from.  */
          bcopy (from, to, endt - from);
        }
    }
}
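
/* Worked example (illustrative): from == 0, to == 100, size == 250.
   The chunked branch copies from the end in strides of
   to - from == 100 bytes: bytes 150..249 land on 250..349, then
   50..149 on 150..249; the final bcopy moves the remaining
   endt - from == 50 bytes, 0..49, onto 100..149.  No byte is read
   after it has been overwritten.  */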
#endif /* emacs */

#ifndef memmove
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif

#endif


#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal (ptr);
      return _malloc_internal (0);
    }
  else if (ptr == NULL)
    return _malloc_internal (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal (ptr);
              return result;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list.  */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal (ptr);
          result = _malloc_internal (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal (blocks * BLOCKSIZE);
              else
                {
                  __ptr_t previous
                    = _malloc_internal ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal (blocks * BLOCKSIZE);
                  _free_internal (previous);
                }
              return NULL;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
          size <= (__malloc_size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old.  */
          result = _malloc_internal (size);
          if (result == NULL)
            return NULL;
          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
          _free_internal (ptr);
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal)
    (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
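
/* Note on this historical calloc (an observation, not a change in
   behavior): the product nmemb * size above can overflow
   __malloc_size_t, in which case malloc is asked for the
   wrapped-around, smaller amount; callers on such paths must bound
   nmemb and size themselves.  */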
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#ifndef __GNU_LIBRARY__
#define __sbrk sbrk
#endif

#ifdef __GNU_LIBRARY__
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
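
/* Usage sketch (illustrative): __default_morecore (0) returns the
   current break without moving it, which is how get_contiguous_space
   above probes where new core would land; a negative INCREMENT, as
   issued by _free_internal when trailing free blocks pile up,
   returns core to the system.  */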
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#if __DJGPP__ - 0 == 1

/* There is some problem with memalign in DJGPP v1 and we are supposed
   to omit it.  No one told me why, they just told me to do it.  */

#else

__ptr_t (*__memalign_hook) PP ((size_t __size, size_t __alignment));

__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;

  if (__memalign_hook)
    return (*__memalign_hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)       /* Impossible unless interrupted.  */
        return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
         different block with weaker alignment.  If so, this block is too
         short to contain SIZE after alignment correction.  So we must
         try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
         can identify the pointer it is passed, which will be in the middle
         of an allocated block.  */

      struct alignlist *l;
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == NULL)
          /* This slot is free.  Use it.  */
          break;
      if (l == NULL)
        {
          l = (struct alignlist *) malloc (sizeof (struct alignlist));
          if (l == NULL)
            {
              free (result);
              return NULL;
            }
          l->next = _aligned_blocks;
          _aligned_blocks = l;
        }
      l->exact = result;
      result = l->aligned = (char *) result + alignment - adj;
    }

  return result;
}
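
/* Worked example (illustrative): with alignment == 64, suppose the
   second malloc returns 0x5028.  Then adj == 0x5028 % 64 == 40, the
   caller receives 0x5028 + 64 - 40 == 0x5040 (64-byte aligned), and
   the pair exact == 0x5028 / aligned == 0x5040 is recorded in
   _aligned_blocks so that free can recover the exact pointer.  */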

#endif /* Not DJGPP v1 */
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int.  */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize ()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}

#endif /* Not ELIDE_VALLOC.  */