/* malloc.c - dynamic memory allocation for bash. */

/* Copyright (C) 1985-2005 Free Software Foundation, Inc.

   This file is part of GNU Bash, the Bourne-Again SHell.

   Bash is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   Bash is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with Bash.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
 * @(#)nmalloc.c 1 (Caltech) 2/21/82
 *
 * U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
 *
 * Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks
 * that don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
 * This is designed for use in a program that uses vast quantities of
 * memory, but bombs when it runs out.  To make it a little better, it
 * warns the user when he starts to get near the end.
 *
 * June 84, ACT: modified rcheck code to check the range given to malloc,
 * rather than the range determined by the 2-power used.
 *
 * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
 * You should call malloc_init to reinitialize after loading dumped Emacs.
 * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
 * realloc knows how to return same block given, just changing its size,
 * if the power of 2 is correct.
 */

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information will
 * go in the first int of the block, and the returned pointer will point
 * to the second.
 */
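/* Added illustration (not part of the original sources): with the 8-byte
   per-block header defined below and the 4-byte trailing size guard (MSLOP),
   a block taken from nextf[4] (a 128-byte block) is laid out roughly as

       [ 8-byte header | user data ... | 4-byte size guard | slack ]
        ^ block start    ^ pointer returned to the caller

   so the pointer handed to the caller sits just past the per-block
   overhead described above. */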
/* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to
   uncover callers that refer to freed memory, and to have malloc() write 0xdf
   into memory as it's allocated to avoid referring to previous contents. */

/* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE;
   handled by configure. */

#if defined (HAVE_CONFIG_H)
#  include <config.h>
#endif /* HAVE_CONFIG_H */

#  include "bashtypes.h"

#  include <sys/types.h>

#if defined (HAVE_UNISTD_H)
#  include <unistd.h>
#endif

/* Determine which kind of system this is. */
#include <signal.h>

#if defined (HAVE_STRING_H)
#  include <string.h>
#else
#  include <strings.h>
#endif

/* Define getpagesize () if the system does not. */
#ifndef HAVE_GETPAGESIZE
#  include "getpagesize.h"
#endif
#ifdef MALLOC_REGISTER
#  include "table.h"
#endif

/* System-specific omissions. */

#define ISALLOC ((char) 0xf7)	/* magic byte that implies allocation */
#define ISFREE ((char) 0x54)	/* magic byte that implies free block */
				/* this is for error checking only */
#define ISMEMALIGN ((char) 0xd6)  /* Stored before the value returned by
				     memalign, with the rest of the word
				     being the distance to the true
				     beginning of the block. */

/* We have a flag indicating whether memory is allocated, an index in
   nextf[], a size field, and a sentinel value to determine whether or
   not a caller wrote before the start of allocated memory; to realloc()
   memory we either copy mh_nbytes or just change mh_nbytes if there is
   enough room in the block for the new size.  Range checking is always
   done. */
union mhead {
  bits64_t mh_align;						/* 8 */
  struct {
    char mi_alloc;		/* ISALLOC or ISFREE */		/* 1 */
    char mi_index;		/* index in nextf[] */		/* 1 */
    /* Remainder are valid only when block is allocated */
    u_bits16_t mi_magic2;	/* should be == MAGIC2 */	/* 2 */
    u_bits32_t mi_nbytes;	/* # of bytes allocated */	/* 4 */
  } minfo;
};

#define mh_alloc	minfo.mi_alloc
#define mh_index	minfo.mi_index
#define mh_nbytes	minfo.mi_nbytes
#define mh_magic2	minfo.mi_magic2
#define MOVERHEAD	sizeof(union mhead)
#define MALIGN_MASK	7	/* one less than desired alignment */

typedef union _malloc_guard {
  char s[4];
  u_bits32_t i;
} mguard_t;

/* Access free-list pointer of a block.
   It is stored at block + sizeof (char *).
   This is not a field in the minfo structure member of union mhead
   because we want sizeof (union mhead)
   to describe the overhead for when the block is in use,
   and we do not want the free-list pointer to count in that. */

#define CHAIN(a) \
  (*(union mhead **) (sizeof (char *) + (char *) (a)))
/* To implement range checking, we write magic values in at the beginning
   and end of each allocated block, and make sure they are undisturbed
   whenever a free or a realloc occurs. */

/* Written in the 2 bytes before the block's real space (-4 bytes) */
#define MAGIC2 0x5555
#define MSLOP  4		/* 4 bytes extra for u_bits32_t size */

/* How many bytes are actually allocated for a request of size N --
   rounded up to nearest multiple of 8 after accounting for malloc
   overhead. */
#define ALLOCATED_BYTES(n) \
	(((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK)
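/* Added worked example (assumes the 8-byte header and 4-byte MSLOP above):
   ALLOCATED_BYTES (100) == (100 + 8 + 4 + 7) & ~7 == 119 & ~7 == 112,
   so a 100-byte request really consumes 112 bytes and is served from the
   128-byte bucket (index 4). */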
#define ASSERT(p) \
  do \
    { \
      if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, CPP_STRING(p), file, line); \
    } \
  while (0)

/* Minimum and maximum bucket indices for block splitting (and to bound
   the search for a block to split). */
#define SPLIT_MIN	2	/* XXX - was 3 */

/* Minimum and maximum bucket indices for block coalescing. */
#define COMBINE_MIN	2
#define COMBINE_MAX	(pagebucket - 1)	/* XXX */

#define LESSCORE_MIN	10
#define LESSCORE_FRC	13

#define STARTBUCK	1

/* Flags for the internal functions. */
#define MALLOC_WRAPPER	0x01	/* wrapper function */
#define MALLOC_INTERNAL	0x02	/* internal function calling another */
#define MALLOC_NOTRACE	0x04	/* don't trace this allocation or free */
#define MALLOC_NOREG	0x08	/* don't register this allocation or free */

#define ERR_DUPFREE		0x01
#define ERR_UNALLOC		0x02
#define ERR_UNDERFLOW		0x04
#define ERR_ASSERT_FAILED	0x08

/* Evaluates to true if NB is appropriate for bucket NU.  NB is adjusted
   appropriately by the caller to account for malloc overhead.  This only
   checks that the recorded size is not too big for the bucket.  We
   can't check whether or not it's in between NU and NU-1 because we
   might have encountered a busy bucket when allocating and moved up to
   the next size. */
#define IN_BUCKET(nb, nu)	((nb) <= binsizes[(nu)])

/* Use this when we want to be sure that NB is in bucket NU. */
#define RIGHT_BUCKET(nb, nu) \
	(((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)]))
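/* Added example (not part of the original comment): for the 112 bytes
   computed above, RIGHT_BUCKET (112, 4) is true (64 < 112 <= 128), while
   IN_BUCKET (112, 5) is also true (112 <= 256) -- exactly the case where
   the allocation was bumped to a larger bucket because bucket 4 was busy. */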
/* nextf[i] is free list of blocks of size 2**(i + 3) */

static union mhead *nextf[NBUCKETS];

/* busy[i] is nonzero while allocation or free of block size i is in progress. */

static char busy[NBUCKETS];

static int pagesz;	/* system page size. */
static int pagebucket;	/* bucket for requests a page in size */
static int maxbuck;	/* highest bucket receiving allocation request. */

static char *memtop;	/* top of heap */

static const unsigned long binsizes[NBUCKETS] = {
	8UL, 16UL, 32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL,
	8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL,
	1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL,
	67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL,
	2147483648UL, 4294967295UL
};

/* binsizes[x] == (1 << ((x) + 3)) */
#define binsize(x)	binsizes[(x)]
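/* Added illustration: binsize (0) == 8, binsize (2) == 32 (the prepopulated
   bin), and with a 4096-byte page binsize (9) == 4096, so pagebucket ends up
   as 9 on such systems. */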
/* Declarations for internal functions */
static PTR_T internal_malloc __P((size_t, const char *, int, int));
static PTR_T internal_realloc __P((PTR_T, size_t, const char *, int, int));
static void internal_free __P((PTR_T, const char *, int, int));
static PTR_T internal_memalign __P((size_t, size_t, const char *, int, int));
static PTR_T internal_calloc __P((size_t, size_t, const char *, int, int));
static void internal_cfree __P((PTR_T, const char *, int, int));
static PTR_T internal_valloc __P((size_t, const char *, int, int));

#ifdef botch
extern void botch ();
#else
static void botch __P((const char *, const char *, int));
#endif
static void xbotch __P((PTR_T, int, const char *, const char *, int));

#if !HAVE_DECL_SBRK
extern char *sbrk ();
#endif /* !HAVE_DECL_SBRK */

extern int interrupt_immediately, running_trap;
extern int signal_is_trapped __P((int));

#ifdef MALLOC_STATS
struct _malstats _mstats;
#endif /* MALLOC_STATS */
/* Debugging variables available to applications. */
int malloc_flags = 0;	/* future use */
int malloc_trace = 0;	/* trace allocations and frees to stderr */
int malloc_register = 0;	/* future use */

char _malloc_trace_buckets[NBUCKETS];

/* These should really go into a header file. */
extern void mtrace_alloc __P((const char *, PTR_T, size_t, const char *, int));
extern void mtrace_free __P((PTR_T, int, const char *, int));

botch (s, file, line)

  fprintf (stderr, _("malloc: failed assertion: %s\n"), s);
  (void)fflush (stderr);

/* print the file and line number that caused the assertion failure and
   call botch() to do whatever the application wants with the information */

xbotch (mem, e, s, file, line)

  fprintf (stderr, _("\r\nmalloc: %s:%d: assertion botched\r\n"),
	   file ? file : _("unknown"), line);
#ifdef MALLOC_REGISTER
  if (mem != NULL && malloc_register)
    mregister_describe_mem (mem, stderr);
#endif
  (void)fflush (stderr);
  botch(s, file, line);
/* Coalesce two adjacent free blocks off the free list for size NU - 1,
   as long as we can find two adjacent free blocks.  nextf[NU - 1] is
   assumed to not be busy; the caller (morecore()) checks for this.
   BUSY[NU] must be set to 1. */

  register union mhead *mp, *mp1, *mp2;

  if (nextf[nbuck] == 0 || busy[nbuck])
    return;

  siz = binsize (nbuck);

  mp2 = mp1 = nextf[nbuck];

  while (mp && mp != (union mhead *)((char *)mp1 + siz))

  /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
     CHAIN(mp2) must equal mp1.  Check that mp1 and mp are adjacent. */
  if (mp2 != mp1 && CHAIN(mp2) != mp1)

      xbotch ((PTR_T)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL, 0);

  if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))

      return;	/* not adjacent */

  /* Since they are adjacent, remove them from the free list */
  if (mp1 == nextf[nbuck])
    nextf[nbuck] = CHAIN (mp);
  else
    CHAIN (mp2) = CHAIN (mp);

#ifdef MALLOC_STATS
  _mstats.tbcoalesce++;
  _mstats.ncoalesce[nbuck]++;
#endif

  /* And add the combined two blocks to nextf[NU]. */
  mp1->mh_alloc = ISFREE;
  mp1->mh_index = nu;
  CHAIN (mp1) = nextf[nu];
  nextf[nu] = mp1;
/* Split a block at index > NU (but less than SPLIT_MAX) into a set of
   blocks of the correct size, and attach them to nextf[NU].  nextf[NU]
   is assumed to be empty.  Must be called with signals blocked (e.g.,
   by morecore()).  BUSY[NU] must be set to 1. */

  register union mhead *mp;
  int nbuck, nblks, split_max;

  split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;

  for (nbuck = split_max; nbuck > nu; nbuck--)

      if (busy[nbuck] || nextf[nbuck] == 0)

  for (nbuck = nu + 1; nbuck <= split_max; nbuck++)

      if (busy[nbuck] || nextf[nbuck] == 0)

  if (nbuck > split_max || nbuck <= nu)

  /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
     and nbuck is below some threshold. */

  /* Remove the block from the chain of larger blocks. */
  mp = nextf[nbuck];
  nextf[nbuck] = CHAIN (mp);

#ifdef MALLOC_STATS
  _mstats.nsplit[nbuck]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);
  nblks = binsize (nbuck) / siz;

  /* Split the block and put it on the requested chain. */
      mp->mh_alloc = ISFREE;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
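      /* Added worked example: if nu == 2 (32-byte blocks) and the block being
	 split comes from, say, bucket 5 (256 bytes), the 256-byte block is
	 carved into nblks == 256 / 32 == 8 chunks, which are chained onto
	 nextf[2] by this loop. */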
/* Take the memory block MP and add it to a chain < NU.  NU is the right bucket,
   but is busy.  This avoids memory orphaning. */

  int nbuck, nblks, split_max;

  while (nbuck >= SPLIT_MIN && busy[nbuck])
    nbuck--;
  if (nbuck < SPLIT_MIN)

#ifdef MALLOC_STATS
  _mstats.nsplit[nu]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);			/* original block size */
  nblks = siz / binsize (nbuck);	/* should be 2 most of the time */

  /* And add it to nextf[nbuck] */
  siz = binsize (nbuck);		/* XXX - resetting here */

      mp->mh_alloc = ISFREE;
      mp->mh_index = nbuck;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);

  CHAIN (mp) = nextf[nbuck];
  nextf[nbuck] = mp;

_malloc_block_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigfillset (setp);
  sigemptyset (osetp);
  sigprocmask (SIG_BLOCK, setp, osetp);
#else
#  if defined (HAVE_BSD_SIGNALS)
  *osetp = sigsetmask (-1);
#  endif
#endif
}

_malloc_unblock_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
#else
#  if defined (HAVE_BSD_SIGNALS)
  sigsetmask (*osetp);
#  endif
#endif
}

/* Return some memory to the system by reducing the break.  This is only
   called with NU > pagebucket, so we're always assured of giving back
   more than one page of memory. */
lesscore (nu)			/* give system back some memory */
     register int nu;		/* size index we're discarding */

  /* Should check for errors here, I guess. */

#ifdef MALLOC_STATS
  _mstats.tsbrk -= siz;
  _mstats.nlesscore[nu]++;
#endif

/* Ask system for more memory; add to NEXTF[NU].  BUSY[NU] must be set to 1. */

     register int nu;			/* size index to get more of */

  register union mhead *mp;

  long sbrk_amt;		/* amount to get via sbrk() */

  /* Block all signals in case we are executed from a signal handler. */

# if defined (SIGCHLD)
  if (interrupt_immediately || running_trap || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
# else
  if (interrupt_immediately || running_trap || signal_is_trapped (SIGINT))
# endif
    _malloc_block_signals (&set, &oset);

  siz = binsize (nu);	/* size of desired block for nextf[nu] */

    goto morecore_done;	/* oops */

#ifdef MALLOC_STATS
  _mstats.nmorecore[nu]++;
#endif

  /* Try to split a larger block here, if we're within the range of sizes
     to split. */

  /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
     if we can, and we're within the range of the block coalescing limits. */
  if (nu >= COMBINE_MIN && nu < COMBINE_MAX && busy[nu - 1] == 0 && nextf[nu - 1])

  /* Take at least a page, and figure out how many blocks of the requested
     size we're getting. */

      nblks = sbrk_amt / siz;

      /* We always want to request an integral multiple of the page size
	 from the kernel, so let's compute whether or not `siz' is such
	 an amount.  If it is, we can just request it.  If not, we want
	 the smallest integral multiple of pagesize that is larger than
	 `siz' and will satisfy the request. */
      sbrk_amt = siz & (pagesz - 1);
      if (sbrk_amt == 0)
	sbrk_amt = siz;
      else
	sbrk_amt = siz + pagesz - sbrk_amt;
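      /* Added worked example: with pagesz == 4096, a request for siz == 16384
	 has (siz & (pagesz - 1)) == 0, so sbrk_amt stays 16384; a hypothetical
	 siz of 5000 would leave a remainder of 904 and be rounded up to
	 5000 + 4096 - 904 == 8192, the next multiple of the page size. */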
#ifdef MALLOC_STATS
  _mstats.tsbrk += sbrk_amt;
#endif

  mp = (union mhead *) sbrk (sbrk_amt);

  /* Totally out of memory. */

  /* shouldn't happen, but just in case -- require 8-byte alignment */
  if ((long)mp & MALIGN_MASK)
      mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK);

  /* save new header and link the nblks blocks together */
      mp->mh_alloc = ISFREE;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);

    _malloc_unblock_signals (&set, &oset);

malloc_debug_dummy ()
  write (1, "malloc_debug_dummy\n", 19);

#define PREPOP_BIN	2
#define PREPOP_SIZE	32

  register union mhead *mp;

  pagesz = getpagesize ();

  /* OK, how much do we need to allocate to make things page-aligned?
     Some of this partial page will be wasted space, but we'll use as
     much as we can.  Once we figure out how much to advance the break
     pointer, go ahead and do it. */
  memtop = curbrk = sbrk (0);
  sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1));	/* sbrk(0) % pagesz */
  if (sbrk_needed < 0)
    sbrk_needed += pagesz;

  /* Now allocate the wasted space. */

#ifdef MALLOC_STATS
      _mstats.tsbrk += sbrk_needed;
#endif

      curbrk = sbrk (sbrk_needed);
      if ((long)curbrk == -1)
	return -1;
      memtop += sbrk_needed;

      /* Take the memory which would otherwise be wasted and populate the most
	 popular bin (2 == 32 bytes) with it.  Add whatever we need to curbrk
	 to make things 32-byte aligned, compute how many 32-byte chunks we're
	 going to get, and set up the bin. */
      curbrk += sbrk_needed & (PREPOP_SIZE - 1);
      sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1);
      nunits = sbrk_needed / PREPOP_SIZE;
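      /* Added worked example: if the break was already page-aligned,
	 sbrk_needed is roughly a full page (4096 bytes on many systems); it is
	 already a multiple of PREPOP_SIZE, so nunits == 4096 / 32 == 128
	 chunks get threaded onto nextf[PREPOP_BIN] below. */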
      mp = (union mhead *)curbrk;

      nextf[PREPOP_BIN] = mp;

	  mp->mh_alloc = ISFREE;
	  mp->mh_index = PREPOP_BIN;
	  if (--nunits <= 0) break;
	  CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE);
	  mp = (union mhead *)((char *)mp + PREPOP_SIZE);

  /* compute which bin corresponds to the page size. */
  for (nunits = 7; nunits < NBUCKETS; nunits++)
    if (pagesz <= binsize(nunits))
      break;
  pagebucket = nunits;

internal_malloc (n, file, line, flags)		/* get a block */

  register union mhead *p;

  register char *m, *z;

  /* Get the system page size and align break pointer so future sbrks will
     be page-aligned.  The page size must be at least 1K -- anything
     smaller is increased. */
  if (pagesz == 0)
    if (pagealign () < 0)
      return ((PTR_T)NULL);

  /* Figure out how many bytes are required, rounding up to the nearest
     multiple of 8, then figure out which nextf[] area to use.  Try to
     be smart about where to start searching -- if the number of bytes
     needed is greater than the page size, we can start at pagebucket. */
  nbytes = ALLOCATED_BYTES(n);
  nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
  for ( ; nunits < NBUCKETS; nunits++)
    if (nbytes <= binsize(nunits))
      break;
  /* Silently reject too-large requests. */
  if (nunits >= NBUCKETS)
    return ((PTR_T) NULL);

  /* In case this is reentrant use of malloc from signal handler,
     pick a block size that no other malloc level is currently
     trying to allocate.  That's the easiest harmless way not to
     interfere with the other level of execution. */
#ifdef MALLOC_STATS
  if (busy[nunits]) _mstats.nrecurse++;
#endif
  while (busy[nunits]) nunits++;
  busy[nunits] = 1;

  if (nunits > maxbuck)
    maxbuck = nunits;

  /* If there are no blocks of the appropriate size, go get some */
  if (nextf[nunits] == 0)
    morecore (nunits);

  /* Get one block off the list, and set the new list head */
  if ((p = nextf[nunits]) == NULL)
    {
      busy[nunits] = 0;
      return ((PTR_T) NULL);
    }
  nextf[nunits] = CHAIN (p);
  busy[nunits] = 0;

  /* Check for free block clobbered */
  /* If not for this check, we would gobble a clobbered free chain ptr
     and bomb out on the NEXT allocate of this size block */
  if (p->mh_alloc != ISFREE || p->mh_index != nunits)
    xbotch ((PTR_T)(p+1), 0, _("malloc: block on free list clobbered"), file, line);

  /* Fill in the info, and set up the magic numbers for range checking. */
  p->mh_alloc = ISALLOC;
  p->mh_magic2 = MAGIC2;
  p->mh_nbytes = n;

  /* End guard */
  mg.i = n;
  z = mg.s;
  m = (char *) (p + 1) + n;
  *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

    MALLOC_MEMSET ((char *)(p + 1), 0xdf, n);	/* scramble previous contents */

#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]++;
  _mstats.tmalloc[nunits]++;

  _mstats.bytesreq += n;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("malloc", p + 1, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_ALLOC, n);
#endif

  return (PTR_T) (p + 1);
internal_free (mem, file, line, flags)

  register union mhead *p;
  register char *ap, *z;
  register int nunits;
  register unsigned int nbytes;
  int ubytes;		/* caller-requested size */

  if ((ap = (char *)mem) == 0)
    return;

  p = (union mhead *) ap - 1;

  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

#if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER)
  if (malloc_trace || malloc_register)
    ubytes = p->mh_nbytes;
#endif

  if (p->mh_alloc != ISALLOC)
    {
      if (p->mh_alloc == ISFREE)
	xbotch (mem, ERR_DUPFREE,
		_("free: called with already freed block argument"), file, line);
      else
	xbotch (mem, ERR_UNALLOC,
		_("free: called with unallocated block argument"), file, line);
    }

  ASSERT (p->mh_magic2 == MAGIC2);

  nunits = p->mh_index;
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */

  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("free: underflow detected; mh_nbytes out of range"), file, line);

  ap += p->mh_nbytes;
  z = mg.s;
  *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("free: start and end chunk sizes differ"), file, line);
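  /* Added illustration: at allocation time the four bytes just past the
     requested region were set to a copy of mh_nbytes, so for a 100-byte
     request the bytes at mem + 100 .. mem + 103 must read back as 100 here;
     anything else means the caller wrote past the end of the block. */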
  if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == sbrk (0)))

  if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop))

      /* If above LESSCORE_FRC, give back unconditionally.  This should be set
	 high enough to be infrequently encountered.  If between LESSCORE_MIN
	 and LESSCORE_FRC, call lesscore if the bucket is marked as busy or if
	 there's already a block on the free list. */
      if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0)

      /* keeps the tracing and registering code in one place */

    MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes);

  ASSERT (nunits < NBUCKETS);

  if (busy[nunits] == 1)
      xsplit (p, nunits);	/* split block and add to different chain */

  p->mh_alloc = ISFREE;
  /* Protect against signal handlers calling malloc. */
  busy[nunits] = 1;
  /* Put this block on the free list. */
  CHAIN (p) = nextf[nunits];
  nextf[nunits] = p;
  busy[nunits] = 0;

free_return:
  ;	/* Empty statement in case this is the end of the function */

#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]--;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_free (mem, ubytes, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (mem, file, line, W_FREE, ubytes);
#endif
internal_realloc (mem, n, file, line, flags)

  register union mhead *p;
  register u_bits32_t tocopy;
  register unsigned int nbytes;
  register int nunits;
  register char *m, *z;

      internal_free (mem, file, line, MALLOC_INTERNAL);

  if ((p = (union mhead *) mem) == 0)
    return internal_malloc (n, file, line, MALLOC_INTERNAL);

  nunits = p->mh_index;
  ASSERT (nunits < NBUCKETS);

  if (p->mh_alloc != ISALLOC)
    xbotch (mem, ERR_UNALLOC,
	    _("realloc: called with unallocated block argument"), file, line);

  ASSERT (p->mh_magic2 == MAGIC2);
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */
  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("realloc: underflow detected; mh_nbytes out of range"), file, line);

  m = (char *)mem + (tocopy = p->mh_nbytes);
  z = mg.s;
  *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("realloc: start and end chunk sizes differ"), file, line);

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_REALLOC, n);
#endif
#ifdef MALLOC_STATS
  _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy;
#endif

  /* See if desired size rounds to same power of 2 as actual size. */
  nbytes = ALLOCATED_BYTES(n);

  /* If ok, use the same block, just marking its size as changed. */
  if (RIGHT_BUCKET(nbytes, nunits))
    {
      m = (char *)mem + p->mh_nbytes;

      /* Compensate for increment above. */

      *m++ = 0;  *m++ = 0;  *m++ = 0;  *m++ = 0;
      m = (char *)mem + (p->mh_nbytes = n);

      mg.i = n;
      z = mg.s;
      *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;
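      /* Added worked example: growing a 100-byte allocation (112 bytes with
	 overhead, bucket 4) to 116 bytes still fits -- ALLOCATED_BYTES (116)
	 == 128 -- so only mh_nbytes and the trailing guard are rewritten in
	 place; growing it to 120 bytes (ALLOCATED_BYTES == 136) falls through
	 to the allocate-copy-free path below. */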
  if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0)
    return 0;
  FASTCOPY (mem, m, tocopy);
  internal_free (mem, file, line, MALLOC_INTERNAL);

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("realloc", m, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (m, file, line, W_RESIZED, n);
#endif

  return m;

internal_memalign (alignment, size, file, line, flags)

  register char *aligned;
  register union mhead *p;

  ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL);

  /* If entire block has the desired alignment, just accept it. */
  if (((long) ptr & (alignment - 1)) == 0)
    return ptr;
  /* Otherwise, get address of byte in the block that has that alignment. */
  aligned = (char *) (((long) ptr + alignment - 1) & -alignment);

  aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1));

  /* Store a suitable indication of how to free the block,
     so that free can find the true beginning of it. */
  p = (union mhead *) aligned - 1;
  p->mh_nbytes = aligned - ptr;
  p->mh_alloc = ISMEMALIGN;
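  /* Added worked example: for memalign (64, n), internal_malloc returns a
     block whose user pointer need not be 64-byte aligned; "aligned" is then
     rounded up to the next multiple of 64, the header just below it records
     in mh_nbytes the distance back to the real block, and mh_alloc ==
     ISMEMALIGN tells free () to step back by that distance before freeing. */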
#if !defined (NO_VALLOC)
/* This runs into trouble with getpagesize on HPUX, and Multimax machines.
   Patching out seems cleaner than the ugly fix needed. */
internal_valloc (size, file, line, flags)

  return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL);
#endif /* !NO_VALLOC */

internal_calloc (n, s, file, line, flags)

  result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL);
    memset (result, 0, total);

internal_cfree (p, file, line, flags)
  internal_free (p, file, line, flags|MALLOC_INTERNAL);
#endif /* !NO_CALLOC */

malloc_free_blocks (size)

  register union mhead *p;

  for (p = nextf[size]; p; p = CHAIN (p))

#if defined (MALLOC_WRAPFUNCS)
sh_malloc (bytes, file, line)
  return internal_malloc (bytes, file, line, MALLOC_WRAPPER);

sh_realloc (ptr, size, file, line)
  return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER);

sh_free (mem, file, line)
  internal_free (mem, file, line, MALLOC_WRAPPER);

sh_memalign (alignment, size, file, line)
  return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER);

sh_calloc (n, s, file, line)
  return internal_calloc (n, s, file, line, MALLOC_WRAPPER);

sh_cfree (mem, file, line)
  internal_cfree (mem, file, line, MALLOC_WRAPPER);

sh_valloc (size, file, line)
  return internal_valloc (size, file, line, MALLOC_WRAPPER);
#endif /* !NO_VALLOC */

#endif /* MALLOC_WRAPFUNCS */
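/* Added usage sketch (hypothetical call site, not from this file): the sh_*
   wrappers above take the caller's file and line so the trace/register
   machinery can attribute each block, e.g.

	ptr = sh_malloc (1024, __FILE__, __LINE__);
	ptr = sh_realloc (ptr, 2048, __FILE__, __LINE__);
	sh_free (ptr, __FILE__, __LINE__);

   Callers such as bash's xmalloc layer are expected to pass their own
   location this way rather than calling the bare malloc()/free() entry
   points below. */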
/* Externally-available functions that call their internal counterparts. */

  return internal_malloc (size, (char *)NULL, 0, 0);

realloc (mem, nbytes)
  return internal_realloc (mem, nbytes, (char *)NULL, 0, 0);

  internal_free (mem, (char *)NULL, 0, 0);

memalign (alignment, size)
  return internal_memalign (alignment, size, (char *)NULL, 0, 0);

  return internal_valloc (size, (char *)NULL, 0, 0);

  return internal_calloc (n, s, (char *)NULL, 0, 0);

  internal_cfree (mem, (char *)NULL, 0, 0);