1 /* malloc.c - dynamic memory allocation for bash. */
3 /* Copyright (C) 1985-2005 Free Software Foundation, Inc.
5 This file is part of GNU Bash, the Bourne-Again SHell.
7 Bash is free software: you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation, either version 3 of the License, or
10 (at your option) any later version.
12 Bash is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with Bash. If not, see <http://www.gnu.org/licenses/>.
22 * @(#)nmalloc.c 1 (Caltech) 2/21/82
24 * U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
26 * Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
28 * This is a very fast storage allocator. It allocates blocks of a small
29 * number of different sizes, and keeps free lists of each size. Blocks
30 * that don't exactly fit are passed up to the next larger size. In this
31 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
32 * This is designed for use in a program that uses vast quantities of
33 * memory, but bombs when it runs out. To make it a little better, it
34 * warns the user when he starts to get near the end.
36 * June 84, ACT: modified rcheck code to check the range given to malloc,
37 * rather than the range determined by the 2-power used.
39 * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
40 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
41 * You should call malloc_init to reinitialize after loading dumped Emacs.
42 * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
43 * realloc knows how to return same block given, just changing its size,
44 * if the power of 2 is correct.
48 * nextf[i] is the pointer to the next free block of size 2^(i+3). The
49 * smallest allocatable block is 8 bytes. The overhead information will
50 * go in the first int of the block, and the returned pointer will point
54 /* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to
55 uncover callers that refer to freed memory, and to have malloc() write 0xdf
56 into memory as it's allocated to avoid referring to previous contents. */
58 /* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE;
59 handled by configure. */
61 #if defined (HAVE_CONFIG_H)
63 #endif /* HAVE_CONFIG_H */
66 # include "bashtypes.h"
69 # include <sys/types.h>
72 #if defined (HAVE_UNISTD_H)
76 /* Determine which kind of system this is. */
79 #if defined (HAVE_STRING_H)
87 /* Define getpagesize () if the system does not. */
88 #ifndef HAVE_GETPAGESIZE
89 # include "getpagesize.h"
96 #ifdef MALLOC_REGISTER
103 /* System-specific omissions. */
110 #define ISALLOC ((char) 0xf7) /* magic byte that implies allocation */
111 #define ISFREE ((char) 0x54) /* magic byte that implies free block */
112 /* this is for error checking only */
113 #define ISMEMALIGN ((char) 0xd6) /* Stored before the value returned by
114 memalign, with the rest of the word
115 being the distance to the true
116 beginning of the block. */
119 /* We have a flag indicating whether memory is allocated, an index in
120 nextf[], a size field, and a sentinel value to determine whether or
121 not a caller wrote before the start of allocated memory; to realloc()
122 memory we either copy mh_nbytes or just change mh_nbytes if there is
123 enough room in the block for the new size. Range checking is always
126 bits64_t mh_align; /* 8 */
128 char mi_alloc; /* ISALLOC or ISFREE */ /* 1 */
129 char mi_index; /* index in nextf[] */ /* 1 */
130 /* Remainder are valid only when block is allocated */
131 u_bits16_t mi_magic2; /* should be == MAGIC2 */ /* 2 */
132 u_bits32_t mi_nbytes; /* # of bytes allocated */ /* 4 */
135 #define mh_alloc minfo.mi_alloc
136 #define mh_index minfo.mi_index
137 #define mh_nbytes minfo.mi_nbytes
138 #define mh_magic2 minfo.mi_magic2
140 #define MOVERHEAD sizeof(union mhead)
141 #define MALIGN_MASK 7 /* one less than desired alignment */
143 typedef union _malloc_guard {
148 /* Access free-list pointer of a block.
149 It is stored at block + sizeof (char *).
150 This is not a field in the minfo structure member of union mhead
151 because we want sizeof (union mhead)
152 to describe the overhead for when the block is in use,
153 and we do not want the free-list pointer to count in that. */
156 (*(union mhead **) (sizeof (char *) + (char *) (a)))
158 /* To implement range checking, we write magic values in at the beginning
159 and end of each allocated block, and make sure they are undisturbed
160 whenever a free or a realloc occurs. */
162 /* Written in the 2 bytes before the block's real space (-4 bytes) */
163 #define MAGIC2 0x5555
164 #define MSLOP 4 /* 4 bytes extra for u_bits32_t size */
166 /* How many bytes are actually allocated for a request of size N --
167 rounded up to nearest multiple of 8 after accounting for malloc
169 #define ALLOCATED_BYTES(n) \
170 (((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK)
175 if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, __STRING(p), file, line); \
179 /* Minimum and maximum bucket indices for block splitting (and to bound
180 the search for a block to split). */
181 #define SPLIT_MIN 2 /* XXX - was 3 */
185 /* Minimum and maximum bucket indices for block coalescing. */
186 #define COMBINE_MIN 2
187 #define COMBINE_MAX (pagebucket - 1) /* XXX */
189 #define LESSCORE_MIN 10
190 #define LESSCORE_FRC 13
194 /* Flags for the internal functions. */
195 #define MALLOC_WRAPPER 0x01 /* wrapper function */
196 #define MALLOC_INTERNAL 0x02 /* internal function calling another */
197 #define MALLOC_NOTRACE 0x04 /* don't trace this allocation or free */
198 #define MALLOC_NOREG 0x08 /* don't register this allocation or free */
201 #define ERR_DUPFREE 0x01
202 #define ERR_UNALLOC 0x02
203 #define ERR_UNDERFLOW 0x04
204 #define ERR_ASSERT_FAILED 0x08
206 /* Evaluates to true if NB is appropriate for bucket NU. NB is adjusted
207 appropriately by the caller to account for malloc overhead. This only
208 checks that the recorded size is not too big for the bucket. We
209 can't check whether or not it's in between NU and NU-1 because we
210 might have encountered a busy bucket when allocating and moved up to
212 #define IN_BUCKET(nb, nu) ((nb) <= binsizes[(nu)])
214 /* Use this when we want to be sure that NB is in bucket NU. */
215 #define RIGHT_BUCKET(nb, nu) \
216 (((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)]))
218 /* nextf[i] is free list of blocks of size 2**(i + 3) */
220 static union mhead *nextf[NBUCKETS];
222 /* busy[i] is nonzero while allocation or free of block size i is in progress. */
224 static char busy[NBUCKETS];
226 static int pagesz; /* system page size. */
227 static int pagebucket; /* bucket for requests a page in size */
228 static int maxbuck; /* highest bucket receiving allocation request. */
230 static char *memtop; /* top of heap */
232 static const unsigned long binsizes[NBUCKETS] = {
233 8UL, 16UL, 32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL,
234 8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL,
235 1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL,
236 67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL,
237 2147483648UL, 4294967295UL
240 /* binsizes[x] == (1 << ((x) + 3)) */
241 #define binsize(x) binsizes[(x)]
243 /* Declarations for internal functions */
244 static PTR_T internal_malloc __P((size_t, const char *, int, int));
245 static PTR_T internal_realloc __P((PTR_T, size_t, const char *, int, int));
246 static void internal_free __P((PTR_T, const char *, int, int));
247 static PTR_T internal_memalign __P((size_t, size_t, const char *, int, int));
249 static PTR_T internal_calloc __P((size_t, size_t, const char *, int, int));
250 static void internal_cfree __P((PTR_T, const char *, int, int));
253 static PTR_T internal_valloc __P((size_t, const char *, int, int));
257 extern void botch ();
259 static void botch __P((const char *, const char *, int));
261 static void xbotch __P((PTR_T, int, const char *, const char *, int));
264 extern char *sbrk ();
265 #endif /* !HAVE_DECL_SBRK */
268 extern int interrupt_immediately;
269 extern int signal_is_trapped __P((int));
273 struct _malstats _mstats;
274 #endif /* MALLOC_STATS */
276 /* Debugging variables available to applications. */
277 int malloc_flags = 0; /* future use */
278 int malloc_trace = 0; /* trace allocations and frees to stderr */
279 int malloc_register = 0; /* future use */
282 char _malloc_trace_buckets[NBUCKETS];
284 /* These should really go into a header file. */
285 extern void mtrace_alloc __P((const char *, PTR_T, size_t, const char *, int));
286 extern void mtrace_free __P((PTR_T, int, const char *, int));
/* Print failed-assertion text S (with FILE:LINE context passed by the
   caller) to stderr.  NOTE(review): this extract omits the K&R parameter
   declarations and the rest of the body (interior lines are missing). */
291 botch (s, file, line)
296 fprintf (stderr, _("malloc: failed assertion: %s\n"), s);
297 (void)fflush (stderr);
302 /* print the file and line number that caused the assertion failure and
303 call botch() to do whatever the application wants with the information */
/* Report an allocator consistency failure for block MEM (error code E,
   message S) at FILE:LINE, optionally describe the block via the malloc
   registry, then delegate to botch().  NOTE(review): interior lines of
   this function are missing from the extract. */
305 xbotch (mem, e, s, file, line)
312 fprintf (stderr, _("\r\nmalloc: %s:%d: assertion botched\r\n"),
313 file ? file : _("unknown"), line);
314 #ifdef MALLOC_REGISTER
/* Only describe MEM when registration is enabled and MEM is non-null. */
315 if (mem != NULL && malloc_register)
316 mregister_describe_mem (mem, stderr);
318 (void)fflush (stderr);
319 botch(s, file, line);
322 /* Coalesce two adjacent free blocks off the free list for size NU - 1,
323 as long as we can find two adjacent free blocks. nextf[NU -1] is
324 assumed to not be busy; the caller (morecore()) checks for this.
325 BUSY[NU] must be set to 1. */
/* Coalesce two adjacent free blocks from nextf[nbuck] (bucket NU-1, per
   the comment above) into one block for nextf[nu].  Bails out if the
   lower bucket is empty or busy, or if no two adjacent blocks are found.
   NOTE(review): loop bodies and several statements are missing from this
   extract; mp's initialization is not visible. */
330 register union mhead *mp, *mp1, *mp2;
335 if (nextf[nbuck] == 0 || busy[nbuck])
339 siz = binsize (nbuck);
341 mp2 = mp1 = nextf[nbuck];
/* Walk the free chain looking for a block physically adjacent to mp1. */
343 while (mp && mp != (union mhead *)((char *)mp1 + siz))
356 /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
357 CHAIN(mp2) must equal mp1. Check that mp1 and mp are adjacent. */
358 if (mp2 != mp1 && CHAIN(mp2) != mp1)
361 xbotch ((PTR_T)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL, 0);
365 if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))
368 return; /* not adjacent */
372 /* Since they are adjacent, remove them from the free list */
373 if (mp1 == nextf[nbuck])
374 nextf[nbuck] = CHAIN (mp);
376 CHAIN (mp2) = CHAIN (mp);
380 _mstats.tbcoalesce++;
381 _mstats.ncoalesce[nbuck]++;
384 /* And add the combined two blocks to nextf[NU]. */
385 mp1->mh_alloc = ISFREE;
387 CHAIN (mp1) = nextf[nu];
391 /* Split a block at index > NU (but less than SPLIT_MAX) into a set of
392 blocks of the correct size, and attach them to nextf[NU]. nextf[NU]
393 is assumed to be empty. Must be called with signals blocked (e.g.,
394 by morecore()). BUSY[NU] must be set to 1. */
/* Split one block from a larger bucket into binsize(nu)-sized pieces and
   attach them to nextf[nu].  Searches downward from split_max, then
   upward from nu+1, for a non-busy, non-empty bucket to carve up.
   NOTE(review): the function header, `siz` computation, and parts of the
   split loop are missing from this extract. */
399 register union mhead *mp;
400 int nbuck, nblks, split_max;
403 split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;
/* First pass: look downward from the largest candidate bucket. */
407 for (nbuck = split_max; nbuck > nu; nbuck--)
409 if (busy[nbuck] || nextf[nbuck] == 0)
/* Second pass: look upward from just above the requested bucket. */
416 for (nbuck = nu + 1; nbuck <= split_max; nbuck++)
418 if (busy[nbuck] || nextf[nbuck] == 0)
424 if (nbuck > split_max || nbuck <= nu)
427 /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
428 and nbuck is below some threshold. */
430 /* Remove the block from the chain of larger blocks. */
433 nextf[nbuck] = CHAIN (mp);
438 _mstats.nsplit[nbuck]++;
441 /* Figure out how many blocks we'll get. */
443 nblks = binsize (nbuck) / siz;
445 /* Split the block and put it on the requested chain. */
449 mp->mh_alloc = ISFREE;
451 if (--nblks <= 0) break;
/* Link each carved piece to the next one, siz bytes further along. */
452 CHAIN (mp) = (union mhead *)((char *)mp + siz);
453 mp = (union mhead *)((char *)mp + siz);
458 /* Take the memory block MP and add it to a chain < NU. NU is the right bucket,
459 but is busy. This avoids memory orphaning. */
/* Free block MP whose correct bucket NU is busy: split it into pieces
   for the largest available non-busy smaller bucket (>= SPLIT_MIN) so
   the memory isn't orphaned.  NOTE(review): the function header, the
   initialization of nbuck, and the loop braces are missing from this
   extract. */
466 int nbuck, nblks, split_max;
/* Step down until we find a bucket that isn't busy. */
470 while (nbuck >= SPLIT_MIN && busy[nbuck])
472 if (nbuck < SPLIT_MIN)
477 _mstats.nsplit[nu]++;
480 /* Figure out how many blocks we'll get. */
481 siz = binsize (nu); /* original block size */
482 nblks = siz / binsize (nbuck); /* should be 2 most of the time */
484 /* And add it to nextf[nbuck] */
485 siz = binsize (nbuck); /* XXX - resetting here */
489 mp->mh_alloc = ISFREE;
490 mp->mh_index = nbuck;
491 if (--nblks <= 0) break;
492 CHAIN (mp) = (union mhead *)((char *)mp + siz);
493 mp = (union mhead *)((char *)mp + siz);
/* Chain the final piece onto the target bucket's free list. */
496 CHAIN (mp) = nextf[nbuck];
/* Block all signals, saving the old mask in *OSETP, so the allocator's
   free-list manipulation can't be interrupted by a signal handler that
   itself calls malloc.  Uses sigprocmask on POSIX systems, sigsetmask
   on BSD.  NOTE(review): sigfillset/setup lines are missing from this
   extract. */
502 block_signals (setp, osetp)
503 sigset_t *setp, *osetp;
505 #ifdef HAVE_POSIX_SIGNALS
508 sigprocmask (SIG_BLOCK, setp, osetp);
510 # if defined (HAVE_BSD_SIGNALS)
511 *osetp = sigsetmask (-1);
/* Restore the signal mask saved by block_signals().  NOTE(review): the
   BSD branch's sigsetmask restore line is missing from this extract. */
517 unblock_signals (setp, osetp)
518 sigset_t *setp, *osetp;
520 #ifdef HAVE_POSIX_SIGNALS
521 sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
523 # if defined (HAVE_BSD_SIGNALS)
529 /* Return some memory to the system by reducing the break. This is only
530 called with NU > pagebucket, so we're always assured of giving back
531 more than one page of memory. */
/* Return a block of bucket NU to the system by lowering the break.
   Per the comment above, only called with NU > pagebucket, so at least
   a page is returned.  NOTE(review): the sbrk(-siz) call itself is
   missing from this extract. */
533 lesscore (nu) /* give system back some memory */
534 register int nu; /* size index we're discarding */
539 /* Should check for errors here, I guess. */
545 _mstats.tsbrk -= siz;
546 _mstats.nlesscore[nu]++;
550 /* Ask system for more memory; add to NEXTF[NU]. BUSY[NU] must be set to 1. */
/* Replenish nextf[nu]: first try splitting a larger free block, then
   coalescing two adjacent smaller ones, and finally ask the kernel for
   more memory via sbrk().  Runs with signals blocked when signal
   handlers might re-enter malloc.  BUSY[NU] must already be 1 (per the
   comment above).  NOTE(review): many interior lines -- conditionals,
   braces, the page-multiple fast path -- are missing from this extract. */
553 register int nu; /* size index to get more of */
555 register union mhead *mp;
558 long sbrk_amt; /* amount to get via sbrk() */
562 /* Block all signals in case we are executed from a signal handler. */
565 if (interrupt_immediately || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
568 block_signals (&set, &oset);
572 siz = binsize (nu); /* size of desired block for nextf[nu] */
575 goto morecore_done; /* oops */
578 _mstats.nmorecore[nu]++;
581 /* Try to split a larger block here, if we're within the range of sizes
590 /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
591 if we can, and we're within the range of the block coalescing limits. */
592 if (nu >= COMBINE_MIN && nu < COMBINE_MAX && busy[nu - 1] == 0 && nextf[nu - 1])
599 /* Take at least a page, and figure out how many blocks of the requested
600 size we're getting. */
604 nblks = sbrk_amt / siz;
608 /* We always want to request an integral multiple of the page size
609 from the kernel, so let's compute whether or not `siz' is such
610 an amount. If it is, we can just request it. If not, we want
611 the smallest integral multiple of pagesize that is larger than
612 `siz' and will satisfy the request. */
613 sbrk_amt = siz & (pagesz - 1);
617 sbrk_amt = siz + pagesz - sbrk_amt;
623 _mstats.tsbrk += sbrk_amt;
626 mp = (union mhead *) sbrk (sbrk_amt);
628 /* Totally out of memory. */
634 /* shouldn't happen, but just in case -- require 8-byte alignment */
635 if ((long)mp & MALIGN_MASK)
636 mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK);
641 /* save new header and link the nblks blocks together */
645 mp->mh_alloc = ISFREE;
647 if (--nblks <= 0) break;
648 CHAIN (mp) = (union mhead *)((char *)mp + siz);
649 mp = (union mhead *)((char *)mp + siz);
655 unblock_signals (&set, &oset);
/* Hook for debuggers to breakpoint on; writes a marker to stdout
   (fd 1, 19 bytes = strlen of the message). */
659 malloc_debug_dummy ()
661 write (1, "malloc_debug_dummy\n", 19);
665 #define PREPOP_SIZE 32
/* Presumably pagealign() (called from internal_malloc at original line
   750 -- the function header is not visible in this extract): advance
   the break pointer so future sbrk() calls are page-aligned, then seed
   bucket PREPOP_BIN with 32-byte chunks carved from the otherwise-wasted
   partial page, and compute `pagebucket'.  TODO confirm name against the
   full source. */
671 register union mhead *mp;
675 pagesz = getpagesize ();
679 /* OK, how much do we need to allocate to make things page-aligned?
680 Some of this partial page will be wasted space, but we'll use as
681 much as we can. Once we figure out how much to advance the break
682 pointer, go ahead and do it. */
683 memtop = curbrk = sbrk (0);
684 sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1)); /* sbrk(0) % pagesz */
686 sbrk_needed += pagesz;
688 /* Now allocate the wasted space. */
693 _mstats.tsbrk += sbrk_needed;
695 curbrk = sbrk (sbrk_needed);
/* sbrk returns (char *)-1 on failure. */
696 if ((long)curbrk == -1)
698 memtop += sbrk_needed;
700 /* Take the memory which would otherwise be wasted and populate the most
701 popular bin (2 == 32 bytes) with it. Add whatever we need to curbrk
702 to make things 32-byte aligned, compute how many 32-byte chunks we're
703 going to get, and set up the bin. */
704 curbrk += sbrk_needed & (PREPOP_SIZE - 1);
705 sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1);
706 nunits = sbrk_needed / PREPOP_SIZE;
710 mp = (union mhead *)curbrk;
712 nextf[PREPOP_BIN] = mp;
715 mp->mh_alloc = ISFREE;
716 mp->mh_index = PREPOP_BIN;
717 if (--nunits <= 0) break;
718 CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE);
719 mp = (union mhead *)((char *)mp + PREPOP_SIZE);
725 /* compute which bin corresponds to the page size. */
726 for (nunits = 7; nunits < NBUCKETS; nunits++)
727 if (pagesz <= binsize(nunits))
/* Core allocator: return a block of at least N bytes, or NULL on failure.
   FILE/LINE identify the caller for tracing; FLAGS are the MALLOC_*
   flags defined above.  Picks the smallest bucket whose binsize fits
   ALLOCATED_BYTES(n), refills it via morecore() when empty, stamps the
   header magic, and writes the requested size after the user area for
   underflow/overflow detection on free.  NOTE(review): interior lines
   (braces, busy[] bookkeeping, z initialization) are missing from this
   extract. */
735 internal_malloc (n, file, line, flags) /* get a block */
740 register union mhead *p;
742 register char *m, *z;
746 /* Get the system page size and align break pointer so future sbrks will
747 be page-aligned. The page size must be at least 1K -- anything
748 smaller is increased. */
750 if (pagealign () < 0)
751 return ((PTR_T)NULL);
753 /* Figure out how many bytes are required, rounding up to the nearest
754 multiple of 8, then figure out which nextf[] area to use. Try to
755 be smart about where to start searching -- if the number of bytes
756 needed is greater than the page size, we can start at pagebucket. */
757 nbytes = ALLOCATED_BYTES(n);
758 nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
759 for ( ; nunits < NBUCKETS; nunits++)
760 if (nbytes <= binsize(nunits))
763 /* Silently reject too-large requests. */
764 if (nunits >= NBUCKETS)
765 return ((PTR_T) NULL);
767 /* In case this is reentrant use of malloc from signal handler,
768 pick a block size that no other malloc level is currently
769 trying to allocate. That's the easiest harmless way not to
770 interfere with the other level of execution. */
772 if (busy[nunits]) _mstats.nrecurse++;
774 while (busy[nunits]) nunits++;
777 if (nunits > maxbuck)
780 /* If there are no blocks of the appropriate size, go get some */
781 if (nextf[nunits] == 0)
784 /* Get one block off the list, and set the new list head */
785 if ((p = nextf[nunits]) == NULL)
790 nextf[nunits] = CHAIN (p);
793 /* Check for free block clobbered */
794 /* If not for this check, we would gobble a clobbered free chain ptr
795 and bomb out on the NEXT allocate of this size block */
796 if (p->mh_alloc != ISFREE || p->mh_index != nunits)
797 xbotch ((PTR_T)(p+1), 0, _("malloc: block on free list clobbered"), file, line);
799 /* Fill in the info, and set up the magic numbers for range checking. */
800 p->mh_alloc = ISALLOC;
801 p->mh_magic2 = MAGIC2;
/* Copy the 4-byte size guard to just past the user's N bytes; z is
   presumably set from p->mh_nbytes (initialization not visible here). */
807 m = (char *) (p + 1) + n;
808 *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;
812 MALLOC_MEMSET ((char *)(p + 1), 0xdf, n); /* scramble previous contents */
815 _mstats.nmalloc[nunits]++;
816 _mstats.tmalloc[nunits]++;
818 _mstats.bytesreq += n;
819 #endif /* MALLOC_STATS */
822 if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
823 mtrace_alloc ("malloc", p + 1, n, file, line);
824 else if (_malloc_trace_buckets[nunits])
825 mtrace_alloc ("malloc", p + 1, n, file, line);
828 #ifdef MALLOC_REGISTER
829 if (malloc_register && (flags & MALLOC_NOREG) == 0)
830 mregister_alloc ("malloc", p + 1, n, file, line);
834 if (_malloc_nwatch > 0)
835 _malloc_ckwatch (p + 1, file, line, W_ALLOC, n);
/* User memory starts just past the mhead header. */
838 return (PTR_T) (p + 1);
/* Core deallocator: validate MEM's header (double free, unallocated,
   underflow, start/end size mismatch), then either return memory to the
   system via lesscore(), route it through xsplit() if its bucket is
   busy, or push it on the bucket's free chain.  NOTE(review): interior
   lines (braces, memalign back-pointer adjustment, mg setup, goto
   labels) are missing from this extract. */
842 internal_free (mem, file, line, flags)
847 register union mhead *p;
848 register char *ap, *z;
850 register unsigned int nbytes;
851 int ubytes; /* caller-requested size */
/* free(NULL) is a no-op. */
854 if ((ap = (char *)mem) == 0)
857 p = (union mhead *) ap - 1;
/* memalign'd blocks store the distance back to the true start;
   step back again after adjusting (adjustment lines not visible). */
859 if (p->mh_alloc == ISMEMALIGN)
862 p = (union mhead *) ap - 1;
865 #if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER)
866 if (malloc_trace || malloc_register)
867 ubytes = p->mh_nbytes;
870 if (p->mh_alloc != ISALLOC)
872 if (p->mh_alloc == ISFREE)
873 xbotch (mem, ERR_DUPFREE,
874 _("free: called with already freed block argument"), file, line);
876 xbotch (mem, ERR_UNALLOC,
877 _("free: called with unallocated block argument"), file, line);
880 ASSERT (p->mh_magic2 == MAGIC2);
882 nunits = p->mh_index;
883 nbytes = ALLOCATED_BYTES(p->mh_nbytes);
884 /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
885 are now used for the number of bytes allocated, a simple check of
886 mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
887 We sanity-check the value of mh_nbytes against the size of the blocks
888 in the appropriate bucket before we use it. This can still cause problems
889 and obscure errors if mh_nbytes is wrong but still within range; the
890 checks against the size recorded at the end of the chunk will probably
891 fail then. Using MALLOC_REGISTER will help here, since it saves the
892 original number of bytes requested. */
894 if (IN_BUCKET(nbytes, nunits) == 0)
895 xbotch (mem, ERR_UNDERFLOW,
896 _("free: underflow detected; mh_nbytes out of range"), file, line);
/* Compare the trailing size guard with mh_nbytes; ap presumably points
   just past the user area and mg collects the 4 guard bytes (setup not
   visible here). */
900 *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++;
901 if (mg.i != p->mh_nbytes)
902 xbotch (mem, ERR_ASSERT_FAILED, _("free: start and end chunk sizes differ"), file, line);
/* Two variants of the "is this block at the top of the heap" test appear
   here (presumably chosen by a preprocessor conditional not visible in
   this extract). */
905 if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop))
907 if (((char *)p + binsize(nunits) == memtop) && nunits >= LESSCORE_MIN)
910 /* If above LESSCORE_FRC, give back unconditionally. This should be set
911 high enough to be infrequently encountered. If between LESSCORE_MIN
912 and LESSCORE_FRC, call lesscore if the bucket is marked as busy or if
913 there's already a block on the free list. */
914 if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0)
917 /* keeps the tracing and registering code in one place */
924 MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes);
927 ASSERT (nunits < NBUCKETS);
929 if (busy[nunits] == 1)
931 xsplit (p, nunits); /* split block and add to different chain */
935 p->mh_alloc = ISFREE;
936 /* Protect against signal handlers calling malloc. */
938 /* Put this block on the free list. */
939 CHAIN (p) = nextf[nunits];
944 ; /* Empty statement in case this is the end of the function */
947 _mstats.nmalloc[nunits]--;
949 #endif /* MALLOC_STATS */
952 if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
953 mtrace_free (mem, ubytes, file, line);
954 else if (_malloc_trace_buckets[nunits])
955 mtrace_free (mem, ubytes, file, line);
958 #ifdef MALLOC_REGISTER
959 if (malloc_register && (flags & MALLOC_NOREG) == 0)
960 mregister_free (mem, ubytes, file, line);
964 if (_malloc_nwatch > 0)
965 _malloc_ckwatch (mem, file, line, W_FREE, ubytes);
/* Core realloc: validate MEM's header like internal_free(), then either
   resize in place when the new size still fits the same bucket (just
   rewriting mh_nbytes and the trailing size guard), or malloc a new
   block, FASTCOPY the old contents, and free the old block.  Degenerate
   cases (n == 0 -> free, mem == NULL -> malloc) are handled first.
   NOTE(review): interior lines (braces, the n == 0 test, mg setup,
   return statements) are missing from this extract. */
970 internal_realloc (mem, n, file, line, flags)
976 register union mhead *p;
977 register u_bits32_t tocopy;
978 register unsigned int nbytes;
980 register char *m, *z;
/* realloc(mem, 0) frees and (presumably) returns NULL -- the guarding
   condition is not visible in this extract. */
989 internal_free (mem, file, line, MALLOC_INTERNAL);
992 if ((p = (union mhead *) mem) == 0)
993 return internal_malloc (n, file, line, MALLOC_INTERNAL);
/* NOTE(review): p is later used as the block header (p->mh_index), so a
   `p--` style adjustment presumably occurs in a missing line. */
996 nunits = p->mh_index;
997 ASSERT (nunits < NBUCKETS);
999 if (p->mh_alloc != ISALLOC)
1000 xbotch (mem, ERR_UNALLOC,
1001 _("realloc: called with unallocated block argument"), file, line);
1003 ASSERT (p->mh_magic2 == MAGIC2);
1004 nbytes = ALLOCATED_BYTES(p->mh_nbytes);
1005 /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
1006 are now used for the number of bytes allocated, a simple check of
1007 mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
1008 We sanity-check the value of mh_nbytes against the size of the blocks
1009 in the appropriate bucket before we use it. This can still cause problems
1010 and obscure errors if mh_nbytes is wrong but still within range; the
1011 checks against the size recorded at the end of the chunk will probably
1012 fail then. Using MALLOC_REGISTER will help here, since it saves the
1013 original number of bytes requested. */
1014 if (IN_BUCKET(nbytes, nunits) == 0)
1015 xbotch (mem, ERR_UNDERFLOW,
1016 _("realloc: underflow detected; mh_nbytes out of range"), file, line);
/* Verify the trailing size guard before trusting mh_nbytes. */
1018 m = (char *)mem + (tocopy = p->mh_nbytes);
1020 *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++;
1021 if (mg.i != p->mh_nbytes)
1022 xbotch (mem, ERR_ASSERT_FAILED, _("realloc: start and end chunk sizes differ"), file, line);
1025 if (_malloc_nwatch > 0)
1026 _malloc_ckwatch (p + 1, file, line, W_REALLOC, n);
1029 _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy;
1032 /* See if desired size rounds to same power of 2 as actual size. */
1033 nbytes = ALLOCATED_BYTES(n);
1035 /* If ok, use the same block, just marking its size as changed. */
1036 if (RIGHT_BUCKET(nbytes, nunits))
/* In-place path: erase the old trailing guard, store the new size, and
   write a fresh guard after the new N bytes. */
1039 m = (char *)mem + p->mh_nbytes;
1041 /* Compensate for increment above. */
1044 *m++ = 0; *m++ = 0; *m++ = 0; *m++ = 0;
1045 m = (char *)mem + (p->mh_nbytes = n);
1049 *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;
/* Move path: allocate, copy min(old,new) bytes, release the original. */
1061 if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0)
1063 FASTCOPY (mem, m, tocopy);
1064 internal_free (mem, file, line, MALLOC_INTERNAL);
1067 if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
1068 mtrace_alloc ("realloc", m, n, file, line);
1069 else if (_malloc_trace_buckets[nunits])
1070 mtrace_alloc ("realloc", m, n, file, line);
1073 #ifdef MALLOC_REGISTER
1074 if (malloc_register && (flags & MALLOC_NOREG) == 0)
1075 mregister_alloc ("realloc", m, n, file, line);
1079 if (_malloc_nwatch > 0)
1080 _malloc_ckwatch (m, file, line, W_RESIZED, n);
/* Allocate SIZE bytes whose address is a multiple of ALIGNMENT
   (presumably a power of two -- the two masking forms below are
   equivalent only then).  Over-allocates by ALIGNMENT, rounds the
   pointer up, and marks the header ISMEMALIGN with the offset back to
   the true block start so internal_free() can recover it.
   NOTE(review): the NULL-check after malloc and the return statements
   are missing from this extract. */
1087 internal_memalign (alignment, size, file, line, flags)
1094 register char *aligned;
1095 register union mhead *p;
1097 ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL);
1101 /* If entire block has the desired alignment, just accept it. */
1102 if (((long) ptr & (alignment - 1)) == 0)
1104 /* Otherwise, get address of byte in the block that has that alignment. */
/* Two equivalent rounding forms appear here (presumably selected by a
   preprocessor conditional not visible in this extract). */
1106 aligned = (char *) (((long) ptr + alignment - 1) & -alignment);
1108 aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1));
1111 /* Store a suitable indication of how to free the block,
1112 so that free can find the true beginning of it. */
1113 p = (union mhead *) aligned - 1;
1114 p->mh_nbytes = aligned - ptr;
1115 p->mh_alloc = ISMEMALIGN;
1120 #if !defined (NO_VALLOC)
1121 /* This runs into trouble with getpagesize on HPUX, and Multimax machines.
1122 Patching out seems cleaner than the ugly fix needed. */
/* Page-aligned allocation: memalign to the system page size. */
1124 internal_valloc (size, file, line, flags)
1129 return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL);
1131 #endif /* !NO_VALLOC */
/* Allocate and zero N*S bytes.  NOTE(review): the computation of
   `total' (and any N*S overflow handling) and the NULL check before
   memset are in lines missing from this extract. */
1135 internal_calloc (n, s, file, line, flags)
1144 result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL);
1146 memset (result, 0, total);
/* Companion free for internal_calloc; just forwards to internal_free. */
1151 internal_cfree (p, file, line, flags)
1156 internal_free (p, file, line, flags|MALLOC_INTERNAL);
1158 #endif /* !NO_CALLOC */
/* Count the blocks currently on the free list for bucket SIZE by walking
   its chain.  NOTE(review): the counter increment and return are in
   lines missing from this extract. */
1162 malloc_free_blocks (size)
1166 register union mhead *p;
1169 for (p = nextf[size]; p; p = CHAIN (p))
1176 #if defined (MALLOC_WRAPFUNCS)
/* Shell-facing wrappers: each forwards to its internal_* counterpart
   with MALLOC_WRAPPER set so the internal layer knows it was called
   through the public shell API (FILE/LINE supplied by calling macros). */
1178 sh_malloc (bytes, file, line)
1183 return internal_malloc (bytes, file, line, MALLOC_WRAPPER);
1187 sh_realloc (ptr, size, file, line)
1193 return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER);
1197 sh_free (mem, file, line)
1202 internal_free (mem, file, line, MALLOC_WRAPPER);
1206 sh_memalign (alignment, size, file, line)
1212 return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER);
1217 sh_calloc (n, s, file, line)
1222 return internal_calloc (n, s, file, line, MALLOC_WRAPPER);
1226 sh_cfree (mem, file, line)
1231 internal_cfree (mem, file, line, MALLOC_WRAPPER);
1237 sh_valloc (size, file, line)
1242 return internal_valloc (size, file, line, MALLOC_WRAPPER);
1244 #endif /* !NO_VALLOC */
1246 #endif /* MALLOC_WRAPFUNCS */
1248 /* Externally-available functions that call their internal counterparts. */
/* Standard libc entry points (malloc, realloc, free, memalign, valloc,
   calloc, cfree): forward to the internal_* functions with no file/line
   info and no flags.  NOTE(review): the function headers for malloc,
   free, valloc, calloc and cfree are in lines missing from this
   extract; only their forwarding bodies are visible. */
1254 return internal_malloc (size, (char *)NULL, 0, 0);
1258 realloc (mem, nbytes)
1262 return internal_realloc (mem, nbytes, (char *)NULL, 0, 0);
1269 internal_free (mem, (char *)NULL, 0, 0);
1273 memalign (alignment, size)
1277 return internal_memalign (alignment, size, (char *)NULL, 0, 0);
1285 return internal_valloc (size, (char *)NULL, 0, 0);
1294 return internal_calloc (n, s, (char *)NULL, 0, 0);
1301 internal_cfree (mem, (char *)NULL, 0, 0);