1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996,1997,1998,1999,2000,01,02 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Library General Public License as
9 published by the Free Software Foundation; either version 2 of the
10 License, or (at your option) any later version.
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Library General Public License for more details.
17 You should have received a copy of the GNU Library General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If not,
19 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
23 This is a version (aka ptmalloc2) of malloc/free/realloc written by
24 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
26 * Version ptmalloc2-20011215
29 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
31 Note: There may be an updated version of this malloc obtainable at
32 http://www.malloc.de/malloc/ptmalloc2.tar.gz
33 Check before installing!
37 In order to compile this implementation, a Makefile is provided with
38 the ptmalloc2 distribution, which has pre-defined targets for some
39 popular systems (e.g. "make posix" for Posix threads). All that is
40 typically required with regard to compiler flags is the selection of
41 the thread package via defining one out of USE_PTHREADS, USE_THR or
42 USE_SPROC. Check the thread-m.h file for what effects this has.
43 Many/most systems will additionally require USE_TSD_DATA_HACK to be
44 defined, so this is the default for "make posix".
46 * Why use this malloc?
48 This is not the fastest, most space-conserving, most portable, or
49 most tunable malloc ever written. However it is among the fastest
50 while also being among the most space-conserving, portable and tunable.
51 Consistent balance across these factors results in a good general-purpose
52 allocator for malloc-intensive programs.
54 The main properties of the algorithms are:
55 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
56 with ties normally decided via FIFO (i.e. least recently used).
57 * For small (<= 64 bytes by default) requests, it is a caching
58 allocator, that maintains pools of quickly recycled chunks.
59 * In between, and for combinations of large and small requests, it does
60 the best it can trying to meet both goals at once.
61 * For very large requests (>= 128KB by default), it relies on system
62 memory mapping facilities, if supported.
64 For a longer but slightly out of date high-level description, see
65 http://gee.cs.oswego.edu/dl/html/malloc.html
67 You may already by default be using a C library containing a malloc
68 that is based on some version of this malloc (for example in
69 linux). You might still want to use the one in this file in order to
70 customize settings or to avoid overheads associated with library
73 * Contents, described in more detail in "description of public routines" below.
75 Standard (ANSI/SVID/...) functions:
77 calloc(size_t n_elements, size_t element_size);
79 realloc(Void_t* p, size_t n);
80 memalign(size_t alignment, size_t n);
83 mallopt(int parameter_number, int parameter_value)
86 independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
87 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
90 malloc_trim(size_t pad);
91 malloc_usable_size(Void_t* p);
96 Supported pointer representation: 4 or 8 bytes
97 Supported size_t representation: 4 or 8 bytes
98 Note that size_t is allowed to be 4 bytes even if pointers are 8.
99 You can adjust this by defining INTERNAL_SIZE_T
101 Alignment: 2 * sizeof(size_t) (default)
102 (i.e., 8 byte alignment with 4byte size_t). This suffices for
103 nearly all current machines and C compilers. However, you can
104 define MALLOC_ALIGNMENT to be wider than this if necessary.
106 Minimum overhead per allocated chunk: 4 or 8 bytes
107 Each malloced chunk has a hidden word of overhead holding size
108 and status information.
110 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
111 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
113 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
114 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
115 needed; 4 (8) for a trailing size field and 8 (16) bytes for
116 free list pointers. Thus, the minimum allocatable size is
119 Even a request for zero bytes (i.e., malloc(0)) returns a
120 pointer to something of the minimum allocatable size.
122 The maximum overhead wastage (i.e., number of extra bytes
123 allocated than were requested in malloc) is less than or equal
124 to the minimum size, except for requests >= mmap_threshold that
125 are serviced via mmap(), where the worst case wastage is 2 *
126 sizeof(size_t) bytes plus the remainder from a system page (the
127 minimal mmap unit); typically 4096 or 8192 bytes.
129 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
130 8-byte size_t: 2^64 minus about two pages
132 It is assumed that (possibly signed) size_t values suffice to
133 represent chunk sizes. `Possibly signed' is due to the fact
134 that `size_t' may be defined on a system as either a signed or
135 an unsigned type. The ISO C standard says that it must be
136 unsigned, but a few systems are known not to adhere to this.
137 Additionally, even when size_t is unsigned, sbrk (which is by
138 default used to obtain memory from system) accepts signed
139 arguments, and may not be able to handle size_t-wide arguments
140 with negative sign bit. Generally, values that would
141 appear as negative after accounting for overhead and alignment
142 are supported only via mmap(), which does not have this
145 Requests for sizes outside the allowed range will perform an optional
146 failure action and then return null. (Requests may also
147 also fail because a system is out of memory.)
149 Thread-safety: thread-safe unless NO_THREADS is defined
151 Compliance: I believe it is compliant with the 1997 Single Unix Specification
152 (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
155 * Synopsis of compile-time options:
157 People have reported using previous versions of this malloc on all
158 versions of Unix, sometimes by tweaking some of the defines
159 below. It has been tested most extensively on Solaris and
160 Linux. It is also reported to work on WIN32 platforms.
161 People also report using it in stand-alone embedded systems.
163 The implementation is in straight, hand-tuned ANSI C. It is not
164 at all modular. (Sorry!) It uses a lot of macros. To be at all
165 usable, this code should be compiled using an optimizing compiler
166 (for example gcc -O3) that can simplify expressions and control
167 paths. (FAQ: some macros import variables as arguments rather than
168 declare locals because people reported that some debuggers
169 otherwise get confused.)
173 Compilation Environment options:
175 __STD_C derived from C compiler defines
178 USE_MEMCPY 1 if HAVE_MEMCPY is defined
179 HAVE_MMAP defined as 1
181 HAVE_MREMAP 0 unless linux defined
182 USE_ARENAS the same as HAVE_MMAP
183 malloc_getpagesize derived from system #includes, or 4096 if not
184 HAVE_USR_INCLUDE_MALLOC_H NOT defined
185 LACKS_UNISTD_H NOT defined unless WIN32
186 LACKS_SYS_PARAM_H NOT defined unless WIN32
187 LACKS_SYS_MMAN_H NOT defined unless WIN32
189 Changing default word sizes:
191 INTERNAL_SIZE_T size_t
192 MALLOC_ALIGNMENT 2 * sizeof(INTERNAL_SIZE_T)
194 Configuration and functionality options:
196 USE_DL_PREFIX NOT defined
197 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
198 USE_MALLOC_LOCK NOT defined
199 MALLOC_DEBUG NOT defined
200 REALLOC_ZERO_BYTES_FREES 1
201 MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
204 Options for customizing MORECORE:
208 MORECORE_CONTIGUOUS 1
209 MORECORE_CANNOT_TRIM NOT defined
211 MMAP_AS_MORECORE_SIZE (1024 * 1024)
213 Tuning options that are also dynamically changeable via mallopt:
216 DEFAULT_TRIM_THRESHOLD 128 * 1024
218 DEFAULT_MMAP_THRESHOLD 128 * 1024
219 DEFAULT_MMAP_MAX 65536
221 There are several other #defined constants and macros that you
222 probably don't want to touch unless you are extending or adapting malloc. */
225 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
226 compiler, or a C compiler sufficiently close to ANSI to get away
231 #if defined(__STDC__) || defined(__cplusplus)
240 Void_t* is the pointer type that malloc should say it returns
244 #if (__STD_C || defined(WIN32))
252 #include <stddef.h> /* for size_t */
253 #include <stdlib.h> /* for getenv(), abort() */
255 #include <sys/types.h>
262 /* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
264 /* #define LACKS_UNISTD_H */
266 #ifndef LACKS_UNISTD_H
270 /* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
272 /* #define LACKS_SYS_PARAM_H */
275 #include <stdio.h> /* needed for malloc_stats */
276 #include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
282 Because freed chunks may be overwritten with bookkeeping fields, this
283 malloc will often die when freed memory is overwritten by user
284 programs. This can be very effective (albeit in an annoying way)
285 in helping track down dangling pointers.
287 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
288 enabled that will catch more memory errors. You probably won't be
289 able to make much sense of the actual assertion errors, but they
290 should help you locate incorrectly overwritten memory. The checking
291 is fairly extensive, and will slow down execution
292 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
293 will attempt to check every non-mmapped allocated and free chunk in
294 the course of computing the summmaries. (By nature, mmapped regions
295 cannot be checked very much automatically.)
297 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
298 this code. The assertions in the check routines spell out in more
299 detail the assumptions and invariants underlying the algorithms.
301 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
302 checking that all accesses to malloced memory stay within their
303 bounds. However, there are several add-ons and adaptations of this
304 or other mallocs available that do this.
310 #define assert(x) ((void)0)
315 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
318 The default version is the same as size_t.
320 While not strictly necessary, it is best to define this as an
321 unsigned type, even if size_t is a signed type. This may avoid some
322 artificial size limitations on some systems.
324 On a 64-bit machine, you may be able to reduce malloc overhead by
325 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
326 expense of not being able to handle more than 2^32 of malloced
327 space. If this limitation is acceptable, you are encouraged to set
328 this unless you are on a platform requiring 16byte alignments. In
329 this case the alignment requirements turn out to negate any
330 potential advantages of decreasing size_t word size.
332 Implementors: Beware of the possible combinations of:
333 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
334 and might be the same width as int or as long
335 - size_t might have different width and signedness as INTERNAL_SIZE_T
336 - int and long might be 32 or 64 bits, and might be the same width
337 To deal with this, most comparisons and difference computations
338 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
339 aware of the fact that casting an unsigned int to a wider long does
340 not sign-extend. (This also makes checking for negative numbers
341 awkward.) Some of these casts result in harmless compiler warnings
345 #ifndef INTERNAL_SIZE_T
346 #define INTERNAL_SIZE_T size_t
349 /* The corresponding word size */
350 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
354 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
355 It must be a power of two at least 2 * SIZE_SZ, even on machines
356 for which smaller alignments would suffice. It may be defined as
357 larger than this though. Note however that code and data structures
358 are optimized for the case of 8-byte alignment.
362 #ifndef MALLOC_ALIGNMENT
363 #define MALLOC_ALIGNMENT (2 * SIZE_SZ)
366 /* The corresponding bit mask value */
367 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
372 REALLOC_ZERO_BYTES_FREES should be set if a call to
373 realloc with zero bytes should be the same as a call to free.
374 This is required by the C standard. Otherwise, since this malloc
375 returns a unique pointer for malloc(0), so does realloc(p, 0).
378 #ifndef REALLOC_ZERO_BYTES_FREES
379 #define REALLOC_ZERO_BYTES_FREES 1
383 TRIM_FASTBINS controls whether free() of a very small chunk can
384 immediately lead to trimming. Setting to true (1) can reduce memory
385 footprint, but will almost always slow down programs that use a lot
388 Define this only if you are willing to give up some speed to more
389 aggressively reduce system-level memory footprint when releasing
390 memory in programs that use many small chunks. You can get
391 essentially the same effect by setting MXFAST to 0, but this can
392 lead to even greater slowdowns in programs using many small chunks.
393 TRIM_FASTBINS is an in-between compile-time option, that disables
394 only those chunks bordering topmost memory from being placed in
398 #ifndef TRIM_FASTBINS
399 #define TRIM_FASTBINS 0
404 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
405 This is necessary when you only want to use this malloc in one part
406 of a program, using your regular system malloc elsewhere.
409 /* #define USE_DL_PREFIX */
413 Two-phase name translation.
414 All of the actual routines are given mangled names.
415 When wrappers are used, they become the public callable versions.
416 When DL_PREFIX is used, the callable names are prefixed.
420 #define public_cALLOc dlcalloc
421 #define public_fREe dlfree
422 #define public_cFREe dlcfree
423 #define public_mALLOc dlmalloc
424 #define public_mEMALIGn dlmemalign
425 #define public_rEALLOc dlrealloc
426 #define public_vALLOc dlvalloc
427 #define public_pVALLOc dlpvalloc
428 #define public_mALLINFo dlmallinfo
429 #define public_mALLOPt dlmallopt
430 #define public_mTRIm dlmalloc_trim
431 #define public_mSTATs dlmalloc_stats
432 #define public_mUSABLe dlmalloc_usable_size
433 #define public_iCALLOc dlindependent_calloc
434 #define public_iCOMALLOc dlindependent_comalloc
435 #define public_gET_STATe dlget_state
436 #define public_sET_STATe dlset_state
437 #else /* USE_DL_PREFIX */
440 /* Special defines for the GNU C library. */
441 #define public_cALLOc __libc_calloc
442 #define public_fREe __libc_free
443 #define public_cFREe __libc_cfree
444 #define public_mALLOc __libc_malloc
445 #define public_mEMALIGn __libc_memalign
446 #define public_rEALLOc __libc_realloc
447 #define public_vALLOc __libc_valloc
448 #define public_pVALLOc __libc_pvalloc
449 #define public_mALLINFo __libc_mallinfo
450 #define public_mALLOPt __libc_mallopt
451 #define public_mTRIm __malloc_trim
452 #define public_mSTATs __malloc_stats
453 #define public_mUSABLe __malloc_usable_size
454 #define public_iCALLOc __libc_independent_calloc
455 #define public_iCOMALLOc __libc_independent_comalloc
456 #define public_gET_STATe __malloc_get_state
457 #define public_sET_STATe __malloc_set_state
458 #define malloc_getpagesize __getpagesize()
461 #define munmap __munmap
462 #define mremap __mremap
463 #define mprotect __mprotect
464 #define MORECORE (*__morecore)
465 #define MORECORE_FAILURE 0
467 Void_t * __default_morecore (ptrdiff_t);
468 Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;
471 #define public_cALLOc calloc
472 #define public_fREe free
473 #define public_cFREe cfree
474 #define public_mALLOc malloc
475 #define public_mEMALIGn memalign
476 #define public_rEALLOc realloc
477 #define public_vALLOc valloc
478 #define public_pVALLOc pvalloc
479 #define public_mALLINFo mallinfo
480 #define public_mALLOPt mallopt
481 #define public_mTRIm malloc_trim
482 #define public_mSTATs malloc_stats
483 #define public_mUSABLe malloc_usable_size
484 #define public_iCALLOc independent_calloc
485 #define public_iCOMALLOc independent_comalloc
486 #define public_gET_STATe malloc_get_state
487 #define public_sET_STATe malloc_set_state
489 #endif /* USE_DL_PREFIX */
493 HAVE_MEMCPY should be defined if you are not otherwise using
494 ANSI STD C, but still have memcpy and memset in your C library
495 and want to use them in calloc and realloc. Otherwise simple
496 macro versions are defined below.
498 USE_MEMCPY should be defined as 1 if you actually want to
499 have memset and memcpy called. People report that the macro
500 versions are faster than libc versions on some systems.
502 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
503 (of <= 36 bytes) are manually unrolled in realloc and calloc.
517 #if (__STD_C || defined(HAVE_MEMCPY))
520 /* On Win32 memset and memcpy are already declared in windows.h */
523 void* memset(void*, int, size_t);
524 void* memcpy(void*, const void*, size_t);
533 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
534 malloc fails to be able to return memory, either because memory is
535 exhausted or because of illegal arguments.
537 By default, sets errno if running on STD_C platform, else does nothing.
540 #ifndef MALLOC_FAILURE_ACTION
542 #define MALLOC_FAILURE_ACTION \
546 #define MALLOC_FAILURE_ACTION
551 MORECORE-related declarations. By default, rely on sbrk
555 #ifdef LACKS_UNISTD_H
556 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
558 extern Void_t* sbrk(ptrdiff_t);
560 extern Void_t* sbrk();
566 MORECORE is the name of the routine to call to obtain more memory
567 from the system. See below for general guidance on writing
568 alternative MORECORE functions, as well as a version for WIN32 and a
569 sample version for pre-OSX macos.
573 #define MORECORE sbrk
577 MORECORE_FAILURE is the value returned upon failure of MORECORE
578 as well as mmap. Since it cannot be an otherwise valid memory address,
579 and must reflect values of standard sys calls, you probably ought not
583 #ifndef MORECORE_FAILURE
584 #define MORECORE_FAILURE (-1)
588 If MORECORE_CONTIGUOUS is true, take advantage of fact that
589 consecutive calls to MORECORE with positive arguments always return
590 contiguous increasing addresses. This is true of unix sbrk. Even
591 if not defined, when regions happen to be contiguous, malloc will
592 permit allocations spanning regions obtained from different
593 calls. But defining this when applicable enables some stronger
594 consistency checks and space efficiencies.
597 #ifndef MORECORE_CONTIGUOUS
598 #define MORECORE_CONTIGUOUS 1
602 Define MORECORE_CANNOT_TRIM if your version of MORECORE
603 cannot release space back to the system when given negative
604 arguments. This is generally necessary only if you are using
605 a hand-crafted MORECORE function that cannot handle negative arguments.
608 /* #define MORECORE_CANNOT_TRIM */
610 /* MORECORE_CLEARS (default 1)
611 The degree to which the routine mapped to MORECORE zeroes out
612 memory: never (0), only for newly allocated space (1) or always
613 (2). The distinction between (1) and (2) is necessary because on
614 some systems, if the application first decrements and then
615 increments the break value, the contents of the reallocated space
619 #ifndef MORECORE_CLEARS
620 #define MORECORE_CLEARS 1
625 Define HAVE_MMAP as true to optionally make malloc() use mmap() to
626 allocate very large blocks. These will be returned to the
627 operating system immediately after a free(). Also, if mmap
628 is available, it is used as a backup strategy in cases where
629 MORECORE fails to provide space from system.
631 This malloc is best tuned to work with mmap for large requests.
632 If you do not have mmap, operations involving very large chunks (1MB
633 or so) may be slower than you'd like.
640 Standard unix mmap using /dev/zero clears memory so calloc doesn't
645 #define MMAP_CLEARS 1
650 #define MMAP_CLEARS 0
656 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
657 sbrk fails, and mmap is used as a backup (which is done only if
658 HAVE_MMAP). The value must be a multiple of page size. This
659 backup strategy generally applies only when systems have "holes" in
660 address space, so sbrk cannot perform contiguous expansion, but
661 there is still space available on system. On systems for which
662 this is known to be useful (i.e. most linux kernels), this occurs
663 only when programs allocate huge amounts of memory. Between this,
664 and the fact that mmap regions tend to be limited, the size should
665 be large, to avoid too many mmap calls and thus avoid running out
669 #ifndef MMAP_AS_MORECORE_SIZE
670 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
674 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
675 large blocks. This is currently only possible on Linux with
676 kernel versions newer than 1.3.77.
681 #define HAVE_MREMAP 1
683 #define HAVE_MREMAP 0
686 #endif /* HAVE_MMAP */
688 /* Define USE_ARENAS to enable support for multiple `arenas'. These
689 are allocated using mmap(), are necessary for threads and
690 occasionally useful to overcome address space limitations affecting
694 #define USE_ARENAS HAVE_MMAP
699 The system page size. To the extent possible, this malloc manages
700 memory from the system in page-size units. Note that this value is
701 cached during initialization into a field of malloc_state. So even
702 if malloc_getpagesize is a function, it is only called once.
704 The following mechanics for getpagesize were adapted from bsd/gnu
705 getpagesize.h. If none of the system-probes here apply, a value of
706 4096 is used, which should be OK: If they don't apply, then using
707 the actual value probably doesn't impact performance.
711 #ifndef malloc_getpagesize
713 #ifndef LACKS_UNISTD_H
717 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
718 # ifndef _SC_PAGE_SIZE
719 # define _SC_PAGE_SIZE _SC_PAGESIZE
723 # ifdef _SC_PAGE_SIZE
724 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
726 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
727 extern size_t getpagesize();
728 # define malloc_getpagesize getpagesize()
730 # ifdef WIN32 /* use supplied emulation of getpagesize */
731 # define malloc_getpagesize getpagesize()
733 # ifndef LACKS_SYS_PARAM_H
734 # include <sys/param.h>
736 # ifdef EXEC_PAGESIZE
737 # define malloc_getpagesize EXEC_PAGESIZE
741 # define malloc_getpagesize NBPG
743 # define malloc_getpagesize (NBPG * CLSIZE)
747 # define malloc_getpagesize NBPC
750 # define malloc_getpagesize PAGESIZE
751 # else /* just guess */
752 # define malloc_getpagesize (4096)
763 This version of malloc supports the standard SVID/XPG mallinfo
764 routine that returns a struct containing usage properties and
765 statistics. It should work on any SVID/XPG compliant system that has
766 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
767 install such a thing yourself, cut out the preliminary declarations
768 as described above and below and save them in a malloc.h file. But
769 there's no compelling reason to bother to do this.)
771 The main declaration needed is the mallinfo struct that is returned
772 (by-copy) by mallinfo(). The SVID/XPG malloinfo struct contains a
773 bunch of fields that are not even meaningful in this version of
774 malloc. These fields are are instead filled by mallinfo() with
775 other numbers that might be of interest.
777 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
778 /usr/include/malloc.h file that includes a declaration of struct
779 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
780 version is declared below. These must be precisely the same for
781 mallinfo() to work. The original SVID version of this struct,
782 defined on most systems with mallinfo, declares all fields as
783 ints. But some others define as unsigned long. If your system
784 defines the fields using a type of different width than listed here,
785 you must #include your system version and #define
786 HAVE_USR_INCLUDE_MALLOC_H.
789 /* #define HAVE_USR_INCLUDE_MALLOC_H */
791 #ifdef HAVE_USR_INCLUDE_MALLOC_H
792 #include "/usr/include/malloc.h"
796 /* ---------- description of public routines ------------ */
800 Returns a pointer to a newly allocated chunk of at least n bytes, or null
801 if no space is available. Additionally, on failure, errno is
802 set to ENOMEM on ANSI C systems.
804 If n is zero, malloc returns a minumum-sized chunk. (The minimum
805 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
806 systems.) On most systems, size_t is an unsigned type, so calls
807 with negative arguments are interpreted as requests for huge amounts
808 of space, which will often fail. The maximum supported value of n
809 differs across systems, but is in all cases less than the maximum
810 representable value of a size_t.
813 Void_t* public_mALLOc(size_t);
815 Void_t* public_mALLOc();
820 Releases the chunk of memory pointed to by p, that had been previously
821 allocated using malloc or a related routine such as realloc.
822 It has no effect if p is null. It can have arbitrary (i.e., bad!)
823 effects if p has already been freed.
825 Unless disabled (using mallopt), freeing very large spaces will
826 when possible, automatically trigger operations that give
827 back unused memory to the system, thus reducing program footprint.
830 void public_fREe(Void_t*);
836 calloc(size_t n_elements, size_t element_size);
837 Returns a pointer to n_elements * element_size bytes, with all locations
841 Void_t* public_cALLOc(size_t, size_t);
843 Void_t* public_cALLOc();
847 realloc(Void_t* p, size_t n)
848 Returns a pointer to a chunk of size n that contains the same data
849 as does chunk p up to the minimum of (n, p's size) bytes, or null
850 if no space is available.
852 The returned pointer may or may not be the same as p. The algorithm
853 prefers extending p when possible, otherwise it employs the
854 equivalent of a malloc-copy-free sequence.
856 If p is null, realloc is equivalent to malloc.
858 If space is not available, realloc returns null, errno is set (if on
859 ANSI) and p is NOT freed.
861 if n is for fewer bytes than already held by p, the newly unused
862 space is lopped off and freed if possible. Unless the #define
863 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
864 zero (re)allocates a minimum-sized chunk.
866 Large chunks that were internally obtained via mmap will always
867 be reallocated using malloc-copy-free sequences unless
868 the system supports MREMAP (currently only linux).
870 The old unix realloc convention of allowing the last-free'd chunk
871 to be used as an argument to realloc is not supported.
874 Void_t* public_rEALLOc(Void_t*, size_t);
876 Void_t* public_rEALLOc();
880 memalign(size_t alignment, size_t n);
881 Returns a pointer to a newly allocated chunk of n bytes, aligned
882 in accord with the alignment argument.
884 The alignment argument should be a power of two. If the argument is
885 not a power of two, the nearest greater power is used.
886 8-byte alignment is guaranteed by normal malloc calls, so don't
887 bother calling memalign with an argument of 8 or less.
889 Overreliance on memalign is a sure way to fragment space.
892 Void_t* public_mEMALIGn(size_t, size_t);
894 Void_t* public_mEMALIGn();
899 Equivalent to memalign(pagesize, n), where pagesize is the page
900 size of the system. If the pagesize is unknown, 4096 is used.
903 Void_t* public_vALLOc(size_t);
905 Void_t* public_vALLOc();
911 mallopt(int parameter_number, int parameter_value)
912 Sets tunable parameters The format is to provide a
913 (parameter-number, parameter-value) pair. mallopt then sets the
914 corresponding parameter to the argument value if it can (i.e., so
915 long as the value is meaningful), and returns 1 if successful else
916 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
917 normally defined in malloc.h. Only one of these (M_MXFAST) is used
918 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
919 so setting them has no effect. But this malloc also supports four
920 other options in mallopt. See below for details. Briefly, supported
921 parameters are as follows (listed defaults are for "typical"
924 Symbol param # default allowed param values
925 M_MXFAST 1 64 0-80 (0 disables fastbins)
926 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
928 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
929 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
932 int public_mALLOPt(int, int);
934 int public_mALLOPt();
940 Returns (by copy) a struct containing various summary statistics:
942 arena: current total non-mmapped bytes allocated from system
943 ordblks: the number of free chunks
944 smblks: the number of fastbin blocks (i.e., small chunks that
945 have been freed but not use resused or consolidated)
946 hblks: current number of mmapped regions
947 hblkhd: total bytes held in mmapped regions
948 usmblks: the maximum total allocated space. This will be greater
949 than current total if trimming has occurred.
950 fsmblks: total bytes held in fastbin blocks
951 uordblks: current total allocated space (normal or mmapped)
952 fordblks: total free space
953 keepcost: the maximum number of bytes that could ideally be released
954 back to system via malloc_trim. ("ideally" means that
955 it ignores page restrictions etc.)
957 Because these fields are ints, but internal bookkeeping may
958 be kept as longs, the reported values may wrap around zero and
962 struct mallinfo public_mALLINFo(void);
964 struct mallinfo public_mALLINFo();
968 independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
970 independent_calloc is similar to calloc, but instead of returning a
971 single cleared space, it returns an array of pointers to n_elements
972 independent elements that can hold contents of size elem_size, each
973 of which starts out cleared, and can be independently freed,
974 realloc'ed etc. The elements are guaranteed to be adjacently
975 allocated (this is not guaranteed to occur with multiple callocs or
976 mallocs), which may also improve cache locality in some
979 The "chunks" argument is optional (i.e., may be null, which is
980 probably the most typical usage). If it is null, the returned array
981 is itself dynamically allocated and should also be freed when it is
982 no longer needed. Otherwise, the chunks array must be of at least
983 n_elements in length. It is filled in with the pointers to the
986 In either case, independent_calloc returns this pointer array, or
987 null if the allocation failed. If n_elements is zero and "chunks"
988 is null, it returns a chunk representing an array with zero elements
989 (which should be freed if not wanted).
991 Each element must be individually freed when it is no longer
992 needed. If you'd like to instead be able to free all at once, you
993 should instead use regular calloc and assign pointers into this
994 space to represent elements. (In this case though, you cannot
995 independently free elements.)
997 independent_calloc simplifies and speeds up implementations of many
998 kinds of pools. It may also be useful when constructing large data
999 structures that initially have a fixed number of fixed-sized nodes,
1000 but the number is not known at compile time, and some of the nodes
1001 may later need to be freed. For example:
1003 struct Node { int item; struct Node* next; };
1005 struct Node* build_list() {
1007 int n = read_number_of_nodes_needed();
1008 if (n <= 0) return 0;
1009 pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0);
1010 if (pool == 0) die();
1011 // organize into a linked list...
1012 struct Node* first = pool[0];
1013 for (i = 0; i < n-1; ++i)
1014 pool[i]->next = pool[i+1];
1015 free(pool); // Can now free the array (or not, if it is needed later)
1020 Void_t** public_iCALLOc(size_t, size_t, Void_t**);
1022 Void_t** public_iCALLOc();
1026 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
1028 independent_comalloc allocates, all at once, a set of n_elements
1029 chunks with sizes indicated in the "sizes" array. It returns
1030 an array of pointers to these elements, each of which can be
1031 independently freed, realloc'ed etc. The elements are guaranteed to
1032 be adjacently allocated (this is not guaranteed to occur with
1033 multiple callocs or mallocs), which may also improve cache locality
1034 in some applications.
1036 The "chunks" argument is optional (i.e., may be null). If it is null
1037 the returned array is itself dynamically allocated and should also
1038 be freed when it is no longer needed. Otherwise, the chunks array
1039 must be of at least n_elements in length. It is filled in with the
1040 pointers to the chunks.
1042 In either case, independent_comalloc returns this pointer array, or
1043 null if the allocation failed. If n_elements is zero and chunks is
1044 null, it returns a chunk representing an array with zero elements
1045 (which should be freed if not wanted).
1047 Each element must be individually freed when it is no longer
1048 needed. If you'd like to instead be able to free all at once, you
1049 should instead use a single regular malloc, and assign pointers at
1050 particular offsets in the aggregate space. (In this case though, you
1051 cannot independently free elements.)
1053 independent_comallac differs from independent_calloc in that each
1054 element may have a different size, and also that it does not
1055 automatically clear elements.
1057 independent_comalloc can be used to speed up allocation in cases
1058 where several structs or objects must always be allocated at the
1059 same time. For example:
1064 void send_message(char* msg) {
1065 int msglen = strlen(msg);
1066 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1068 if (independent_comalloc(3, sizes, chunks) == 0)
1070 struct Head* head = (struct Head*)(chunks[0]);
1071 char* body = (char*)(chunks[1]);
1072 struct Foot* foot = (struct Foot*)(chunks[2]);
1076 In general though, independent_comalloc is worth using only for
1077 larger values of n_elements. For small values, you probably won't
1078 detect enough difference from series of malloc calls to bother.
1080 Overuse of independent_comalloc can increase overall memory usage,
1081 since it cannot reuse existing noncontiguous small chunks that
1082 might be available for some of the elements.
1085 Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
1087 Void_t** public_iCOMALLOc();
1093 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1094 round up n to nearest pagesize.
1097 Void_t* public_pVALLOc(size_t);
1099 Void_t* public_pVALLOc();
1104 Equivalent to free(p).
1106 cfree is needed/defined on some systems that pair it with calloc,
1107 for odd historical reasons (such as: cfree is used in example
1108 code in the first edition of K&R).
1111 void public_cFREe(Void_t*);
1113 void public_cFREe();
1117 malloc_trim(size_t pad);
1119 If possible, gives memory back to the system (via negative
1120 arguments to sbrk) if there is unused memory at the `high' end of
1121 the malloc pool. You can call this after freeing large blocks of
1122 memory to potentially reduce the system-level memory requirements
1123 of a program. However, it cannot guarantee to reduce memory. Under
1124 some allocation patterns, some large free blocks of memory will be
1125 locked between two used chunks, so they cannot be given back to
1128 The `pad' argument to malloc_trim represents the amount of free
1129 trailing space to leave untrimmed. If this argument is zero,
1130 only the minimum amount of memory to maintain internal data
1131 structures will be left (one page or less). Non-zero arguments
1132 can be supplied to maintain enough trailing space to service
1133 future expected allocations without having to re-obtain memory
1136 Malloc_trim returns 1 if it actually released any memory, else 0.
1137 On systems that do not support "negative sbrks", it will always
1141 int public_mTRIm(size_t);
1147 malloc_usable_size(Void_t* p);
1149 Returns the number of bytes you can actually use in
1150 an allocated chunk, which may be more than you requested (although
1151 often not) due to alignment and minimum size constraints.
1152 You can use this many bytes without worrying about
1153 overwriting other allocated objects. This is not a particularly great
1154 programming practice. malloc_usable_size can be more useful in
1155 debugging and assertions, for example:
1158 assert(malloc_usable_size(p) >= 256);
1162 size_t public_mUSABLe(Void_t*);
1164 size_t public_mUSABLe();
1169 Prints on stderr the amount of space obtained from the system (both
1170 via sbrk and mmap), the maximum amount (which may be more than
1171 current if malloc_trim and/or munmap got called), and the current
1172 number of bytes allocated via malloc (or realloc, etc) but not yet
1173 freed. Note that this is the number of bytes allocated, not the
1174 number requested. It will be larger than the number requested
1175 because of alignment and bookkeeping overhead. Because it includes
1176 alignment wastage as being in use, this figure may be greater than
1177 zero even when no user-level chunks are allocated.
1179 The reported current and maximum system memory can be inaccurate if
1180 a program makes other calls to system memory allocation functions
1181 (normally sbrk) outside of malloc.
1183 malloc_stats prints only the most commonly interesting statistics.
1184 More information can be obtained by calling mallinfo.
1188 void public_mSTATs(void);
1190 void public_mSTATs();
1194 malloc_get_state(void);
1196 Returns the state of all malloc variables in an opaque data
1200 Void_t* public_gET_STATe(void);
1202 Void_t* public_gET_STATe();
1206 malloc_set_state(Void_t* state);
1208 Restore the state of all malloc variables from data obtained with
1212 int public_sET_STATe(Void_t*);
1214 int public_sET_STATe();
1219 posix_memalign(void **memptr, size_t alignment, size_t size);
1221 POSIX wrapper like memalign(), checking for validity of size.
1223 int __posix_memalign(void **, size_t, size_t);
1226 /* mallopt tuning options */
1229 M_MXFAST is the maximum request size used for "fastbins", special bins
1230 that hold returned chunks without consolidating their spaces. This
1231 enables future requests for chunks of the same size to be handled
1232 very quickly, but can increase fragmentation, and thus increase the
1233 overall memory footprint of a program.
1235 This malloc manages fastbins very conservatively yet still
1236 efficiently, so fragmentation is rarely a problem for values less
1237 than or equal to the default. The maximum supported value of MXFAST
1238 is 80. You wouldn't want it any higher than this anyway. Fastbins
1239 are designed especially for use with many small structs, objects or
1240 strings -- the default handles structs/objects/arrays with sizes up
1241 to 8 4byte fields, or small strings representing words, tokens,
1242 etc. Using fastbins for larger objects normally worsens
1243 fragmentation without improving speed.
1245 M_MXFAST is set in REQUEST size units. It is internally used in
1246 chunksize units, which adds padding and alignment. You can reduce
1247 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1248 algorithm to be a closer approximation of fifo-best-fit in all cases,
1249 not just for larger requests, but will generally cause it to be
1254 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1259 #ifndef DEFAULT_MXFAST
1260 #define DEFAULT_MXFAST 64
1265 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1266 to keep before releasing via malloc_trim in free().
1268 Automatic trimming is mainly useful in long-lived programs.
1269 Because trimming via sbrk can be slow on some systems, and can
1270 sometimes be wasteful (in cases where programs immediately
1271 afterward allocate more large chunks) the value should be high
1272 enough so that your overall system performance would improve by
1273 releasing this much memory.
1275 The trim threshold and the mmap control parameters (see below)
1276 can be traded off with one another. Trimming and mmapping are
1277 two different ways of releasing unused memory back to the
1278 system. Between these two, it is often possible to keep
1279 system-level demands of a long-lived program down to a bare
1280 minimum. For example, in one test suite of sessions measuring
1281 the XF86 X server on Linux, using a trim threshold of 128K and a
1282 mmap threshold of 192K led to near-minimal long term resource
1285 If you are using this malloc in a long-lived program, it should
1286 pay to experiment with these values. As a rough guide, you
1287 might set to a value close to the average size of a process
1288 (program) running on your system. Releasing this much memory
1289 would allow such a process to run in memory. Generally, it's
1290 worth it to tune for trimming rather tham memory mapping when a
1291 program undergoes phases where several large chunks are
1292 allocated and released in ways that can reuse each other's
1293 storage, perhaps mixed with phases where there are no such
1294 chunks at all. And in well-behaved long-lived programs,
1295 controlling release of large blocks via trimming versus mapping
1298 However, in most programs, these parameters serve mainly as
1299 protection against the system-level effects of carrying around
1300 massive amounts of unneeded memory. Since frequent calls to
1301 sbrk, mmap, and munmap otherwise degrade performance, the default
1302 parameters are set to relatively high values that serve only as
1305 The trim value It must be greater than page size to have any useful
1306 effect. To disable trimming completely, you can set to
1309 Trim settings interact with fastbin (MXFAST) settings: Unless
1310 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1311 freeing a chunk with size less than or equal to MXFAST. Trimming is
1312 instead delayed until subsequent freeing of larger chunks. However,
1313 you can still force an attempted trim by calling malloc_trim.
1315 Also, trimming is not generally possible in cases where
1316 the main arena is obtained via mmap.
1318 Note that the trick some people use of mallocing a huge space and
1319 then freeing it at program startup, in an attempt to reserve system
1320 memory, doesn't have the intended effect under automatic trimming,
1321 since that memory will immediately be returned to the system.
1324 #define M_TRIM_THRESHOLD -1
1326 #ifndef DEFAULT_TRIM_THRESHOLD
1327 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
1331 M_TOP_PAD is the amount of extra `padding' space to allocate or
1332 retain whenever sbrk is called. It is used in two ways internally:
1334 * When sbrk is called to extend the top of the arena to satisfy
1335 a new malloc request, this much padding is added to the sbrk
1338 * When malloc_trim is called automatically from free(),
1339 it is used as the `pad' argument.
1341 In both cases, the actual amount of padding is rounded
1342 so that the end of the arena is always a system page boundary.
1344 The main reason for using padding is to avoid calling sbrk so
1345 often. Having even a small pad greatly reduces the likelihood
1346 that nearly every malloc request during program start-up (or
1347 after trimming) will invoke sbrk, which needlessly wastes
1350 Automatic rounding-up to page-size units is normally sufficient
1351 to avoid measurable overhead, so the default is 0. However, in
1352 systems where sbrk is relatively slow, it can pay to increase
1353 this value, at the expense of carrying around more memory than
1357 #define M_TOP_PAD -2
1359 #ifndef DEFAULT_TOP_PAD
1360 #define DEFAULT_TOP_PAD (0)
1364 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1365 to service a request. Requests of at least this size that cannot
1366 be allocated using already-existing space will be serviced via mmap.
1367 (If enough normal freed space already exists it is used instead.)
1369 Using mmap segregates relatively large chunks of memory so that
1370 they can be individually obtained and released from the host
1371 system. A request serviced through mmap is never reused by any
1372 other request (at least not directly; the system may just so
1373 happen to remap successive requests to the same locations).
1375 Segregating space in this way has the benefits that:
1377 1. Mmapped space can ALWAYS be individually released back
1378 to the system, which helps keep the system level memory
1379 demands of a long-lived program low.
1380 2. Mapped memory can never become `locked' between
1381 other chunks, as can happen with normally allocated chunks, which
1382 means that even trimming via malloc_trim would not release them.
1383 3. On some systems with "holes" in address spaces, mmap can obtain
1384 memory that sbrk cannot.
1386 However, it has the disadvantages that:
1388 1. The space cannot be reclaimed, consolidated, and then
1389 used to service later requests, as happens with normal chunks.
1390 2. It can lead to more wastage because of mmap page alignment
1392 3. It causes malloc performance to be more dependent on host
1393 system memory management support routines which may vary in
1394 implementation quality and may impose arbitrary
1395 limitations. Generally, servicing a request via normal
1396 malloc steps is faster than going through a system's mmap.
1398 The advantages of mmap nearly always outweigh disadvantages for
1399 "large" chunks, but the value of "large" varies across systems. The
1400 default is an empirically derived value that works well in most
1404 #define M_MMAP_THRESHOLD -3
1406 #ifndef DEFAULT_MMAP_THRESHOLD
1407 #define DEFAULT_MMAP_THRESHOLD (128 * 1024)
1411 M_MMAP_MAX is the maximum number of requests to simultaneously
1412 service using mmap. This parameter exists because
1413 some systems have a limited number of internal tables for
1414 use by mmap, and using more than a few of them may degrade
1417 The default is set to a value that serves only as a safeguard.
1418 Setting to 0 disables use of mmap for servicing large requests. If
1419 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1420 to non-zero values in mallopt will fail.
1423 #define M_MMAP_MAX -4
1425 #ifndef DEFAULT_MMAP_MAX
1427 #define DEFAULT_MMAP_MAX (65536)
1429 #define DEFAULT_MMAP_MAX (0)
1434 }; /* end of extern "C" */
1438 #include "thread-m.h"
1441 #define BOUNDED_N(ptr, sz) (ptr)
1443 #ifndef RETURN_ADDRESS
1444 #define RETURN_ADDRESS(X_) (NULL)
1447 /* On some platforms we can compile internal, not exported functions better.
1448 Let the environment provide a macro and define it to be empty if it
1449 is not available. */
1450 #ifndef internal_function
1451 # define internal_function
1454 /* Forward declarations. */
1455 struct malloc_chunk;
1456 typedef struct malloc_chunk* mchunkptr;
1458 /* Internal routines. */
1462 Void_t* _int_malloc(mstate, size_t);
1463 void _int_free(mstate, Void_t*);
1464 Void_t* _int_realloc(mstate, Void_t*, size_t);
1465 Void_t* _int_memalign(mstate, size_t, size_t);
1466 Void_t* _int_valloc(mstate, size_t);
1467 static Void_t* _int_pvalloc(mstate, size_t);
1468 /*static Void_t* cALLOc(size_t, size_t);*/
1469 static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**);
1470 static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**);
1471 static int mTRIm(size_t);
1472 static size_t mUSABLe(Void_t*);
1473 static void mSTATs(void);
1474 static int mALLOPt(int, int);
1475 static struct mallinfo mALLINFo(mstate);
1477 static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
1478 static int internal_function top_check(void);
1479 static void internal_function munmap_chunk(mchunkptr p);
1481 static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
1484 static Void_t* malloc_check(size_t sz, const Void_t *caller);
1485 static void free_check(Void_t* mem, const Void_t *caller);
1486 static Void_t* realloc_check(Void_t* oldmem, size_t bytes,
1487 const Void_t *caller);
1488 static Void_t* memalign_check(size_t alignment, size_t bytes,
1489 const Void_t *caller);
1491 static Void_t* malloc_starter(size_t sz, const Void_t *caller);
1492 static void free_starter(Void_t* mem, const Void_t *caller);
1493 static Void_t* malloc_atfork(size_t sz, const Void_t *caller);
1494 static void free_atfork(Void_t* mem, const Void_t *caller);
1499 Void_t* _int_malloc();
1501 Void_t* _int_realloc();
1502 Void_t* _int_memalign();
1503 Void_t* _int_valloc();
1504 Void_t* _int_pvalloc();
1505 /*static Void_t* cALLOc();*/
1506 static Void_t** _int_icalloc();
1507 static Void_t** _int_icomalloc();
1509 static size_t mUSABLe();
1510 static void mSTATs();
1511 static int mALLOPt();
1512 static struct mallinfo mALLINFo();
1519 /* ------------- Optional versions of memcopy ---------------- */
1525 Note: memcpy is ONLY invoked with non-overlapping regions,
1526 so the (usually slower) memmove is not needed.
1529 #define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1530 #define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1532 #else /* !USE_MEMCPY */
1534 /* Use Duff's device for good zeroing/copying performance. */
1536 #define MALLOC_ZERO(charp, nbytes) \
1538 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1539 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1541 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1543 case 0: for(;;) { *mzp++ = 0; \
1544 case 7: *mzp++ = 0; \
1545 case 6: *mzp++ = 0; \
1546 case 5: *mzp++ = 0; \
1547 case 4: *mzp++ = 0; \
1548 case 3: *mzp++ = 0; \
1549 case 2: *mzp++ = 0; \
1550 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1554 #define MALLOC_COPY(dest,src,nbytes) \
1556 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1557 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1558 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1560 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1562 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1563 case 7: *mcdst++ = *mcsrc++; \
1564 case 6: *mcdst++ = *mcsrc++; \
1565 case 5: *mcdst++ = *mcsrc++; \
1566 case 4: *mcdst++ = *mcsrc++; \
1567 case 3: *mcdst++ = *mcsrc++; \
1568 case 2: *mcdst++ = *mcsrc++; \
1569 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1575 /* ------------------ MMAP support ------------------ */
1581 #ifndef LACKS_SYS_MMAN_H
1582 #include <sys/mman.h>
1585 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1586 # define MAP_ANONYMOUS MAP_ANON
1588 #if !defined(MAP_FAILED)
1589 # define MAP_FAILED ((char*)-1)
1592 #ifndef MAP_NORESERVE
1593 # ifdef MAP_AUTORESRV
1594 # define MAP_NORESERVE MAP_AUTORESRV
1596 # define MAP_NORESERVE 0
1601 Nearly all versions of mmap support MAP_ANONYMOUS,
1602 so the following is unlikely to be needed, but is
1603 supplied just in case.
1606 #ifndef MAP_ANONYMOUS
1608 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1610 #define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
1611 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1612 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
1613 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
1617 #define MMAP(addr, size, prot, flags) \
1618 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1623 #endif /* HAVE_MMAP */
1627 ----------------------- Chunk representations -----------------------
1632 This struct declaration is misleading (but accurate and necessary).
1633 It declares a "view" into memory allowing access to necessary
1634 fields at known offsets from a given base. See explanation below.
1637 struct malloc_chunk {
1639 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1640 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1642 struct malloc_chunk* fd; /* double links -- used only if free. */
1643 struct malloc_chunk* bk;
1648 malloc_chunk details:
1650 (The following includes lightly edited explanations by Colin Plumb.)
1652 Chunks of memory are maintained using a `boundary tag' method as
1653 described in e.g., Knuth or Standish. (See the paper by Paul
1654 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1655 survey of such techniques.) Sizes of free chunks are stored both
1656 in the front of each chunk and at the end. This makes
1657 consolidating fragmented chunks into bigger chunks very fast. The
1658 size fields also hold bits representing whether chunks are free or
1661 An allocated chunk looks like this:
1664 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1665 | Size of previous chunk, if allocated | |
1666 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1667 | Size of chunk, in bytes |P|
1668 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1669 | User data starts here... .
1671 . (malloc_usable_space() bytes) .
1673 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1675 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1678 Where "chunk" is the front of the chunk for the purpose of most of
1679 the malloc code, but "mem" is the pointer that is returned to the
1680 user. "Nextchunk" is the beginning of the next contiguous chunk.
1682 Chunks always begin on even word boundries, so the mem portion
1683 (which is returned to the user) is also on an even word boundary, and
1684 thus at least double-word aligned.
1686 Free chunks are stored in circular doubly-linked lists, and look like this:
1688 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1689 | Size of previous chunk |
1690 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1691 `head:' | Size of chunk, in bytes |P|
1692 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1693 | Forward pointer to next chunk in list |
1694 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1695 | Back pointer to previous chunk in list |
1696 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1697 | Unused space (may be 0 bytes long) .
1700 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1701 `foot:' | Size of chunk, in bytes |
1702 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1704 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1705 chunk size (which is always a multiple of two words), is an in-use
1706 bit for the *previous* chunk. If that bit is *clear*, then the
1707 word before the current chunk size contains the previous chunk
1708 size, and can be used to find the front of the previous chunk.
1709 The very first chunk allocated always has this bit set,
1710 preventing access to non-existent (or non-owned) memory. If
1711 prev_inuse is set for any given chunk, then you CANNOT determine
1712 the size of the previous chunk, and might even get a memory
1713 addressing fault when trying to do so.
1715 Note that the `foot' of the current chunk is actually represented
1716 as the prev_size of the NEXT chunk. This makes it easier to
1717 deal with alignments etc but can be very confusing when trying
1718 to extend or adapt this code.
1720 The two exceptions to all this are
1722 1. The special chunk `top' doesn't bother using the
1723 trailing size field since there is no next contiguous chunk
1724 that would have to index off it. After initialization, `top'
1725 is forced to always exist. If it would become less than
1726 MINSIZE bytes long, it is replenished.
1728 2. Chunks allocated via mmap, which have the second-lowest-order
1729 bit (IS_MMAPPED) set in their size fields. Because they are
1730 allocated one-by-one, each must contain its own trailing size field.
1735 ---------- Size and alignment checks and conversions ----------
1738 /* conversion from malloc headers to user pointers, and back */
1740 #define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
1741 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1743 /* The smallest possible chunk */
1744 #define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))
1746 /* The smallest size we can malloc is an aligned minimal chunk */
1749 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1751 /* Check if m has acceptable alignment */
1753 #define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
1757 Check if a request is so large that it would wrap around zero when
1758 padded and aligned. To simplify some other code, the bound is made
1759 low enough so that adding MINSIZE will also not wrap around zero.
1762 #define REQUEST_OUT_OF_RANGE(req) \
1763 ((unsigned long)(req) >= \
1764 (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
1766 /* pad request bytes into a usable size -- internal version */
1768 #define request2size(req) \
1769 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1771 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1773 /* Same, except also perform argument check */
1775 #define checked_request2size(req, sz) \
1776 if (REQUEST_OUT_OF_RANGE(req)) { \
1777 MALLOC_FAILURE_ACTION; \
1780 (sz) = request2size(req);
1783 --------------- Physical chunk operations ---------------
1787 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1788 #define PREV_INUSE 0x1
1790 /* extract inuse bit of previous chunk */
1791 #define prev_inuse(p) ((p)->size & PREV_INUSE)
1794 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1795 #define IS_MMAPPED 0x2
1797 /* check for mmap()'ed chunk */
1798 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1801 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1802 from a non-main arena. This is only set immediately before handing
1803 the chunk to the user, if necessary. */
1804 #define NON_MAIN_ARENA 0x4
1806 /* check for chunk from non-main arena */
1807 #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
1811 Bits to mask off when extracting size
1813 Note: IS_MMAPPED is intentionally not masked off from size field in
1814 macros for which mmapped chunks should never be seen. This should
1815 cause helpful core dumps to occur if it is tried by accident by
1816 people extending or adapting this malloc.
1818 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
1820 /* Get size, ignoring use bits */
1821 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
1824 /* Ptr to next physical malloc_chunk. */
1825 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
1827 /* Ptr to previous physical malloc_chunk */
1828 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1830 /* Treat space at ptr + offset as a chunk */
1831 #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1833 /* extract p's inuse bit */
1835 ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
1837 /* set/clear chunk as being inuse without otherwise disturbing */
1838 #define set_inuse(p)\
1839 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
1841 #define clear_inuse(p)\
1842 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
1845 /* check/set/clear inuse bits in known places */
1846 #define inuse_bit_at_offset(p, s)\
1847 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1849 #define set_inuse_bit_at_offset(p, s)\
1850 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1852 #define clear_inuse_bit_at_offset(p, s)\
1853 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
1856 /* Set size at head, without disturbing its use bit */
1857 #define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
1859 /* Set size/use field */
1860 #define set_head(p, s) ((p)->size = (s))
1862 /* Set size at footer (only when chunk is not in use) */
1863 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
1867 -------------------- Internal data structures --------------------
1869 All internal state is held in an instance of malloc_state defined
1870 below. There are no other static variables, except in two optional
1872 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1873 * If HAVE_MMAP is true, but mmap doesn't support
1874 MAP_ANONYMOUS, a dummy file descriptor for mmap.
1876 Beware of lots of tricks that minimize the total bookkeeping space
1877 requirements. The result is a little over 1K bytes (for 4byte
1878 pointers and size_t.)
1884 An array of bin headers for free chunks. Each bin is doubly
1885 linked. The bins are approximately proportionally (log) spaced.
1886 There are a lot of these bins (128). This may look excessive, but
1887 works very well in practice. Most bins hold sizes that are
1888 unusual as malloc request sizes, but are more usual for fragments
1889 and consolidated sets of chunks, which is what these bins hold, so
1890 they can be found quickly. All procedures maintain the invariant
1891 that no consolidated chunk physically borders another one, so each
1892 chunk in a list is known to be preceeded and followed by either
1893 inuse chunks or the ends of memory.
1895 Chunks in bins are kept in size order, with ties going to the
1896 approximately least recently used chunk. Ordering isn't needed
1897 for the small bins, which all contain the same-sized chunks, but
1898 facilitates best-fit allocation for larger chunks. These lists
1899 are just sequential. Keeping them in order almost never requires
1900 enough traversal to warrant using fancier ordered data
1903 Chunks of the same size are linked with the most
1904 recently freed at the front, and allocations are taken from the
1905 back. This results in LRU (FIFO) allocation order, which tends
1906 to give each chunk an equal opportunity to be consolidated with
1907 adjacent freed chunks, resulting in larger free chunks and less
1910 To simplify use in double-linked lists, each bin header acts
1911 as a malloc_chunk. This avoids special-casing for headers.
1912 But to conserve space and improve locality, we allocate
1913 only the fd/bk pointers of bins, and then use repositioning tricks
1914 to treat these as the fields of a malloc_chunk*.
1917 typedef struct malloc_chunk* mbinptr;
1919 /* addressing -- note that bin_at(0) does not exist */
1920 #define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
1922 /* analog of ++bin */
1923 #define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
1925 /* Reminders about list directionality within bins */
1926 #define first(b) ((b)->fd)
1927 #define last(b) ((b)->bk)
1929 /* Take a chunk off a bin list */
1930 #define unlink(P, BK, FD) { \
1940 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1941 8 bytes apart. Larger bins are approximately logarithmically spaced:
1947 4 bins of size 32768
1948 2 bins of size 262144
1949 1 bin of size what's left
1951 There is actually a little bit of slop in the numbers in bin_index
1952 for the sake of speed. This makes no difference elsewhere.
1954 The bins top out around 1MB because we expect to service large
1959 #define NSMALLBINS 64
1960 #define SMALLBIN_WIDTH 8
1961 #define MIN_LARGE_SIZE 512
1963 #define in_smallbin_range(sz) \
1964 ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
1966 #define smallbin_index(sz) (((unsigned)(sz)) >> 3)
1968 #define largebin_index(sz) \
1969 (((((unsigned long)(sz)) >> 6) <= 32)? 56 + (((unsigned long)(sz)) >> 6): \
1970 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
1971 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
1972 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
1973 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
1976 #define bin_index(sz) \
1977 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
1983 All remainders from chunk splits, as well as all returned chunks,
1984 are first placed in the "unsorted" bin. They are then placed
1985 in regular bins after malloc gives them ONE chance to be used before
1986 binning. So, basically, the unsorted_chunks list acts as a queue,
1987 with chunks being placed on it in free (and malloc_consolidate),
1988 and taken off (to be either used or placed in bins) in malloc.
1990 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1991 does not have to be taken into account in size comparisons.
1994 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1995 #define unsorted_chunks(M) (bin_at(M, 1))
2000 The top-most available chunk (i.e., the one bordering the end of
2001 available memory) is treated specially. It is never included in
2002 any bin, is used only if no other chunk is available, and is
2003 released back to the system if it is very large (see
2004 M_TRIM_THRESHOLD). Because top initially
2005 points to its own bin with initial zero size, thus forcing
2006 extension on the first malloc request, we avoid having any special
2007 code in malloc to check whether it even exists yet. But we still
2008 need to do so when getting memory from system, so we make
2009 initial_top treat the bin as a legal but unusable chunk during the
2010 interval between initialization and the first call to
2011 sYSMALLOc. (This is somewhat delicate, since it relies on
2012 the 2 preceding words to be zero during this interval as well.)
2015 /* Conveniently, the unsorted bin can be used as dummy top on first call */
2016 #define initial_top(M) (unsorted_chunks(M))
2021 To help compensate for the large number of bins, a one-level index
2022 structure is used for bin-by-bin searching. `binmap' is a
2023 bitvector recording whether bins are definitely empty so they can
2024 be skipped over during during traversals. The bits are NOT always
2025 cleared as soon as bins are empty, but instead only
2026 when they are noticed to be empty during traversal in malloc.
2029 /* Conservatively use 32 bits per map word, even if on 64bit system */
2030 #define BINMAPSHIFT 5
2031 #define BITSPERMAP (1U << BINMAPSHIFT)
2032 #define BINMAPSIZE (NBINS / BITSPERMAP)
2034 #define idx2block(i) ((i) >> BINMAPSHIFT)
2035 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2037 #define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2038 #define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2039 #define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
2044 An array of lists holding recently freed small chunks. Fastbins
2045 are not doubly linked. It is faster to single-link them, and
2046 since chunks are never removed from the middles of these lists,
2047 double linking is not necessary. Also, unlike regular bins, they
2048 are not even processed in FIFO order (they use faster LIFO) since
2049 ordering doesn't much matter in the transient contexts in which
2050 fastbins are normally used.
2052 Chunks in fastbins keep their inuse bit set, so they cannot
2053 be consolidated with other free chunks. malloc_consolidate
2054 releases all chunks in fastbins and consolidates them with
2058 typedef struct malloc_chunk* mfastbinptr;
2060 /* offset 2 to use otherwise unindexable first 2 bins */
2061 #define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
2063 /* The maximum fastbin request size we support */
2064 #define MAX_FAST_SIZE 80
2066 #define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
2069 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2070 that triggers automatic consolidation of possibly-surrounding
2071 fastbin chunks. This is a heuristic, so the exact value should not
2072 matter too much. It is defined at half the default trim threshold as a
2073 compromise heuristic to only attempt consolidation if it is likely
2074 to lead to trimming. However, it is not dynamically tunable, since
2075 consolidation reduces fragmentation surrounding large chunks even
2076 if trimming is not used.
2079 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
2082 Since the lowest 2 bits in max_fast don't matter in size comparisons,
2083 they are used as flags.
2087 FASTCHUNKS_BIT held in max_fast indicates that there are probably
2088 some fastbin chunks. It is set true on entering a chunk into any
2089 fastbin, and cleared only in malloc_consolidate.
2091 The truth value is inverted so that have_fastchunks will be true
2092 upon startup (since statics are zero-filled), simplifying
2093 initialization checks.
2096 #define FASTCHUNKS_BIT (1U)
2098 #define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT) == 0)
2099 #define clear_fastchunks(M) ((M)->max_fast |= FASTCHUNKS_BIT)
2100 #define set_fastchunks(M) ((M)->max_fast &= ~FASTCHUNKS_BIT)
2103 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
2104 regions. Otherwise, contiguity is exploited in merging together,
2105 when possible, results from consecutive MORECORE calls.
2107 The initial value comes from MORECORE_CONTIGUOUS, but is
2108 changed dynamically if mmap is ever used as an sbrk substitute.
2111 #define NONCONTIGUOUS_BIT (2U)
2113 #define contiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) == 0)
2114 #define noncontiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) != 0)
2115 #define set_noncontiguous(M) ((M)->max_fast |= NONCONTIGUOUS_BIT)
2116 #define set_contiguous(M) ((M)->max_fast &= ~NONCONTIGUOUS_BIT)
2119 Set value of max_fast.
2120 Use impossibly small value if 0.
2121 Precondition: there are no existing fastbin chunks.
2122 Setting the value clears fastchunk bit but preserves noncontiguous bit.
2125 #define set_max_fast(M, s) \
2126 (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2128 ((M)->max_fast & NONCONTIGUOUS_BIT)
2132 ----------- Internal state representation and initialization -----------
2135 struct malloc_state {
2136 /* Serialize access. */
2139 /* Statistics for locking. Only used if THREAD_STATS is defined. */
2140 long stat_lock_direct, stat_lock_loop, stat_lock_wait;
2141 long pad0_[1]; /* try to give the mutex its own cacheline */
2143 /* The maximum chunk size to be eligible for fastbin */
2144 INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */
2147 mfastbinptr fastbins[NFASTBINS];
2149 /* Base of the topmost chunk -- not otherwise kept in a bin */
2152 /* The remainder from the most recent split of a small request */
2153 mchunkptr last_remainder;
2155 /* Normal bins packed as described above */
2156 mchunkptr bins[NBINS * 2];
2158 /* Bitmap of bins */
2159 unsigned int binmap[BINMAPSIZE];
2162 struct malloc_state *next;
2164 /* Memory allocated from the system in this arena. */
2165 INTERNAL_SIZE_T system_mem;
2166 INTERNAL_SIZE_T max_system_mem;
2170 /* Tunable parameters */
2171 unsigned long trim_threshold;
2172 INTERNAL_SIZE_T top_pad;
2173 INTERNAL_SIZE_T mmap_threshold;
2175 /* Memory map support */
2180 /* Cache malloc_getpagesize */
2181 unsigned int pagesize;
2184 INTERNAL_SIZE_T mmapped_mem;
2185 /*INTERNAL_SIZE_T sbrked_mem;*/
2186 /*INTERNAL_SIZE_T max_sbrked_mem;*/
2187 INTERNAL_SIZE_T max_mmapped_mem;
2188 INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */
2190 /* First address handed out by MORECORE/sbrk. */
2194 /* There are several instances of this struct ("arenas") in this
2195 malloc. If you are adapting this malloc in a way that does NOT use
2196 a static or mmapped malloc_state, you MUST explicitly zero-fill it
2197 before using. This malloc relies on the property that malloc_state
2198 is initialized to all zeroes (as is true of C statics). */
2200 static struct malloc_state main_arena;
2202 /* There is only one instance of the malloc parameters. */
2204 static struct malloc_par mp_;
2207 Initialize a malloc_state struct.
2209 This is called only from within malloc_consolidate, which needs
2210 be called in the same contexts anyway. It is never called directly
2211 outside of malloc_consolidate because some optimizing compilers try
2212 to inline it at all call points, which turns out not to be an
2213 optimization at all. (Inlining it in malloc_consolidate is fine though.)
2217 static void malloc_init_state(mstate av)
2219 static void malloc_init_state(av) mstate av;
2225 /* Establish circular links for normal bins */
2226 for (i = 1; i < NBINS; ++i) {
2228 bin->fd = bin->bk = bin;
2231 #if MORECORE_CONTIGUOUS
2232 if (av != &main_arena)
2234 set_noncontiguous(av);
2236 set_max_fast(av, DEFAULT_MXFAST);
2238 av->top = initial_top(av);
2242 Other internal utilities operating on mstates
2246 static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
2247 static int sYSTRIm(size_t, mstate);
2248 static void malloc_consolidate(mstate);
2249 static Void_t** iALLOc(mstate, size_t, size_t*, int, Void_t**);
2251 static Void_t* sYSMALLOc();
2252 static int sYSTRIm();
2253 static void malloc_consolidate();
2254 static Void_t** iALLOc();
2257 /* ------------------- Support for multiple arenas -------------------- */
2263 These routines make a number of assertions about the states
2264 of data structures that should be true at all times. If any
2265 are not true, it's very likely that a user program has somehow
2266 trashed memory. (It's also possible that there is a coding error
2267 in malloc. In which case, please report it!)
2272 #define check_chunk(A,P)
2273 #define check_free_chunk(A,P)
2274 #define check_inuse_chunk(A,P)
2275 #define check_remalloced_chunk(A,P,N)
2276 #define check_malloced_chunk(A,P,N)
2277 #define check_malloc_state(A)
2281 #define check_chunk(A,P) do_check_chunk(A,P)
2282 #define check_free_chunk(A,P) do_check_free_chunk(A,P)
2283 #define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P)
2284 #define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
2285 #define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N)
2286 #define check_malloc_state(A) do_check_malloc_state(A)
2289 Properties of all chunks
2293 static void do_check_chunk(mstate av, mchunkptr p)
2295 static void do_check_chunk(av, p) mstate av; mchunkptr p;
2298 unsigned long sz = chunksize(p);
2299 /* min and max possible addresses assuming contiguous allocation */
2300 char* max_address = (char*)(av->top) + chunksize(av->top);
2301 char* min_address = max_address - av->system_mem;
2303 if (!chunk_is_mmapped(p)) {
2305 /* Has legal address ... */
2307 if (contiguous(av)) {
2308 assert(((char*)p) >= min_address);
2309 assert(((char*)p + sz) <= ((char*)(av->top)));
2313 /* top size is always at least MINSIZE */
2314 assert((unsigned long)(sz) >= MINSIZE);
2315 /* top predecessor always marked inuse */
2316 assert(prev_inuse(p));
2322 /* address is outside main heap */
2323 if (contiguous(av) && av->top != initial_top(av)) {
2324 assert(((char*)p) < min_address || ((char*)p) > max_address);
2326 /* chunk is page-aligned */
2327 assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0);
2328 /* mem is aligned */
2329 assert(aligned_OK(chunk2mem(p)));
2331 /* force an appropriate assert violation if debug set */
2332 assert(!chunk_is_mmapped(p));
2338 Properties of free chunks
2342 static void do_check_free_chunk(mstate av, mchunkptr p)
2344 static void do_check_free_chunk(av, p) mstate av; mchunkptr p;
2347 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2348 mchunkptr next = chunk_at_offset(p, sz);
2350 do_check_chunk(av, p);
2352 /* Chunk must claim to be free ... */
2354 assert (!chunk_is_mmapped(p));
2356 /* Unless a special marker, must have OK fields */
2357 if ((unsigned long)(sz) >= MINSIZE)
2359 assert((sz & MALLOC_ALIGN_MASK) == 0);
2360 assert(aligned_OK(chunk2mem(p)));
2361 /* ... matching footer field */
2362 assert(next->prev_size == sz);
2363 /* ... and is fully consolidated */
2364 assert(prev_inuse(p));
2365 assert (next == av->top || inuse(next));
2367 /* ... and has minimally sane links */
2368 assert(p->fd->bk == p);
2369 assert(p->bk->fd == p);
2371 else /* markers are always of size SIZE_SZ */
2372 assert(sz == SIZE_SZ);
2376 Properties of inuse chunks
2380 static void do_check_inuse_chunk(mstate av, mchunkptr p)
2382 static void do_check_inuse_chunk(av, p) mstate av; mchunkptr p;
2387 do_check_chunk(av, p);
2389 if (chunk_is_mmapped(p))
2390 return; /* mmapped chunks have no next/prev */
2392 /* Check whether it claims to be in use ... */
2395 next = next_chunk(p);
2397 /* ... and is surrounded by OK chunks.
2398 Since more things can be checked with free chunks than inuse ones,
2399 if an inuse chunk borders them and debug is on, it's worth doing them.
2401 if (!prev_inuse(p)) {
2402 /* Note that we cannot even look at prev unless it is not inuse */
2403 mchunkptr prv = prev_chunk(p);
2404 assert(next_chunk(prv) == p);
2405 do_check_free_chunk(av, prv);
2408 if (next == av->top) {
2409 assert(prev_inuse(next));
2410 assert(chunksize(next) >= MINSIZE);
2412 else if (!inuse(next))
2413 do_check_free_chunk(av, next);
2417 Properties of chunks recycled from fastbins
2421 static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2423 static void do_check_remalloced_chunk(av, p, s)
2424 mstate av; mchunkptr p; INTERNAL_SIZE_T s;
2427 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2429 if (!chunk_is_mmapped(p)) {
2430 assert(av == arena_for_chunk(p));
2431 if (chunk_non_main_arena(p))
2432 assert(av != &main_arena);
2434 assert(av == &main_arena);
2437 do_check_inuse_chunk(av, p);
2439 /* Legal size ... */
2440 assert((sz & MALLOC_ALIGN_MASK) == 0);
2441 assert((unsigned long)(sz) >= MINSIZE);
2442 /* ... and alignment */
2443 assert(aligned_OK(chunk2mem(p)));
2444 /* chunk is less than MINSIZE more than request */
2445 assert((long)(sz) - (long)(s) >= 0);
2446 assert((long)(sz) - (long)(s + MINSIZE) < 0);
2450 Properties of nonrecycled chunks at the point they are malloced
2454 static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2456 static void do_check_malloced_chunk(av, p, s)
2457 mstate av; mchunkptr p; INTERNAL_SIZE_T s;
2460 /* same as recycled case ... */
2461 do_check_remalloced_chunk(av, p, s);
2464 ... plus, must obey implementation invariant that prev_inuse is
2465 always true of any allocated chunk; i.e., that each allocated
2466 chunk borders either a previously allocated and still in-use
2467 chunk, or the base of its memory arena. This is ensured
2468 by making all allocations from the the `lowest' part of any found
2469 chunk. This does not necessarily hold however for chunks
2470 recycled via fastbins.
2473 assert(prev_inuse(p));
2478 Properties of malloc_state.
2480 This may be useful for debugging malloc, as well as detecting user
2481 programmer errors that somehow write into malloc_state.
2483 If you are extending or experimenting with this malloc, you can
2484 probably figure out how to hack this routine to print out or
2485 display chunk addresses, sizes, bins, and other instrumentation.
2488 static void do_check_malloc_state(mstate av)
2494 unsigned int binbit;
2497 INTERNAL_SIZE_T size;
2498 unsigned long total = 0;
2501 /* internal size_t must be no wider than pointer type */
2502 assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
2504 /* alignment is a power of 2 */
2505 assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
2507 /* cannot run remaining checks until fully initialized */
2508 if (av->top == 0 || av->top == initial_top(av))
2511 /* pagesize is a power of 2 */
2512 assert((mp_.pagesize & (mp_.pagesize-1)) == 0);
2514 /* A contiguous main_arena is consistent with sbrk_base. */
2515 if (av == &main_arena && contiguous(av))
2516 assert((char*)mp_.sbrk_base + av->system_mem ==
2517 (char*)av->top + chunksize(av->top));
2519 /* properties of fastbins */
2521 /* max_fast is in allowed range */
2522 assert((av->max_fast & ~1) <= request2size(MAX_FAST_SIZE));
2524 max_fast_bin = fastbin_index(av->max_fast);
2526 for (i = 0; i < NFASTBINS; ++i) {
2527 p = av->fastbins[i];
2529 /* all bins past max_fast are empty */
2530 if (i > max_fast_bin)
2534 /* each chunk claims to be inuse */
2535 do_check_inuse_chunk(av, p);
2536 total += chunksize(p);
2537 /* chunk belongs in this bin */
2538 assert(fastbin_index(chunksize(p)) == i);
2544 assert(have_fastchunks(av));
2545 else if (!have_fastchunks(av))
2548 /* check normal bins */
2549 for (i = 1; i < NBINS; ++i) {
2552 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2554 binbit = get_binmap(av,i);
2555 empty = last(b) == b;
2562 for (p = last(b); p != b; p = p->bk) {
2563 /* each chunk claims to be free */
2564 do_check_free_chunk(av, p);
2565 size = chunksize(p);
2568 /* chunk belongs in bin */
2569 idx = bin_index(size);
2571 /* lists are sorted */
2572 assert(p->bk == b ||
2573 (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
2575 /* chunk is followed by a legal chain of inuse chunks */
2576 for (q = next_chunk(p);
2577 (q != av->top && inuse(q) &&
2578 (unsigned long)(chunksize(q)) >= MINSIZE);
2580 do_check_inuse_chunk(av, q);
2584 /* top chunk is OK */
2585 check_chunk(av, av->top);
2587 /* sanity checks for statistics */
2590 assert(total <= (unsigned long)(mp_.max_total_mem));
2591 assert(mp_.n_mmaps >= 0);
2593 assert(mp_.n_mmaps <= mp_.n_mmaps_max);
2594 assert(mp_.n_mmaps <= mp_.max_n_mmaps);
2596 assert((unsigned long)(av->system_mem) <=
2597 (unsigned long)(av->max_system_mem));
2599 assert((unsigned long)(mp_.mmapped_mem) <=
2600 (unsigned long)(mp_.max_mmapped_mem));
2603 assert((unsigned long)(mp_.max_total_mem) >=
2604 (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem));
2610 /* ----------------- Support for debugging hooks -------------------- */
2614 /* ----------- Routines dealing with system allocation -------------- */
2617 sysmalloc handles malloc cases requiring more memory from the system.
2618 On entry, it is assumed that av->top does not have enough
2619 space to service request for nb bytes, thus requiring that av->top
2620 be extended or replaced.
2624 static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
2626 static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
2629 mchunkptr old_top; /* incoming value of av->top */
2630 INTERNAL_SIZE_T old_size; /* its size */
2631 char* old_end; /* its end address */
2633 long size; /* arg to first MORECORE or mmap call */
2634 char* brk; /* return value from MORECORE */
2636 long correction; /* arg to 2nd MORECORE call */
2637 char* snd_brk; /* 2nd return val */
2639 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2640 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2641 char* aligned_brk; /* aligned offset into brk */
2643 mchunkptr p; /* the allocated/returned chunk */
2644 mchunkptr remainder; /* remainder from allocation */
2645 unsigned long remainder_size; /* its size */
2647 unsigned long sum; /* for updating stats */
2649 size_t pagemask = mp_.pagesize - 1;
2655 If have mmap, and the request size meets the mmap threshold, and
2656 the system supports mmap, and there are few enough currently
2657 allocated mmapped regions, try to directly map this request
2658 rather than expanding top.
2661 if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
2662 (mp_.n_mmaps < mp_.n_mmaps_max)) {
2664 char* mm; /* return value from mmap call*/
2667 Round up size to nearest page. For mmapped chunks, the overhead
2668 is one SIZE_SZ unit larger than for normal chunks, because there
2669 is no following chunk whose prev_size field could be used.
2671 size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
2673 /* Don't try if size wraps around 0 */
2674 if ((unsigned long)(size) > (unsigned long)(nb)) {
2676 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
2678 if (mm != MAP_FAILED) {
2681 The offset to the start of the mmapped region is stored
2682 in the prev_size field of the chunk. This allows us to adjust
2683 returned start address to meet alignment requirements here
2684 and in memalign(), and still be able to compute proper
2685 address argument for later munmap in free() and realloc().
2688 front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
2689 if (front_misalign > 0) {
2690 correction = MALLOC_ALIGNMENT - front_misalign;
2691 p = (mchunkptr)(mm + correction);
2692 p->prev_size = correction;
2693 set_head(p, (size - correction) |IS_MMAPPED);
2697 set_head(p, size|IS_MMAPPED);
2700 /* update statistics */
2702 if (++mp_.n_mmaps > mp_.max_n_mmaps)
2703 mp_.max_n_mmaps = mp_.n_mmaps;
2705 sum = mp_.mmapped_mem += size;
2706 if (sum > (unsigned long)(mp_.max_mmapped_mem))
2707 mp_.max_mmapped_mem = sum;
2709 sum += av->system_mem;
2710 if (sum > (unsigned long)(mp_.max_total_mem))
2711 mp_.max_total_mem = sum;
2716 return chunk2mem(p);
2722 /* Record incoming configuration of top */
2725 old_size = chunksize(old_top);
2726 old_end = (char*)(chunk_at_offset(old_top, old_size));
2728 brk = snd_brk = (char*)(MORECORE_FAILURE);
2731 If not the first time through, we require old_size to be
2732 at least MINSIZE and to have prev_inuse set.
2735 assert((old_top == initial_top(av) && old_size == 0) ||
2736 ((unsigned long) (old_size) >= MINSIZE &&
2737 prev_inuse(old_top) &&
2738 ((unsigned long)old_end & pagemask) == 0));
2740 /* Precondition: not enough current space to satisfy nb request */
2741 assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
2743 /* Precondition: all fastbins are consolidated */
2744 assert(!have_fastchunks(av));
2747 if (av != &main_arena) {
2749 heap_info *old_heap, *heap;
2750 size_t old_heap_size;
2752 /* First try to extend the current heap. */
2753 old_heap = heap_for_ptr(old_top);
2754 old_heap_size = old_heap->size;
2755 if (grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
2756 av->system_mem += old_heap->size - old_heap_size;
2757 arena_mem += old_heap->size - old_heap_size;
2759 if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
2760 max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
2762 set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
2765 /* A new heap must be created. */
2766 heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad);
2769 heap->prev = old_heap;
2770 av->system_mem += heap->size;
2771 arena_mem += heap->size;
2773 if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
2774 max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
2778 /* Set up the new top. */
2779 top(av) = chunk_at_offset(heap, sizeof(*heap));
2780 set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);
2782 /* Setup fencepost and free the old top chunk. */
2783 /* The fencepost takes at least MINSIZE bytes, because it might
2784 become the top chunk again later. Note that a footer is set
2785 up, too, although the chunk is marked in use. */
2786 old_size -= MINSIZE;
2787 set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
2788 if (old_size >= MINSIZE) {
2789 set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
2790 set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
2791 set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
2792 _int_free(av, chunk2mem(old_top));
2794 set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
2795 set_foot(old_top, (old_size + 2*SIZE_SZ));
2799 } else { /* av == main_arena */
2802 /* Request enough space for nb + pad + overhead */
2804 size = nb + mp_.top_pad + MINSIZE;
2807 If contiguous, we can subtract out existing space that we hope to
2808 combine with new space. We add it back later only if
2809 we don't actually get contiguous space.
2816 Round to a multiple of page size.
2817 If MORECORE is not contiguous, this ensures that we only call it
2818 with whole-page arguments. And if MORECORE is contiguous and
2819 this is not first time through, this preserves page-alignment of
2820 previous calls. Otherwise, we correct to page-align below.
2823 size = (size + pagemask) & ~pagemask;
2826 Don't try to call MORECORE if argument is so big as to appear
2827 negative. Note that since mmap takes size_t arg, it may succeed
2828 below even if we cannot call MORECORE.
2832 brk = (char*)(MORECORE(size));
2834 if (brk != (char*)(MORECORE_FAILURE)) {
2835 /* Call the `morecore' hook if necessary. */
2836 if (__after_morecore_hook)
2837 (*__after_morecore_hook) ();
2840 If have mmap, try using it as a backup when MORECORE fails or
2841 cannot be used. This is worth doing on systems that have "holes" in
2842 address space, so sbrk cannot extend to give contiguous space, but
2843 space is available elsewhere. Note that we ignore mmap max count
2844 and threshold limits, since the space will not be used as a
2845 segregated mmap region.
2849 /* Cannot merge with old top, so add its size back in */
2851 size = (size + old_size + pagemask) & ~pagemask;
2853 /* If we are relying on mmap as backup, then use larger units */
2854 if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
2855 size = MMAP_AS_MORECORE_SIZE;
2857 /* Don't try if size wraps around 0 */
2858 if ((unsigned long)(size) > (unsigned long)(nb)) {
2860 char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
2862 if (mbrk != MAP_FAILED) {
2864 /* We do not need, and cannot use, another sbrk call to find end */
2866 snd_brk = brk + size;
2869 Record that we no longer have a contiguous sbrk region.
2870 After the first time mmap is used as backup, we do not
2871 ever rely on contiguous space since this could incorrectly
2874 set_noncontiguous(av);
2880 if (brk != (char*)(MORECORE_FAILURE)) {
2881 if (mp_.sbrk_base == 0)
2882 mp_.sbrk_base = brk;
2883 av->system_mem += size;
2886 If MORECORE extends previous space, we can likewise extend top size.
2889 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
2890 set_head(old_top, (size + old_size) | PREV_INUSE);
2892 else if (old_size && brk < old_end) {
2893 /* Oops! Someone else killed our space.. Can't touch anything. */
2898 Otherwise, make adjustments:
2900 * If the first time through or noncontiguous, we need to call sbrk
2901 just to find out where the end of memory lies.
2903 * We need to ensure that all returned chunks from malloc will meet
2906 * If there was an intervening foreign sbrk, we need to adjust sbrk
2907 request size to account for fact that we will not be able to
2908 combine new space with existing space in old_top.
2910 * Almost all systems internally allocate whole pages at a time, in
2911 which case we might as well use the whole last page of request.
2912 So we allocate enough more memory to hit a page boundary now,
2913 which in turn causes future contiguous calls to page-align.
2917 /* Count foreign sbrk as system_mem. */
2919 av->system_mem += brk - old_end;
2925 /* handle contiguous cases */
2926 if (contiguous(av)) {
2928 /* Guarantee alignment of first new chunk made from this space */
2930 front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
2931 if (front_misalign > 0) {
2934 Skip over some bytes to arrive at an aligned position.
2935 We don't need to specially mark these wasted front bytes.
2936 They will never be accessed anyway because
2937 prev_inuse of av->top (and any chunk created from its start)
2938 is always true after initialization.
2941 correction = MALLOC_ALIGNMENT - front_misalign;
2942 aligned_brk += correction;
2946 If this isn't adjacent to existing space, then we will not
2947 be able to merge with old_top space, so must add to 2nd request.
2950 correction += old_size;
2952 /* Extend the end address to hit a page boundary */
2953 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
2954 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
2956 assert(correction >= 0);
2957 snd_brk = (char*)(MORECORE(correction));
2960 If can't allocate correction, try to at least find out current
2961 brk. It might be enough to proceed without failing.
2963 Note that if second sbrk did NOT fail, we assume that space
2964 is contiguous with first sbrk. This is a safe assumption unless
2965 program is multithreaded but doesn't use locks and a foreign sbrk
2966 occurred between our first and second calls.
2969 if (snd_brk == (char*)(MORECORE_FAILURE)) {
2971 snd_brk = (char*)(MORECORE(0));
2973 /* Call the `morecore' hook if necessary. */
2974 if (__after_morecore_hook)
2975 (*__after_morecore_hook) ();
2978 /* handle non-contiguous cases */
2980 /* MORECORE/mmap must correctly align */
2981 assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
2983 /* Find out current end of memory */
2984 if (snd_brk == (char*)(MORECORE_FAILURE)) {
2985 snd_brk = (char*)(MORECORE(0));
2989 /* Adjust top based on results of second sbrk */
2990 if (snd_brk != (char*)(MORECORE_FAILURE)) {
2991 av->top = (mchunkptr)aligned_brk;
2992 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2993 av->system_mem += correction;
2996 If not the first time through, we either have a
2997 gap due to foreign sbrk or a non-contiguous region. Insert a
2998 double fencepost at old_top to prevent consolidation with space
2999 we don't own. These fenceposts are artificial chunks that are
3000 marked as inuse and are in any case too small to use. We need
3001 two to make sizes and alignments work out.
3004 if (old_size != 0) {
3006 Shrink old_top to insert fenceposts, keeping size a
3007 multiple of MALLOC_ALIGNMENT. We know there is at least
3008 enough space in old_top to do this.
3010 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3011 set_head(old_top, old_size | PREV_INUSE);
3014 Note that the following assignments completely overwrite
3015 old_top when old_size was previously MINSIZE. This is
3016 intentional. We need the fencepost, even if old_top otherwise gets
3019 chunk_at_offset(old_top, old_size )->size =
3020 (2*SIZE_SZ)|PREV_INUSE;
3022 chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
3023 (2*SIZE_SZ)|PREV_INUSE;
3025 /* If possible, release the rest. */
3026 if (old_size >= MINSIZE) {
3027 _int_free(av, chunk2mem(old_top));
3034 /* Update statistics */
3036 sum = av->system_mem + mp_.mmapped_mem;
3037 if (sum > (unsigned long)(mp_.max_total_mem))
3038 mp_.max_total_mem = sum;
3043 } /* if (av != &main_arena) */
3045 if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
3046 av->max_system_mem = av->system_mem;
3047 check_malloc_state(av);
3049 /* finally, do the allocation */
3051 size = chunksize(p);
3053 /* check that one of the above allocation paths succeeded */
3054 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
3055 remainder_size = size - nb;
3056 remainder = chunk_at_offset(p, nb);
3057 av->top = remainder;
3058 set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
3059 set_head(remainder, remainder_size | PREV_INUSE);
3060 check_malloced_chunk(av, p, nb);
3061 return chunk2mem(p);
3064 /* catch all failure paths */
3065 MALLOC_FAILURE_ACTION;
3071 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3072 to the system (via negative arguments to sbrk) if there is unused
3073 memory at the `high' end of the malloc pool. It is called
3074 automatically by free() when top space exceeds the trim
3075 threshold. It is also called by the public malloc_trim routine. It
3076 returns 1 if it actually released any memory, else 0.
3080 static int sYSTRIm(size_t pad, mstate av)
3082 static int sYSTRIm(pad, av) size_t pad; mstate av;
3085 long top_size; /* Amount of top-most memory */
3086 long extra; /* Amount to release */
3087 long released; /* Amount actually released */
3088 char* current_brk; /* address returned by pre-check sbrk call */
3089 char* new_brk; /* address returned by post-check sbrk call */
3092 pagesz = mp_.pagesize;
3093 top_size = chunksize(av->top);
3095 /* Release in pagesize units, keeping at least one page */
3096 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
3101 Only proceed if end of memory is where we last set it.
3102 This avoids problems if there were foreign sbrk calls.
3104 current_brk = (char*)(MORECORE(0));
3105 if (current_brk == (char*)(av->top) + top_size) {
3108 Attempt to release memory. We ignore MORECORE return value,
3109 and instead call again to find out where new end of memory is.
3110 This avoids problems if first call releases less than we asked,
3111 of if failure somehow altered brk value. (We could still
3112 encounter problems if it altered brk in some very bad way,
3113 but the only thing we can do is adjust anyway, which will cause
3114 some downstream failure.)
3118 /* Call the `morecore' hook if necessary. */
3119 if (__after_morecore_hook)
3120 (*__after_morecore_hook) ();
3121 new_brk = (char*)(MORECORE(0));
3123 if (new_brk != (char*)MORECORE_FAILURE) {
3124 released = (long)(current_brk - new_brk);
3126 if (released != 0) {
3127 /* Success. Adjust top. */
3128 av->system_mem -= released;
3129 set_head(av->top, (top_size - released) | PREV_INUSE);
3130 check_malloc_state(av);
3144 munmap_chunk(mchunkptr p)
3146 munmap_chunk(p) mchunkptr p;
3149 INTERNAL_SIZE_T size = chunksize(p);
3152 assert (chunk_is_mmapped(p));
3154 assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3155 assert((mp_.n_mmaps > 0));
3157 assert(((p->prev_size + size) & (mp_.pagesize-1)) == 0);
3160 mp_.mmapped_mem -= (size + p->prev_size);
3162 ret = munmap((char *)p - p->prev_size, size + p->prev_size);
3164 /* munmap returns non-zero on failure */
3173 mremap_chunk(mchunkptr p, size_t new_size)
3175 mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
3178 size_t page_mask = mp_.pagesize - 1;
3179 INTERNAL_SIZE_T offset = p->prev_size;
3180 INTERNAL_SIZE_T size = chunksize(p);
3183 assert (chunk_is_mmapped(p));
3185 assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3186 assert((mp_.n_mmaps > 0));
3188 assert(((size + offset) & (mp_.pagesize-1)) == 0);
3190 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
3191 new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
3193 cp = (char *)mremap((char *)p - offset, size + offset, new_size,
3196 if (cp == MAP_FAILED) return 0;
3198 p = (mchunkptr)(cp + offset);
3200 assert(aligned_OK(chunk2mem(p)));
3202 assert((p->prev_size == offset));
3203 set_head(p, (new_size - offset)|IS_MMAPPED);
3205 mp_.mmapped_mem -= size + offset;
3206 mp_.mmapped_mem += new_size;
3207 if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
3208 mp_.max_mmapped_mem = mp_.mmapped_mem;
3210 if ((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
3212 mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
3217 #endif /* HAVE_MREMAP */
3219 #endif /* HAVE_MMAP */
3221 /*------------------------ Public wrappers. --------------------------------*/
3224 public_mALLOc(size_t bytes)
3229 __malloc_ptr_t (*hook) __MALLOC_P ((size_t, __const __malloc_ptr_t)) =
3232 return (*hook)(bytes, RETURN_ADDRESS (0));
3234 arena_get(ar_ptr, bytes);
3237 victim = _int_malloc(ar_ptr, bytes);
3239 /* Maybe the failure is due to running out of mmapped areas. */
3240 if(ar_ptr != &main_arena) {
3241 (void)mutex_unlock(&ar_ptr->mutex);
3242 (void)mutex_lock(&main_arena.mutex);
3243 victim = _int_malloc(&main_arena, bytes);
3244 (void)mutex_unlock(&main_arena.mutex);
3247 /* ... or sbrk() has failed and there is still a chance to mmap() */
3248 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3249 (void)mutex_unlock(&main_arena.mutex);
3251 victim = _int_malloc(ar_ptr, bytes);
3252 (void)mutex_unlock(&ar_ptr->mutex);
3257 (void)mutex_unlock(&ar_ptr->mutex);
3258 assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
3259 ar_ptr == arena_for_chunk(mem2chunk(victim)));
3264 public_fREe(Void_t* mem)
3267 mchunkptr p; /* chunk corresponding to mem */
3269 void (*hook) __MALLOC_P ((__malloc_ptr_t, __const __malloc_ptr_t)) =
3272 (*hook)(mem, RETURN_ADDRESS (0));
3276 if (mem == 0) /* free(0) has no effect */
3282 if (chunk_is_mmapped(p)) /* release mmapped memory. */
3289 ar_ptr = arena_for_chunk(p);
3291 if(!mutex_trylock(&ar_ptr->mutex))
3292 ++(ar_ptr->stat_lock_direct);
3294 (void)mutex_lock(&ar_ptr->mutex);
3295 ++(ar_ptr->stat_lock_wait);
3298 (void)mutex_lock(&ar_ptr->mutex);
3300 _int_free(ar_ptr, mem);
3301 (void)mutex_unlock(&ar_ptr->mutex);
3305 public_rEALLOc(Void_t* oldmem, size_t bytes)
3308 INTERNAL_SIZE_T nb; /* padded request size */
3310 mchunkptr oldp; /* chunk corresponding to oldmem */
3311 INTERNAL_SIZE_T oldsize; /* its size */
3313 Void_t* newp; /* chunk to return */
3315 __malloc_ptr_t (*hook) __MALLOC_P ((__malloc_ptr_t, size_t,
3316 __const __malloc_ptr_t)) =
3319 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
3321 #if REALLOC_ZERO_BYTES_FREES
3322 if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
3325 /* realloc of null is supposed to be same as malloc */
3326 if (oldmem == 0) return public_mALLOc(bytes);
3328 oldp = mem2chunk(oldmem);
3329 oldsize = chunksize(oldp);
3331 checked_request2size(bytes, nb);
3334 if (chunk_is_mmapped(oldp))
3339 newp = mremap_chunk(oldp, nb);
3340 if(newp) return chunk2mem(newp);
3342 /* Note the extra SIZE_SZ overhead. */
3343 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
3344 /* Must alloc, copy, free. */
3345 newmem = public_mALLOc(bytes);
3346 if (newmem == 0) return 0; /* propagate failure */
3347 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
3353 ar_ptr = arena_for_chunk(oldp);
3355 if(!mutex_trylock(&ar_ptr->mutex))
3356 ++(ar_ptr->stat_lock_direct);
3358 (void)mutex_lock(&ar_ptr->mutex);
3359 ++(ar_ptr->stat_lock_wait);
3362 (void)mutex_lock(&ar_ptr->mutex);
3366 /* As in malloc(), remember this arena for the next allocation. */
3367 tsd_setspecific(arena_key, (Void_t *)ar_ptr);
3370 newp = _int_realloc(ar_ptr, oldmem, bytes);
3372 (void)mutex_unlock(&ar_ptr->mutex);
3373 assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
3374 ar_ptr == arena_for_chunk(mem2chunk(newp)));
3379 public_mEMALIGn(size_t alignment, size_t bytes)
3384 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3385 __const __malloc_ptr_t)) =
3388 return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
3390 /* If need less alignment than we give anyway, just relay to malloc */
3391 if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);
3393 /* Otherwise, ensure that it is at least a minimum chunk size */
3394 if (alignment < MINSIZE) alignment = MINSIZE;
3396 arena_get(ar_ptr, bytes + alignment + MINSIZE);
3399 p = _int_memalign(ar_ptr, alignment, bytes);
3400 (void)mutex_unlock(&ar_ptr->mutex);
3402 /* Maybe the failure is due to running out of mmapped areas. */
3403 if(ar_ptr != &main_arena) {
3404 (void)mutex_lock(&main_arena.mutex);
3405 p = _int_memalign(&main_arena, alignment, bytes);
3406 (void)mutex_unlock(&main_arena.mutex);
3409 /* ... or sbrk() has failed and there is still a chance to mmap() */
3410 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3412 p = _int_memalign(ar_ptr, alignment, bytes);
3413 (void)mutex_unlock(&ar_ptr->mutex);
3418 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3419 ar_ptr == arena_for_chunk(mem2chunk(p)));
3424 public_vALLOc(size_t bytes)
3429 if(__malloc_initialized < 0)
3431 arena_get(ar_ptr, bytes + mp_.pagesize + MINSIZE);
3434 p = _int_valloc(ar_ptr, bytes);
3435 (void)mutex_unlock(&ar_ptr->mutex);
3440 public_pVALLOc(size_t bytes)
3445 if(__malloc_initialized < 0)
3447 arena_get(ar_ptr, bytes + 2*mp_.pagesize + MINSIZE);
3448 p = _int_pvalloc(ar_ptr, bytes);
3449 (void)mutex_unlock(&ar_ptr->mutex);
3454 public_cALLOc(size_t n, size_t elem_size)
3457 mchunkptr oldtop, p;
3458 INTERNAL_SIZE_T sz, csz, oldtopsize;
3460 unsigned long clearsize;
3461 unsigned long nclears;
3464 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
3468 mem = (*hook)(sz, RETURN_ADDRESS (0));
3472 return memset(mem, 0, sz);
3474 while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
3479 /* FIXME: check for overflow on multiplication. */
3486 /* Check if we hand out the top chunk, in which case there may be no
3490 oldtopsize = chunksize(top(av));
3491 #if MORECORE_CLEARS < 2
3492 /* Only newly allocated memory is guaranteed to be cleared. */
3493 if (av == &main_arena &&
3494 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
3495 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
3498 mem = _int_malloc(av, sz);
3500 /* Only clearing follows, so we can unlock early. */
3501 (void)mutex_unlock(&av->mutex);
3503 assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
3504 av == arena_for_chunk(mem2chunk(mem)));
3507 /* Maybe the failure is due to running out of mmapped areas. */
3508 if(av != &main_arena) {
3509 (void)mutex_lock(&main_arena.mutex);
3510 mem = _int_malloc(&main_arena, sz);
3511 (void)mutex_unlock(&main_arena.mutex);
3514 /* ... or sbrk() has failed and there is still a chance to mmap() */
3515 (void)mutex_lock(&main_arena.mutex);
3516 av = arena_get2(av->next ? av : 0, sz);
3517 (void)mutex_unlock(&main_arena.mutex);
3519 mem = _int_malloc(av, sz);
3520 (void)mutex_unlock(&av->mutex);
3524 if (mem == 0) return 0;
3528 /* Two optional cases in which clearing not necessary */
3530 if (chunk_is_mmapped(p))
3537 if (p == oldtop && csz > oldtopsize) {
3538 /* clear only the bytes from non-freshly-sbrked memory */
3543 /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
3544 contents have an odd number of INTERNAL_SIZE_T-sized words;
3546 d = (INTERNAL_SIZE_T*)mem;
3547 clearsize = csz - SIZE_SZ;
3548 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
3549 assert(nclears >= 3);
3552 MALLOC_ZERO(d, clearsize);
3576 public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
3581 arena_get(ar_ptr, n*elem_size);
3585 m = _int_icalloc(ar_ptr, n, elem_size, chunks);
3586 (void)mutex_unlock(&ar_ptr->mutex);
3591 public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks)
3596 arena_get(ar_ptr, 0);
3600 m = _int_icomalloc(ar_ptr, n, sizes, chunks);
3601 (void)mutex_unlock(&ar_ptr->mutex);
3608 public_cFREe(Void_t* m)
3616 public_mTRIm(size_t s)
3620 (void)mutex_lock(&main_arena.mutex);
3622 (void)mutex_unlock(&main_arena.mutex);
3627 public_mUSABLe(Void_t* m)
3631 result = mUSABLe(m);
3641 struct mallinfo public_mALLINFo()
3645 (void)mutex_lock(&main_arena.mutex);
3646 m = mALLINFo(&main_arena);
3647 (void)mutex_unlock(&main_arena.mutex);
3652 public_mALLOPt(int p, int v)
3655 result = mALLOPt(p, v);
3660 ------------------------------ malloc ------------------------------
3664 _int_malloc(mstate av, size_t bytes)
3666 INTERNAL_SIZE_T nb; /* normalized request size */
3667 unsigned int idx; /* associated bin index */
3668 mbinptr bin; /* associated bin */
3669 mfastbinptr* fb; /* associated fastbin */
3671 mchunkptr victim; /* inspected/selected chunk */
3672 INTERNAL_SIZE_T size; /* its size */
3673 int victim_index; /* its bin index */
3675 mchunkptr remainder; /* remainder from a split */
3676 unsigned long remainder_size; /* its size */
3678 unsigned int block; /* bit map traverser */
3679 unsigned int bit; /* bit map traverser */
3680 unsigned int map; /* current word of binmap */
3682 mchunkptr fwd; /* misc temp for linking */
3683 mchunkptr bck; /* misc temp for linking */
3686 Convert request size to internal form by adding SIZE_SZ bytes
3687 overhead plus possibly more to obtain necessary alignment and/or
3688 to obtain a size of at least MINSIZE, the smallest allocatable
3689 size. Also, checked_request2size traps (returning 0) request sizes
3690 that are so large that they wrap around zero when padded and
3694 checked_request2size(bytes, nb);
3697 If the size qualifies as a fastbin, first check corresponding bin.
3698 This code is safe to execute even if av is not yet initialized, so we
3699 can try it without checking, which saves some time on this fast path.
3702 if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) {
3703 fb = &(av->fastbins[(fastbin_index(nb))]);
3704 if ( (victim = *fb) != 0) {
3706 check_remalloced_chunk(av, victim, nb);
3707 return chunk2mem(victim);
3712 If a small request, check regular bin. Since these "smallbins"
3713 hold one size each, no searching within bins is necessary.
3714 (For a large request, we need to wait until unsorted chunks are
3715 processed to find best fit. But for small ones, fits are exact
3716 anyway, so we can check now, which is faster.)
3719 if (in_smallbin_range(nb)) {
3720 idx = smallbin_index(nb);
3721 bin = bin_at(av,idx);
3723 if ( (victim = last(bin)) != bin) {
3724 if (victim == 0) /* initialization check */
3725 malloc_consolidate(av);
3728 set_inuse_bit_at_offset(victim, nb);
3732 if (av != &main_arena)
3733 victim->size |= NON_MAIN_ARENA;
3734 check_malloced_chunk(av, victim, nb);
3735 return chunk2mem(victim);
3741 If this is a large request, consolidate fastbins before continuing.
3742 While it might look excessive to kill all fastbins before
3743 even seeing if there is space available, this avoids
3744 fragmentation problems normally associated with fastbins.
3745 Also, in practice, programs tend to have runs of either small or
3746 large requests, but less often mixtures, so consolidation is not
3747 invoked all that often in most programs. And the programs that
3748 it is called frequently in otherwise tend to fragment.
3752 idx = largebin_index(nb);
3753 if (have_fastchunks(av))
3754 malloc_consolidate(av);
3758 Process recently freed or remaindered chunks, taking one only if
3759 it is exact fit, or, if this a small request, the chunk is remainder from
3760 the most recent non-exact fit. Place other traversed chunks in
3761 bins. Note that this step is the only place in any routine where
3762 chunks are placed in bins.
3764 The outer loop here is needed because we might not realize until
3765 near the end of malloc that we should have consolidated, so must
3766 do so and retry. This happens at most once, and only when we would
3767 otherwise need to expand memory to service a "small" request.
3772 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
3774 size = chunksize(victim);
3777 If a small request, try to use last remainder if it is the
3778 only chunk in unsorted bin. This helps promote locality for
3779 runs of consecutive small requests. This is the only
3780 exception to best-fit, and applies only when there is
3781 no exact fit for a small chunk.
3784 if (in_smallbin_range(nb) &&
3785 bck == unsorted_chunks(av) &&
3786 victim == av->last_remainder &&
3787 (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
3789 /* split and reattach remainder */
3790 remainder_size = size - nb;
3791 remainder = chunk_at_offset(victim, nb);
3792 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
3793 av->last_remainder = remainder;
3794 remainder->bk = remainder->fd = unsorted_chunks(av);
3796 set_head(victim, nb | PREV_INUSE |
3797 (av != &main_arena ? NON_MAIN_ARENA : 0));
3798 set_head(remainder, remainder_size | PREV_INUSE);
3799 set_foot(remainder, remainder_size);
3801 check_malloced_chunk(av, victim, nb);
3802 return chunk2mem(victim);
3805 /* remove from unsorted list */
3806 unsorted_chunks(av)->bk = bck;
3807 bck->fd = unsorted_chunks(av);
3809 /* Take now instead of binning if exact fit */
3812 set_inuse_bit_at_offset(victim, size);
3813 if (av != &main_arena)
3814 victim->size |= NON_MAIN_ARENA;
3815 check_malloced_chunk(av, victim, nb);
3816 return chunk2mem(victim);
3819 /* place chunk in bin */
3821 if (in_smallbin_range(size)) {
3822 victim_index = smallbin_index(size);
3823 bck = bin_at(av, victim_index);
3827 victim_index = largebin_index(size);
3828 bck = bin_at(av, victim_index);
3831 /* maintain large bins in sorted order */
3833 /* Or with inuse bit to speed comparisons */
3835 /* if smaller than smallest, bypass loop below */
3836 assert((bck->bk->size & NON_MAIN_ARENA) == 0);
3837 if ((unsigned long)(size) <= (unsigned long)(bck->bk->size)) {
3842 assert((fwd->size & NON_MAIN_ARENA) == 0);
3843 while ((unsigned long)(size) < (unsigned long)(fwd->size)) {
3845 assert((fwd->size & NON_MAIN_ARENA) == 0);
3852 mark_bin(av, victim_index);
3860 If a large request, scan through the chunks of current bin in
3861 sorted order to find smallest that fits. This is the only step
3862 where an unbounded number of chunks might be scanned without doing
3863 anything useful with them. However the lists tend to be short.
3866 if (!in_smallbin_range(nb)) {
3867 bin = bin_at(av, idx);
3869 /* skip scan if empty or largest chunk is too small */
3870 if ((victim = last(bin)) != bin &&
3871 (unsigned long)(first(bin)->size) >= (unsigned long)(nb)) {
3873 while (((unsigned long)(size = chunksize(victim)) <
3874 (unsigned long)(nb)))
3875 victim = victim->bk;
3877 remainder_size = size - nb;
3878 unlink(victim, bck, fwd);
3881 if (remainder_size < MINSIZE) {
3882 set_inuse_bit_at_offset(victim, size);
3883 if (av != &main_arena)
3884 victim->size |= NON_MAIN_ARENA;
3885 check_malloced_chunk(av, victim, nb);
3886 return chunk2mem(victim);
3890 remainder = chunk_at_offset(victim, nb);
3891 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
3892 remainder->bk = remainder->fd = unsorted_chunks(av);
3893 set_head(victim, nb | PREV_INUSE |
3894 (av != &main_arena ? NON_MAIN_ARENA : 0));
3895 set_head(remainder, remainder_size | PREV_INUSE);
3896 set_foot(remainder, remainder_size);
3897 check_malloced_chunk(av, victim, nb);
3898 return chunk2mem(victim);
3904 Search for a chunk by scanning bins, starting with next largest
3905 bin. This search is strictly by best-fit; i.e., the smallest
3906 (with ties going to approximately the least recently used) chunk
3907 that fits is selected.
3909 The bitmap avoids needing to check that most blocks are nonempty.
3910 The particular case of skipping all bins during warm-up phases
3911 when no chunks have been returned yet is faster than it might look.
3915 bin = bin_at(av,idx);
3916 block = idx2block(idx);
3917 map = av->binmap[block];
3922 /* Skip rest of block if there are no more set bits in this block. */
3923 if (bit > map || bit == 0) {
3925 if (++block >= BINMAPSIZE) /* out of bins */
3927 } while ( (map = av->binmap[block]) == 0);
3929 bin = bin_at(av, (block << BINMAPSHIFT));
3933 /* Advance to bin with set bit. There must be one. */
3934 while ((bit & map) == 0) {
3935 bin = next_bin(bin);
3940 /* Inspect the bin. It is likely to be non-empty */
3943 /* If a false alarm (empty bin), clear the bit. */
3944 if (victim == bin) {
3945 av->binmap[block] = map &= ~bit; /* Write through */
3946 bin = next_bin(bin);
3951 size = chunksize(victim);
3953 /* We know the first chunk in this bin is big enough to use. */
3954 assert((unsigned long)(size) >= (unsigned long)(nb));
3956 remainder_size = size - nb;
3964 if (remainder_size < MINSIZE) {
3965 set_inuse_bit_at_offset(victim, size);
3966 if (av != &main_arena)
3967 victim->size |= NON_MAIN_ARENA;
3968 check_malloced_chunk(av, victim, nb);
3969 return chunk2mem(victim);
3974 remainder = chunk_at_offset(victim, nb);
3976 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
3977 remainder->bk = remainder->fd = unsorted_chunks(av);
3978 /* advertise as last remainder */
3979 if (in_smallbin_range(nb))
3980 av->last_remainder = remainder;
3982 set_head(victim, nb | PREV_INUSE |
3983 (av != &main_arena ? NON_MAIN_ARENA : 0));
3984 set_head(remainder, remainder_size | PREV_INUSE);
3985 set_foot(remainder, remainder_size);
3986 check_malloced_chunk(av, victim, nb);
3987 return chunk2mem(victim);
3994 If large enough, split off the chunk bordering the end of memory
3995 (held in av->top). Note that this is in accord with the best-fit
3996 search rule. In effect, av->top is treated as larger (and thus
3997 less well fitting) than any other available chunk since it can
3998 be extended to be as large as necessary (up to system
4001 We require that av->top always exists (i.e., has size >=
4002 MINSIZE) after initialization, so if it would otherwise be
4003 exhuasted by current request, it is replenished. (The main
4004 reason for ensuring it exists is that we may need MINSIZE space
4005 to put in fenceposts in sysmalloc.)
4009 size = chunksize(victim);
4011 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
4012 remainder_size = size - nb;
4013 remainder = chunk_at_offset(victim, nb);
4014 av->top = remainder;
4015 set_head(victim, nb | PREV_INUSE |
4016 (av != &main_arena ? NON_MAIN_ARENA : 0));
4017 set_head(remainder, remainder_size | PREV_INUSE);
4019 check_malloced_chunk(av, victim, nb);
4020 return chunk2mem(victim);
4024 If there is space available in fastbins, consolidate and retry,
4025 to possibly avoid expanding memory. This can occur only if nb is
4026 in smallbin range so we didn't consolidate upon entry.
4029 else if (have_fastchunks(av)) {
4030 assert(in_smallbin_range(nb));
4031 malloc_consolidate(av);
4032 idx = smallbin_index(nb); /* restore original bin index */
4036 Otherwise, relay to handle system-dependent cases
4039 return sYSMALLOc(nb, av);
4044 ------------------------------ free ------------------------------
4048 _int_free(mstate av, Void_t* mem)
4050 mchunkptr p; /* chunk corresponding to mem */
4051 INTERNAL_SIZE_T size; /* its size */
4052 mfastbinptr* fb; /* associated fastbin */
4053 mchunkptr nextchunk; /* next contiguous chunk */
4054 INTERNAL_SIZE_T nextsize; /* its size */
4055 int nextinuse; /* true if nextchunk is used */
4056 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4057 mchunkptr bck; /* misc temp for linking */
4058 mchunkptr fwd; /* misc temp for linking */
4061 /* free(0) has no effect */
4064 size = chunksize(p);
4066 check_inuse_chunk(av, p);
4069 If eligible, place chunk on a fastbin so it can be found
4070 and used quickly in malloc.
4073 if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
4077 If TRIM_FASTBINS set, don't place chunks
4078 bordering top into fastbins
4080 && (chunk_at_offset(p, size) != av->top)
4085 fb = &(av->fastbins[fastbin_index(size)]);
4091 Consolidate other non-mmapped chunks as they arrive.
4094 else if (!chunk_is_mmapped(p)) {
4095 nextchunk = chunk_at_offset(p, size);
4096 nextsize = chunksize(nextchunk);
4097 assert(nextsize > 0);
4099 /* consolidate backward */
4100 if (!prev_inuse(p)) {
4101 prevsize = p->prev_size;
4103 p = chunk_at_offset(p, -((long) prevsize));
4104 unlink(p, bck, fwd);
4107 if (nextchunk != av->top) {
4108 /* get and clear inuse bit */
4109 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4111 /* consolidate forward */
4113 unlink(nextchunk, bck, fwd);
4116 clear_inuse_bit_at_offset(nextchunk, 0);
4119 Place the chunk in unsorted chunk list. Chunks are
4120 not placed into regular bins until after they have
4121 been given one chance to be used in malloc.
4124 bck = unsorted_chunks(av);
4131 set_head(p, size | PREV_INUSE);
4134 check_free_chunk(av, p);
4138 If the chunk borders the current high end of memory,
4139 consolidate into top
4144 set_head(p, size | PREV_INUSE);
4150 If freeing a large space, consolidate possibly-surrounding
4151 chunks. Then, if the total unused topmost memory exceeds trim
4152 threshold, ask malloc_trim to reduce top.
4154 Unless max_fast is 0, we don't know if there are fastbins
4155 bordering top, so we cannot tell for sure whether threshold
4156 has been reached unless fastbins are consolidated. But we
4157 don't want to consolidate on each free. As a compromise,
4158 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4162 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4163 if (have_fastchunks(av))
4164 malloc_consolidate(av);
4166 if (av == &main_arena) {
4167 #ifndef MORECORE_CANNOT_TRIM
4168 if ((unsigned long)(chunksize(av->top)) >=
4169 (unsigned long)(mp_.trim_threshold))
4170 sYSTRIm(mp_.top_pad, av);
4173 /* Always try heap_trim(), even if the top chunk is not
4174 large, because the corresponding heap might go away. */
4175 heap_info *heap = heap_for_ptr(top(av));
4177 assert(heap->ar_ptr == av);
4178 heap_trim(heap, mp_.top_pad);
4184 If the chunk was allocated via mmap, release via munmap(). Note
4185 that if HAVE_MMAP is false but chunk_is_mmapped is true, then
4186 user must have overwritten memory. There's nothing we can do to
4187 catch this error unless MALLOC_DEBUG is set, in which case
4188 check_inuse_chunk (above) will have triggered error.
4194 INTERNAL_SIZE_T offset = p->prev_size;
4196 mp_.mmapped_mem -= (size + offset);
4197 ret = munmap((char*)p - offset, size + offset);
4198 /* munmap returns non-zero on failure */
4206 ------------------------- malloc_consolidate -------------------------
4208 malloc_consolidate is a specialized version of free() that tears
4209 down chunks held in fastbins. Free itself cannot be used for this
4210 purpose since, among other things, it might place chunks back onto
4211 fastbins. So, instead, we need to use a minor variant of the same
4214 Also, because this routine needs to be called the first time through
4215 malloc anyway, it turns out to be the perfect place to trigger
4216 initialization code.
4220 static void malloc_consolidate(mstate av)
4222 static void malloc_consolidate(av) mstate av;
4225 mfastbinptr* fb; /* current fastbin being consolidated */
4226 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4227 mchunkptr p; /* current chunk being consolidated */
4228 mchunkptr nextp; /* next chunk to consolidate */
4229 mchunkptr unsorted_bin; /* bin header */
4230 mchunkptr first_unsorted; /* chunk to link to */
4232 /* These have same use as in free() */
4233 mchunkptr nextchunk;
4234 INTERNAL_SIZE_T size;
4235 INTERNAL_SIZE_T nextsize;
4236 INTERNAL_SIZE_T prevsize;
4242 If max_fast is 0, we know that av hasn't
4243 yet been initialized, in which case do so below
4246 if (av->max_fast != 0) {
4247 clear_fastchunks(av);
4249 unsorted_bin = unsorted_chunks(av);
4252 Remove each chunk from fast bin and consolidate it, placing it
4253 then in unsorted bin. Among other reasons for doing this,
4254 placing in unsorted bin avoids needing to calculate actual bins
4255 until malloc is sure that chunks aren't immediately going to be
4259 maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
4260 fb = &(av->fastbins[0]);
4262 if ( (p = *fb) != 0) {
4266 check_inuse_chunk(av, p);
4269 /* Slightly streamlined version of consolidation code in free() */
4270 size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
4271 nextchunk = chunk_at_offset(p, size);
4272 nextsize = chunksize(nextchunk);
4274 if (!prev_inuse(p)) {
4275 prevsize = p->prev_size;
4277 p = chunk_at_offset(p, -((long) prevsize));
4278 unlink(p, bck, fwd);
4281 if (nextchunk != av->top) {
4282 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4286 unlink(nextchunk, bck, fwd);
4288 clear_inuse_bit_at_offset(nextchunk, 0);
4290 first_unsorted = unsorted_bin->fd;
4291 unsorted_bin->fd = p;
4292 first_unsorted->bk = p;
4294 set_head(p, size | PREV_INUSE);
4295 p->bk = unsorted_bin;
4296 p->fd = first_unsorted;
4302 set_head(p, size | PREV_INUSE);
4306 } while ( (p = nextp) != 0);
4309 } while (fb++ != maxfb);
4312 malloc_init_state(av);
4313 check_malloc_state(av);
4318 ------------------------------ realloc ------------------------------
4322 _int_realloc(mstate av, Void_t* oldmem, size_t bytes)
4324 INTERNAL_SIZE_T nb; /* padded request size */
4326 mchunkptr oldp; /* chunk corresponding to oldmem */
4327 INTERNAL_SIZE_T oldsize; /* its size */
4329 mchunkptr newp; /* chunk to return */
4330 INTERNAL_SIZE_T newsize; /* its size */
4331 Void_t* newmem; /* corresponding user mem */
4333 mchunkptr next; /* next contiguous chunk after oldp */
4335 mchunkptr remainder; /* extra space at end of newp */
4336 unsigned long remainder_size; /* its size */
4338 mchunkptr bck; /* misc temp for linking */
4339 mchunkptr fwd; /* misc temp for linking */
4341 unsigned long copysize; /* bytes to copy */
4342 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4343 INTERNAL_SIZE_T* s; /* copy source */
4344 INTERNAL_SIZE_T* d; /* copy destination */
4347 #if REALLOC_ZERO_BYTES_FREES
4349 _int_free(av, oldmem);
4354 /* realloc of null is supposed to be same as malloc */
4355 if (oldmem == 0) return _int_malloc(av, bytes);
4357 checked_request2size(bytes, nb);
4359 oldp = mem2chunk(oldmem);
4360 oldsize = chunksize(oldp);
4362 check_inuse_chunk(av, oldp);
4364 if (!chunk_is_mmapped(oldp)) {
4366 if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
4367 /* already big enough; split below */
4373 next = chunk_at_offset(oldp, oldsize);
4375 /* Try to expand forward into top */
4376 if (next == av->top &&
4377 (unsigned long)(newsize = oldsize + chunksize(next)) >=
4378 (unsigned long)(nb + MINSIZE)) {
4379 set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4380 av->top = chunk_at_offset(oldp, nb);
4381 set_head(av->top, (newsize - nb) | PREV_INUSE);
4382 check_inuse_chunk(av, oldp);
4383 return chunk2mem(oldp);
4386 /* Try to expand forward into next chunk; split off remainder below */
4387 else if (next != av->top &&
4389 (unsigned long)(newsize = oldsize + chunksize(next)) >=
4390 (unsigned long)(nb)) {
4392 unlink(next, bck, fwd);
4395 /* allocate, copy, free */
4397 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4399 return 0; /* propagate failure */
4401 newp = mem2chunk(newmem);
4402 newsize = chunksize(newp);
4405 Avoid copy if newp is next chunk after oldp.
4413 Unroll copy of <= 36 bytes (72 if 8byte sizes)
4414 We know that contents have an odd number of
4415 INTERNAL_SIZE_T-sized words; minimally 3.
4418 copysize = oldsize - SIZE_SZ;
4419 s = (INTERNAL_SIZE_T*)(oldmem);
4420 d = (INTERNAL_SIZE_T*)(newmem);
4421 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4422 assert(ncopies >= 3);
4425 MALLOC_COPY(d, s, copysize);
4445 _int_free(av, oldmem);
4446 check_inuse_chunk(av, newp);
4447 return chunk2mem(newp);
4452 /* If possible, free extra space in old or extended chunk */
4454 assert((unsigned long)(newsize) >= (unsigned long)(nb));
4456 remainder_size = newsize - nb;
4458 if (remainder_size < MINSIZE) { /* not enough extra to split off */
4459 set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4460 set_inuse_bit_at_offset(newp, newsize);
4462 else { /* split remainder */
4463 remainder = chunk_at_offset(newp, nb);
4464 set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4465 set_head(remainder, remainder_size | PREV_INUSE |
4466 (av != &main_arena ? NON_MAIN_ARENA : 0));
4467 /* Mark remainder as inuse so free() won't complain */
4468 set_inuse_bit_at_offset(remainder, remainder_size);
4469 _int_free(av, chunk2mem(remainder));
4472 check_inuse_chunk(av, newp);
4473 return chunk2mem(newp);
4484 INTERNAL_SIZE_T offset = oldp->prev_size;
4485 size_t pagemask = mp_.pagesize - 1;
4489 /* Note the extra SIZE_SZ overhead */
4490 newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
4492 /* don't need to remap if still within same page */
4493 if (oldsize == newsize - offset)
4496 cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
4498 if (cp != MAP_FAILED) {
4500 newp = (mchunkptr)(cp + offset);
4501 set_head(newp, (newsize - offset)|IS_MMAPPED);
4503 assert(aligned_OK(chunk2mem(newp)));
4504 assert((newp->prev_size == offset));
4506 /* update statistics */
4507 sum = mp_.mmapped_mem += newsize - oldsize;
4508 if (sum > (unsigned long)(mp_.max_mmapped_mem))
4509 mp_.max_mmapped_mem = sum;
4511 sum += main_arena.system_mem;
4512 if (sum > (unsigned long)(mp_.max_total_mem))
4513 mp_.max_total_mem = sum;
4516 return chunk2mem(newp);
4520 /* Note the extra SIZE_SZ overhead. */
4521 if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ))
4522 newmem = oldmem; /* do nothing */
4524 /* Must alloc, copy, free. */
4525 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4527 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
4528 _int_free(av, oldmem);
4534 /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
4535 check_malloc_state(av);
4536 MALLOC_FAILURE_ACTION;
4543 ------------------------------ memalign ------------------------------
4547 _int_memalign(mstate av, size_t alignment, size_t bytes)
4549 INTERNAL_SIZE_T nb; /* padded request size */
4550 char* m; /* memory returned by malloc call */
4551 mchunkptr p; /* corresponding chunk */
4552 char* brk; /* alignment point within p */
4553 mchunkptr newp; /* chunk to return */
4554 INTERNAL_SIZE_T newsize; /* its size */
4555 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4556 mchunkptr remainder; /* spare room at end to split off */
4557 unsigned long remainder_size; /* its size */
4558 INTERNAL_SIZE_T size;
4560 /* If need less alignment than we give anyway, just relay to malloc */
4562 if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
4564 /* Otherwise, ensure that it is at least a minimum chunk size */
4566 if (alignment < MINSIZE) alignment = MINSIZE;
4568 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
4569 if ((alignment & (alignment - 1)) != 0) {
4570 size_t a = MALLOC_ALIGNMENT * 2;
4571 while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
4575 checked_request2size(bytes, nb);
4578 Strategy: find a spot within that chunk that meets the alignment
4579 request, and then possibly free the leading and trailing space.
4583 /* Call malloc with worst case padding to hit alignment. */
4585 m = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
4587 if (m == 0) return 0; /* propagate failure */
4591 if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
4594 Find an aligned spot inside chunk. Since we need to give back
4595 leading space in a chunk of at least MINSIZE, if the first
4596 calculation places us at a spot with less than MINSIZE leader,
4597 we can move to the next aligned spot -- we've allocated enough
4598 total room so that this is always possible.
4601 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
4602 -((signed long) alignment));
4603 if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
4606 newp = (mchunkptr)brk;
4607 leadsize = brk - (char*)(p);
4608 newsize = chunksize(p) - leadsize;
4610 /* For mmapped chunks, just adjust offset */
4611 if (chunk_is_mmapped(p)) {
4612 newp->prev_size = p->prev_size + leadsize;
4613 set_head(newp, newsize|IS_MMAPPED);
4614 return chunk2mem(newp);
4617 /* Otherwise, give back leader, use the rest */
4618 set_head(newp, newsize | PREV_INUSE |
4619 (av != &main_arena ? NON_MAIN_ARENA : 0));
4620 set_inuse_bit_at_offset(newp, newsize);
4621 set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4622 _int_free(av, chunk2mem(p));
4625 assert (newsize >= nb &&
4626 (((unsigned long)(chunk2mem(p))) % alignment) == 0);
4629 /* Also give back spare room at the end */
4630 if (!chunk_is_mmapped(p)) {
4631 size = chunksize(p);
4632 if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
4633 remainder_size = size - nb;
4634 remainder = chunk_at_offset(p, nb);
4635 set_head(remainder, remainder_size | PREV_INUSE |
4636 (av != &main_arena ? NON_MAIN_ARENA : 0));
4637 set_head_size(p, nb);
4638 _int_free(av, chunk2mem(remainder));
4642 check_inuse_chunk(av, p);
4643 return chunk2mem(p);
4648 ------------------------------ calloc ------------------------------
4652 Void_t* cALLOc(size_t n_elements, size_t elem_size)
4654 Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
4658 unsigned long clearsize;
4659 unsigned long nclears;
4662 Void_t* mem = mALLOc(n_elements * elem_size);
4668 if (!chunk_is_mmapped(p)) /* don't need to clear mmapped space */
4672 Unroll clear of <= 36 bytes (72 if 8byte sizes)
4673 We know that contents have an odd number of
4674 INTERNAL_SIZE_T-sized words; minimally 3.
4677 d = (INTERNAL_SIZE_T*)mem;
4678 clearsize = chunksize(p) - SIZE_SZ;
4679 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
4680 assert(nclears >= 3);
4683 MALLOC_ZERO(d, clearsize);
4709 ------------------------- independent_calloc -------------------------
4714 _int_icalloc(mstate av, size_t n_elements, size_t elem_size, Void_t* chunks[])
4716 _int_icalloc(av, n_elements, elem_size, chunks)
4717 mstate av; size_t n_elements; size_t elem_size; Void_t* chunks[];
4720 size_t sz = elem_size; /* serves as 1-element array */
4721 /* opts arg of 3 means all elements are same size, and should be cleared */
4722 return iALLOc(av, n_elements, &sz, 3, chunks);
4726 ------------------------- independent_comalloc -------------------------
4731 _int_icomalloc(mstate av, size_t n_elements, size_t sizes[], Void_t* chunks[])
4733 _int_icomalloc(av, n_elements, sizes, chunks)
4734 mstate av; size_t n_elements; size_t sizes[]; Void_t* chunks[];
4737 return iALLOc(av, n_elements, sizes, 0, chunks);
4742 ------------------------------ ialloc ------------------------------
4743 ialloc provides common support for independent_X routines, handling all of
4744 the combinations that can result.
4747 bit 0 set if all elements are same size (using sizes[0])
4748 bit 1 set if elements should be zeroed
4754 iALLOc(mstate av, size_t n_elements, size_t* sizes, int opts, Void_t* chunks[])
4756 iALLOc(av, n_elements, sizes, opts, chunks)
4757 mstate av; size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
4760 INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */
4761 INTERNAL_SIZE_T contents_size; /* total size of elements */
4762 INTERNAL_SIZE_T array_size; /* request size of pointer array */
4763 Void_t* mem; /* malloced aggregate space */
4764 mchunkptr p; /* corresponding chunk */
4765 INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
4766 Void_t** marray; /* either "chunks" or malloced ptr array */
4767 mchunkptr array_chunk; /* chunk for malloced ptr array */
4768 int mmx; /* to disable mmap */
4769 INTERNAL_SIZE_T size;
4770 INTERNAL_SIZE_T size_flags;
4773 /* Ensure initialization/consolidation */
4774 if (have_fastchunks(av)) malloc_consolidate(av);
4776 /* compute array length, if needed */
4778 if (n_elements == 0)
4779 return chunks; /* nothing to do */
4784 /* if empty req, must still return chunk representing empty array */
4785 if (n_elements == 0)
4786 return (Void_t**) _int_malloc(av, 0);
4788 array_size = request2size(n_elements * (sizeof(Void_t*)));
4791 /* compute total element size */
4792 if (opts & 0x1) { /* all-same-size */
4793 element_size = request2size(*sizes);
4794 contents_size = n_elements * element_size;
4796 else { /* add up all the sizes */
4799 for (i = 0; i != n_elements; ++i)
4800 contents_size += request2size(sizes[i]);
4803 /* subtract out alignment bytes from total to minimize overallocation */
4804 size = contents_size + array_size - MALLOC_ALIGN_MASK;
4807 Allocate the aggregate chunk.
4808 But first disable mmap so malloc won't use it, since
4809 we would not be able to later free/realloc space internal
4810 to a segregated mmap region.
4812 mmx = mp_.n_mmaps_max; /* disable mmap */
4813 mp_.n_mmaps_max = 0;
4814 mem = _int_malloc(av, size);
4815 mp_.n_mmaps_max = mmx; /* reset mmap */
4820 assert(!chunk_is_mmapped(p));
4821 remainder_size = chunksize(p);
4823 if (opts & 0x2) { /* optionally clear the elements */
4824 MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
4827 size_flags = PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0);
4829 /* If not provided, allocate the pointer array as final part of chunk */
4831 array_chunk = chunk_at_offset(p, contents_size);
4832 marray = (Void_t**) (chunk2mem(array_chunk));
4833 set_head(array_chunk, (remainder_size - contents_size) | size_flags);
4834 remainder_size = contents_size;
4837 /* split out elements */
4838 for (i = 0; ; ++i) {
4839 marray[i] = chunk2mem(p);
4840 if (i != n_elements-1) {
4841 if (element_size != 0)
4842 size = element_size;
4844 size = request2size(sizes[i]);
4845 remainder_size -= size;
4846 set_head(p, size | size_flags);
4847 p = chunk_at_offset(p, size);
4849 else { /* the final element absorbs any overallocation slop */
4850 set_head(p, remainder_size | size_flags);
4856 if (marray != chunks) {
4857 /* final element must have exactly exhausted chunk */
4858 if (element_size != 0)
4859 assert(remainder_size == element_size);
4861 assert(remainder_size == request2size(sizes[i]));
4862 check_inuse_chunk(av, mem2chunk(marray));
4865 for (i = 0; i != n_elements; ++i)
4866 check_inuse_chunk(av, mem2chunk(marray[i]));
4874 ------------------------------ valloc ------------------------------
4879 _int_valloc(mstate av, size_t bytes)
4881 _int_valloc(av, bytes) mstate av; size_t bytes;
4884 /* Ensure initialization/consolidation */
4885 if (have_fastchunks(av)) malloc_consolidate(av);
4886 return _int_memalign(av, mp_.pagesize, bytes);
4890 ------------------------------ pvalloc ------------------------------
4896 _int_pvalloc(mstate av, size_t bytes)
4898 _int_pvalloc(av, bytes) mstate av, size_t bytes;
4903 /* Ensure initialization/consolidation */
4904 if (have_fastchunks(av)) malloc_consolidate(av);
4905 pagesz = mp_.pagesize;
4906 return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
4911 ------------------------------ malloc_trim ------------------------------
4915 int mTRIm(size_t pad)
4917 int mTRIm(pad) size_t pad;
4920 mstate av = &main_arena; /* already locked */
4922 /* Ensure initialization/consolidation */
4923 malloc_consolidate(av);
4925 #ifndef MORECORE_CANNOT_TRIM
4926 return sYSTRIm(pad, av);
4934 ------------------------- malloc_usable_size -------------------------
4938 size_t mUSABLe(Void_t* mem)
4940 size_t mUSABLe(mem) Void_t* mem;
4946 if (chunk_is_mmapped(p))
4947 return chunksize(p) - 2*SIZE_SZ;
4949 return chunksize(p) - SIZE_SZ;
4955 ------------------------------ mallinfo ------------------------------
4958 struct mallinfo mALLINFo(mstate av)
4964 INTERNAL_SIZE_T avail;
4965 INTERNAL_SIZE_T fastavail;
4969 /* Ensure initialization */
4970 if (av->top == 0) malloc_consolidate(av);
4972 check_malloc_state(av);
4974 /* Account for top */
4975 avail = chunksize(av->top);
4976 nblocks = 1; /* top always exists */
4978 /* traverse fastbins */
4982 for (i = 0; i < NFASTBINS; ++i) {
4983 for (p = av->fastbins[i]; p != 0; p = p->fd) {
4985 fastavail += chunksize(p);
4991 /* traverse regular bins */
4992 for (i = 1; i < NBINS; ++i) {
4994 for (p = last(b); p != b; p = p->bk) {
4996 avail += chunksize(p);
5000 mi.smblks = nfastblocks;
5001 mi.ordblks = nblocks;
5002 mi.fordblks = avail;
5003 mi.uordblks = av->system_mem - avail;
5004 mi.arena = av->system_mem;
5005 mi.hblks = mp_.n_mmaps;
5006 mi.hblkhd = mp_.mmapped_mem;
5007 mi.fsmblks = fastavail;
5008 mi.keepcost = chunksize(av->top);
5009 mi.usmblks = mp_.max_total_mem;
5014 ------------------------------ malloc_stats ------------------------------
5022 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
5024 long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
5027 for (i=0, ar_ptr = &main_arena;; i++) {
5028 (void)mutex_lock(&ar_ptr->mutex);
5029 mi = mALLINFo(ar_ptr);
5030 fprintf(stderr, "Arena %d:\n", i);
5031 fprintf(stderr, "system bytes = %10u\n", (unsigned int)mi.arena);
5032 fprintf(stderr, "in use bytes = %10u\n", (unsigned int)mi.uordblks);
5033 #if MALLOC_DEBUG > 1
5035 dump_heap(heap_for_ptr(top(ar_ptr)));
5037 system_b += mi.arena;
5038 in_use_b += mi.uordblks;
5040 stat_lock_direct += ar_ptr->stat_lock_direct;
5041 stat_lock_loop += ar_ptr->stat_lock_loop;
5042 stat_lock_wait += ar_ptr->stat_lock_wait;
5044 (void)mutex_unlock(&ar_ptr->mutex);
5045 ar_ptr = ar_ptr->next;
5046 if(ar_ptr == &main_arena) break;
5049 fprintf(stderr, "Total (incl. mmap):\n");
5051 fprintf(stderr, "Total:\n");
5053 fprintf(stderr, "system bytes = %10u\n", system_b);
5054 fprintf(stderr, "in use bytes = %10u\n", in_use_b);
5056 fprintf(stderr, "max system bytes = %10u\n", (unsigned int)mp_.max_total_mem);
5059 fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps);
5060 fprintf(stderr, "max mmap bytes = %10lu\n",
5061 (unsigned long)mp_.max_mmapped_mem);
5064 fprintf(stderr, "heaps created = %10d\n", stat_n_heaps);
5065 fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct);
5066 fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop);
5067 fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait);
5068 fprintf(stderr, "locked total = %10ld\n",
5069 stat_lock_direct + stat_lock_loop + stat_lock_wait);
5075 ------------------------------ mallopt ------------------------------
5079 int mALLOPt(int param_number, int value)
5081 int mALLOPt(param_number, value) int param_number; int value;
5084 mstate av = &main_arena;
5087 (void)mutex_lock(&av->mutex);
5088 /* Ensure initialization/consolidation */
5089 malloc_consolidate(av);
5091 switch(param_number) {
5093 if (value >= 0 && value <= MAX_FAST_SIZE) {
5094 set_max_fast(av, value);
5100 case M_TRIM_THRESHOLD:
5101 mp_.trim_threshold = value;
5105 mp_.top_pad = value;
5108 case M_MMAP_THRESHOLD:
5110 /* Forbid setting the threshold too high. */
5111 if((unsigned long)value > HEAP_MAX_SIZE/2)
5115 mp_.mmap_threshold = value;
5124 mp_.n_mmaps_max = value;
5127 case M_CHECK_ACTION:
5128 check_action = value;
5131 (void)mutex_unlock(&av->mutex);
5137 -------------------- Alternative MORECORE functions --------------------
5142 General Requirements for MORECORE.
5144 The MORECORE function must have the following properties:
5146 If MORECORE_CONTIGUOUS is false:
5148 * MORECORE must allocate in multiples of pagesize. It will
5149 only be called with arguments that are multiples of pagesize.
5151 * MORECORE(0) must return an address that is at least
5152 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
5154 else (i.e. If MORECORE_CONTIGUOUS is true):
5156 * Consecutive calls to MORECORE with positive arguments
5157 return increasing addresses, indicating that space has been
5158 contiguously extended.
5160 * MORECORE need not allocate in multiples of pagesize.
5161 Calls to MORECORE need not have args of multiples of pagesize.
5163 * MORECORE need not page-align.
5167 * MORECORE may allocate more memory than requested. (Or even less,
5168 but this will generally result in a malloc failure.)
5170 * MORECORE must not allocate memory when given argument zero, but
5171 instead return one past the end address of memory from previous
5172 nonzero call. This malloc does NOT call MORECORE(0)
5173 until at least one call with positive arguments is made, so
5174 the initial value returned is not important.
5176 * Even though consecutive calls to MORECORE need not return contiguous
5177 addresses, it must be OK for malloc'ed chunks to span multiple
5178 regions in those cases where they do happen to be contiguous.
5180 * MORECORE need not handle negative arguments -- it may instead
5181 just return MORECORE_FAILURE when given negative arguments.
5182 Negative arguments are always multiples of pagesize. MORECORE
5183 must not misinterpret negative args as large positive unsigned
5184 args. You can suppress all such calls from even occurring by defining
5185 MORECORE_CANNOT_TRIM,
5187 There is some variation across systems about the type of the
5188 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
5189 actually be size_t, because sbrk supports negative args, so it is
5190 normally the signed type of the same width as size_t (sometimes
5191 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
5192 matter though. Internally, we use "long" as arguments, which should
5193 work across all reasonable possibilities.
5195 Additionally, if MORECORE ever returns failure for a positive
5196 request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
5197 system allocator. This is a useful backup strategy for systems with
5198 holes in address spaces -- in this case sbrk cannot contiguously
5199 expand the heap, but mmap may be able to map noncontiguous space.
5201 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5202 a function that always returns MORECORE_FAILURE.
5204 If you are using this malloc with something other than sbrk (or its
5205 emulation) to supply memory regions, you probably want to set
5206 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5207 allocator kindly contributed for pre-OSX macOS. It uses virtually
5208 but not necessarily physically contiguous non-paged memory (locked
5209 in, present and won't get swapped out). You can use it by
5210 uncommenting this section, adding some #includes, and setting up the
5211 appropriate defines above:
5213 #define MORECORE osMoreCore
5214 #define MORECORE_CONTIGUOUS 0
5216 There is also a shutdown routine that should somehow be called for
5217 cleanup upon program exit.
5219 #define MAX_POOL_ENTRIES 100
5220 #define MINIMUM_MORECORE_SIZE (64 * 1024)
5221 static int next_os_pool;
5222 void *our_os_pools[MAX_POOL_ENTRIES];
5224 void *osMoreCore(int size)
5227 static void *sbrk_top = 0;
5231 if (size < MINIMUM_MORECORE_SIZE)
5232 size = MINIMUM_MORECORE_SIZE;
5233 if (CurrentExecutionLevel() == kTaskLevel)
5234 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5237 return (void *) MORECORE_FAILURE;
5239 // save ptrs so they can be freed during cleanup
5240 our_os_pools[next_os_pool] = ptr;
5242 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5243 sbrk_top = (char *) ptr + size;
5248 // we don't currently support shrink behavior
5249 return (void *) MORECORE_FAILURE;
5257 // cleanup any allocated memory pools
5258 // called as last thing before shutting down driver
5260 void osCleanupMem(void)
5264 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5267 PoolDeallocate(*ptr);
5277 /* We need a wrapper function for one of the additions of POSIX. */
5279 __posix_memalign (void **memptr, size_t alignment, size_t size)
5283 /* Test whether the SIZE argument is valid. It must be a power of
5284 two multiple of sizeof (void *). */
5285 if (size % sizeof (void *) != 0 || (size & (size - 1)) != 0)
5288 mem = __libc_memalign (alignment, size);
5297 weak_alias (__posix_memalign, posix_memalign)
5299 weak_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
5300 weak_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
5301 weak_alias (__libc_free, __free) weak_alias (__libc_free, free)
5302 weak_alias (__libc_malloc, __malloc) weak_alias (__libc_malloc, malloc)
5303 weak_alias (__libc_memalign, __memalign) weak_alias (__libc_memalign, memalign)
5304 weak_alias (__libc_realloc, __realloc) weak_alias (__libc_realloc, realloc)
5305 weak_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5306 weak_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5307 weak_alias (__libc_mallinfo, __mallinfo) weak_alias (__libc_mallinfo, mallinfo)
5308 weak_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
5310 weak_alias (__malloc_stats, malloc_stats)
5311 weak_alias (__malloc_usable_size, malloc_usable_size)
5312 weak_alias (__malloc_trim, malloc_trim)
5313 weak_alias (__malloc_get_state, malloc_get_state)
5314 weak_alias (__malloc_set_state, malloc_set_state)
5318 /* ------------------------------------------------------------
5321 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]