/* GLIB sliced memory - fast concurrent memory chunk allocator
 * Copyright (C) 2005 Tim Janik
 * SPDX-License-Identifier: LGPL-2.1-or-later
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "glibconfig.h"

#if defined(HAVE_POSIX_MEMALIGN) && !defined(_XOPEN_SOURCE)
#define _XOPEN_SOURCE 600 /* posix_memalign() */
#endif
#include <stdlib.h> /* posix_memalign() */
#include <unistd.h> /* sysconf() */
#include <stdio.h>  /* fputs */

#include "gmem.h"   /* gslice.h */
#include "gstrfuncs.h"
#include "gtrashstack.h"
#include "gtestutils.h"
#include "gthreadprivate.h"
#include "glib_trace.h"
#include "gvalgrind.h"
/**
 * SECTION:memory_slices
 * @title: Memory Slices
 * @short_description: efficient way to allocate groups of equal-sized
 *     chunks of memory
 *
 * Memory slices provide a space-efficient and multi-processing scalable
 * way to allocate equal-sized pieces of memory, just like the original
 * #GMemChunks (from GLib 2.8), while avoiding their excessive
 * memory-waste, scalability and performance problems.
 *
 * To achieve these goals, the slice allocator uses a sophisticated,
 * layered design that has been inspired by Bonwick's slab allocator
 * ([Bonwick94](http://citeseer.ist.psu.edu/bonwick94slab.html)
 * Jeff Bonwick, The slab allocator: An object-caching kernel
 * memory allocator. USENIX 1994, and
 * [Bonwick01](http://citeseer.ist.psu.edu/bonwick01magazines.html)
 * Bonwick and Jonathan Adams, Magazines and vmem: Extending the
 * slab allocator to many CPUs and arbitrary resources. USENIX 2001).
 *
 * It uses posix_memalign() to optimize allocations of many equally-sized
 * chunks, and has per-thread free lists (the so-called magazine layer)
 * to quickly satisfy allocation requests of already known structure sizes.
 * This is accompanied by extra caching logic to keep freed memory around
 * for some time before returning it to the system. Memory that is unused
 * due to alignment constraints is used for cache colorization (random
 * distribution of chunk addresses) to improve CPU cache utilization. The
 * caching layer of the slice allocator adapts itself to high lock contention
 * to improve scalability.
 *
 * The slice allocator can allocate blocks as small as two pointers, and
 * unlike malloc(), it does not reserve extra space per block. For large block
 * sizes, g_slice_new() and g_slice_alloc() will automatically delegate to the
 * system malloc() implementation. For newly written code it is recommended
 * to use the new `g_slice` API instead of g_malloc() and
 * friends, as long as objects are not resized during their lifetime and the
 * object size used at allocation time is still available when freeing.
 *
 * Here is an example for using the slice allocator:
 * |[<!-- language="C" -->
 * gchar *mem[10000];
 * gint i, j;
 *
 * // Allocate 10000 blocks.
 * for (i = 0; i < 10000; i++)
 *   mem[i] = g_slice_alloc (50);
 *
 * // Fill in the memory with some junk.
 * for (i = 0; i < 10000; i++)
 *   for (j = 0; j < 50; j++)
 *     mem[i][j] = i * j;
 *
 * // Now free all of the blocks.
 * for (i = 0; i < 10000; i++)
 *   g_slice_free1 (50, mem[i]);
 * ]|
 *
 * And here is an example for using the slice allocator
 * with data structures:
 * |[<!-- language="C" -->
 * GRealArray *array;
 *
 * // Allocate one block, using the g_slice_new() macro.
 * array = g_slice_new (GRealArray);
 *
 * // We can now use array just like a normal pointer to a structure.
 * array->data            = NULL;
 * array->zero_terminated = (zero_terminated ? 1 : 0);
 * array->clear           = (clear ? 1 : 0);
 * array->elt_size        = elt_size;
 *
 * // We can free the block, so it can be reused.
 * g_slice_free (GRealArray, array);
 * ]|
 */
/* the GSlice allocator is split up into 4 layers, roughly modelled after the slab
 * allocator and magazine extensions as outlined in:
 * + [Bonwick94] Jeff Bonwick, The slab allocator: An object-caching kernel
 *   memory allocator. USENIX 1994, http://citeseer.ist.psu.edu/bonwick94slab.html
 * + [Bonwick01] Bonwick and Jonathan Adams, Magazines and vmem: Extending the
 *   slab allocator to many CPUs and arbitrary resources.
 *   USENIX 2001, http://citeseer.ist.psu.edu/bonwick01magazines.html
 *
 * - the thread magazines. for each (aligned) chunk size, a magazine (a list)
 *   of recently freed and soon to be allocated chunks is maintained per thread.
 *   this way, most alloc/free requests can be quickly satisfied from per-thread
 *   free lists which only require one g_private_get() call to retrieve the
 *   thread-specific data.
 * - the magazine cache. allocating and freeing chunks to/from threads only
 *   occurs at magazine sizes from a global depot of magazines. the depot
 *   maintains a 15 second working set of allocated magazines, so full
 *   magazines are not allocated and released too often.
 *   the chunk size dependent magazine sizes automatically adapt (within limits,
 *   see [3]) to lock contention to properly scale performance across a variety
 *   of machines.
 * - the slab allocator. this allocator allocates slabs (blocks of memory) close
 *   to the system page size or multiples thereof which have to be page aligned.
 *   the blocks are divided into smaller chunks which are used to satisfy
 *   allocations from the upper layers. the space provided by the remainder of
 *   the chunk size division is used for cache colorization (random distribution
 *   of chunk addresses) to improve processor cache utilization. multiple slabs
 *   with the same chunk size are kept in a partially sorted ring to allow O(1)
 *   freeing and allocation of chunks (as long as the allocation of an entirely
 *   new slab can be avoided).
 * - the page allocator. on most modern systems, posix_memalign(3) or
 *   memalign(3) should be available, so this is used to allocate blocks with
 *   system page size based alignments and sizes or multiples thereof.
 *   if no memalign variant is provided, valloc() is used instead and
 *   block sizes are limited to the system page size (no multiples thereof).
 *   as a fallback, on systems without even valloc(), a malloc(3)-based page
 *   allocator with alloc-only behaviour is used.
 *
 * [1] some systems' memalign(3) implementations may rely on boundary tagging for
 *     the handed out memory chunks. to avoid excessive page-wise fragmentation,
 *     we reserve 2 * sizeof (void*) per block size for the system's memalign(3),
 *     specified in NATIVE_MALLOC_PADDING.
 * [2] using the slab allocator alone already provides for a fast and efficient
 *     allocator, it doesn't properly scale beyond single-threaded uses though.
 *     also, the slab allocator implements eager free(3)-ing, i.e. does not
 *     provide any form of caching or working set maintenance. so if used alone,
 *     it's vulnerable to thrashing for sequences of balanced (alloc, free) pairs
 *     at certain thresholds.
 * [3] magazine sizes are bound by an implementation specific minimum size and
 *     a chunk size specific maximum to limit magazine storage sizes to roughly
 *     16KB.
 * [4] allocating ca. 8 chunks per block/page keeps a good balance between
 *     external and internal fragmentation (<= 12.5%). [Bonwick94]
 */
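
/* A quick worked instance of [4], for illustration: with a 4096-byte page and
 * 448-byte chunks, 8 chunks occupy 3584 bytes and the remainder (including
 * the SlabInfo record kept at the end of the page) stays below 512 bytes,
 * i.e. internal fragmentation is bounded by 512/4096 <= 12.5%.
 */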
/* --- macros and constants --- */
#define LARGEALIGNMENT          (256)
#define P2ALIGNMENT             (2 * sizeof (gsize)) /* fits 2 pointers (assumed to be 2 * GLIB_SIZEOF_SIZE_T below) */
#define ALIGN(size, base)       ((base) * (gsize) (((size) + (base) - 1) / (base)))
#define NATIVE_MALLOC_PADDING   P2ALIGNMENT /* per-page padding left for native malloc(3) see [1] */
#define SLAB_INFO_SIZE          P2ALIGN (sizeof (SlabInfo) + NATIVE_MALLOC_PADDING)
#define MAX_MAGAZINE_SIZE       (256) /* see [3] and allocator_get_magazine_threshold() for this */
#define MIN_MAGAZINE_SIZE       (4)
#define MAX_STAMP_COUNTER       (7) /* distributes the load of gettimeofday() */
#define MAX_SLAB_CHUNK_SIZE(al) (((al)->max_page_size - SLAB_INFO_SIZE) / 8) /* we want at least 8 chunks per page, see [4] */
#define MAX_SLAB_INDEX(al)      (SLAB_INDEX (al, MAX_SLAB_CHUNK_SIZE (al)) + 1)
#define SLAB_INDEX(al, asize)   ((asize) / P2ALIGNMENT - 1) /* asize must be P2ALIGNMENT aligned */
#define SLAB_CHUNK_SIZE(al, ix) (((ix) + 1) * P2ALIGNMENT)
#define SLAB_BPAGE_SIZE(al,csz) (8 * (csz) + SLAB_INFO_SIZE)

/* optimized version of ALIGN (size, P2ALIGNMENT) */
#if GLIB_SIZEOF_SIZE_T * 2 == 8  /* P2ALIGNMENT */
#define P2ALIGN(size) (((size) + 0x7) & ~(gsize) 0x7)
#elif GLIB_SIZEOF_SIZE_T * 2 == 16 /* P2ALIGNMENT */
#define P2ALIGN(size) (((size) + 0xf) & ~(gsize) 0xf)
#else
#define P2ALIGN(size) ALIGN (size, P2ALIGNMENT)
#endif

/* special helpers to avoid gmessage.c dependency */
static void mem_error (const char *format, ...) G_GNUC_PRINTF (1,2);
#define mem_assert(cond) do { if (G_LIKELY (cond)) ; else mem_error ("assertion failed: %s", #cond); } while (0)
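
/* The block below is an illustrative sketch only (kept compiled out, in the
 * style of the #if 0 debugging helpers used elsewhere in this file): it shows
 * how P2ALIGN(), SLAB_INDEX() and SLAB_CHUNK_SIZE() relate for a typical
 * request size. The function name is hypothetical and not part of GSlice.
 */
#if 0
static void
slab_macro_example (void)
{
  /* a 50-byte request is first padded to the 2-pointer boundary */
  gsize chunk_size = P2ALIGN (50); /* 56 with 8-byte, 64 with 16-byte P2ALIGNMENT */
  /* the aligned size maps to a slab index, and SLAB_CHUNK_SIZE() inverts
   * SLAB_INDEX() for P2ALIGNMENT-aligned sizes */
  guint ix = SLAB_INDEX (allocator, chunk_size);
  mem_assert (SLAB_CHUNK_SIZE (allocator, ix) == chunk_size);
}
#endif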

/* --- structures --- */
typedef struct _ChunkLink      ChunkLink;
typedef struct _SlabInfo       SlabInfo;
typedef struct _CachedMagazine CachedMagazine;
struct _ChunkLink {
  ChunkLink *next;
  gpointer   data;
};
struct _SlabInfo {
  ChunkLink *chunks;
  guint      n_allocated;
  SlabInfo  *next, *prev;
};
typedef struct {
  ChunkLink *chunks;
  gsize      count; /* approximate chunk list length */
} Magazine;
typedef struct {
  Magazine *magazine1; /* array of MAX_SLAB_INDEX (allocator) */
  Magazine *magazine2; /* array of MAX_SLAB_INDEX (allocator) */
} ThreadMemory;
typedef struct {
  gboolean always_malloc;
  gboolean bypass_magazines;
  gboolean debug_blocks;
  gsize    working_set_msecs;
  guint    color_increment;
} SliceConfig;
typedef struct {
  /* const after initialization */
  gsize        min_page_size, max_page_size;
  SliceConfig  config;
  gsize        max_slab_chunk_size_for_magazine_cache;
  /* magazine cache */
  GMutex       magazine_mutex;
  ChunkLink  **magazines;           /* array of MAX_SLAB_INDEX (allocator) */
  guint       *contention_counters; /* array of MAX_SLAB_INDEX (allocator) */
  gint         mutex_counter;
  guint        stamp_counter;
  guint        last_stamp;
  /* slab allocator */
  GMutex       slab_mutex;
  SlabInfo   **slab_stack;          /* array of MAX_SLAB_INDEX (allocator) */
  guint        color_accu;
} Allocator;

/* --- g-slice prototypes --- */
static gpointer     slab_allocator_alloc_chunk       (gsize      chunk_size);
static void         slab_allocator_free_chunk        (gsize      chunk_size,
                                                      gpointer   mem);
static void         private_thread_memory_cleanup    (gpointer   data);
static gpointer     allocator_memalign               (gsize      alignment,
                                                      gsize      memsize);
static void         allocator_memfree                (gsize      memsize,
                                                      gpointer   mem);
static inline void  magazine_cache_update_stamp      (void);
static inline gsize allocator_get_magazine_threshold (Allocator *allocator,
                                                      guint      ix);

/* --- g-slice memory checker --- */
static void smc_notify_alloc (void   *pointer,
                              size_t  size);
static int  smc_notify_free  (void   *pointer,
                              size_t  size);

/* --- variables --- */
static GPrivate    private_thread_memory = G_PRIVATE_INIT (private_thread_memory_cleanup);
static gsize       sys_page_size = 0;
static Allocator   allocator[1] = { { 0, }, };
static SliceConfig slice_config = {
  FALSE,     /* always_malloc */
  FALSE,     /* bypass_magazines */
  FALSE,     /* debug_blocks */
  15 * 1000, /* working_set_msecs */
  1,         /* color increment, alt: 0x7fffffff */
};
static GMutex      smc_tree_mutex; /* mutex for G_SLICE=debug-blocks */
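
/* For reference, G_SLICE accepts a comma-separated combination of the debug
 * keys parsed in slice_config_init() below, e.g.:
 *
 *   G_SLICE=always-malloc,debug-blocks ./my-program
 *
 * always-malloc routes all slice allocations through the system malloc
 * (g_malloc()/g_free()), and debug-blocks enables the memory checker tree
 * that is guarded by smc_tree_mutex above.
 */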

/* --- auxiliary functions --- */
void
g_slice_set_config (GSliceConfig ckey,
                    gint64       value)
{
  g_return_if_fail (sys_page_size == 0);
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      slice_config.always_malloc = value != 0;
      break;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      slice_config.bypass_magazines = value != 0;
      break;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      slice_config.working_set_msecs = value;
      break;
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      slice_config.color_increment = value;
      break;
    default: ;
    }
}

gint64
g_slice_get_config (GSliceConfig ckey)
{
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      return slice_config.always_malloc;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      return slice_config.bypass_magazines;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      return slice_config.working_set_msecs;
    case G_SLICE_CONFIG_CHUNK_SIZES:
      return MAX_SLAB_INDEX (allocator);
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      return slice_config.color_increment;
    default:
      return 0;
    }
}

gint64*
g_slice_get_config_state (GSliceConfig ckey,
                          gint64       address,
                          guint       *n_values)
{
  guint i = 0;
  gint64 array[64];
  g_return_val_if_fail (n_values != NULL, NULL);
  switch (ckey)
    {
    case G_SLICE_CONFIG_CONTENTION_COUNTER:
      array[i++] = SLAB_CHUNK_SIZE (allocator, address);
      array[i++] = allocator->contention_counters[address];
      array[i++] = allocator_get_magazine_threshold (allocator, address);
      *n_values = i;
      return g_memdup2 (array, sizeof (array[0]) * *n_values);
    default:
      return NULL;
    }
}

static void
slice_config_init (SliceConfig *config)
{
  const gchar *val;
  gchar *val_allocated = NULL;

  *config = slice_config;

  /* Note that the empty string (`G_SLICE=""`) is treated differently from the
   * envvar being unset. In the latter case, we also check whether running under
   * valgrind. */
#ifndef G_OS_WIN32
  val = g_getenv ("G_SLICE");
#else
  /* The win32 implementation of g_getenv() has to do UTF-8 ↔ UTF-16 conversions
   * which use the slice allocator, leading to deadlock. Use a simple in-place
   * implementation here instead.
   *
   * Ignore references to other environment variables: only support values which
   * are a combination of always-malloc and debug-blocks. */
  {
    wchar_t wvalue[128]; /* at least big enough for `always-malloc,debug-blocks` */
    gsize len;

    len = GetEnvironmentVariableW (L"G_SLICE", wvalue, G_N_ELEMENTS (wvalue));
    if (len == 0)
      {
        if (GetLastError () == ERROR_ENVVAR_NOT_FOUND)
          val = NULL;
        else
          val = "";
      }
    else if (len >= G_N_ELEMENTS (wvalue))
      {
        /* @wvalue isn’t big enough. Give up. */
        g_warning ("Unsupported G_SLICE value");
        val = NULL;
      }
    else
      {
        /* it’s safe to use g_utf16_to_utf8() here as it only allocates using
         * malloc() rather than GSlice */
        val = val_allocated = g_utf16_to_utf8 (wvalue, -1, NULL, NULL, NULL);
      }
  }
#endif /* G_OS_WIN32 */

  if (val != NULL)
    {
      gint flags;
      const GDebugKey keys[] = {
        { "always-malloc", 1 << 0 },
        { "debug-blocks",  1 << 1 },
      };

      flags = g_parse_debug_string (val, keys, G_N_ELEMENTS (keys));
      if (flags & (1 << 0))
        config->always_malloc = TRUE;
      if (flags & (1 << 1))
        config->debug_blocks = TRUE;
    }
  else
    {
      /* G_SLICE was not specified, so check if valgrind is running and
       * disable ourselves if it is.
       *
       * This way it's possible to force gslice to be enabled under
       * valgrind just by setting G_SLICE to the empty string.
       */
#ifdef ENABLE_VALGRIND
      if (RUNNING_ON_VALGRIND)
        config->always_malloc = TRUE;
#endif
    }

  g_free (val_allocated);
}

static void
g_slice_init_nomessage (void)
{
  /* we may not use g_error() or friends here */
  mem_assert (sys_page_size == 0);
  mem_assert (MIN_MAGAZINE_SIZE >= 4);

#ifdef G_OS_WIN32
  {
    SYSTEM_INFO system_info;
    GetSystemInfo (&system_info);
    sys_page_size = system_info.dwPageSize;
  }
#else
  sys_page_size = sysconf (_SC_PAGESIZE); /* = sysconf (_SC_PAGE_SIZE); = getpagesize(); */
#endif
  mem_assert (sys_page_size >= 2 * LARGEALIGNMENT);
  mem_assert ((sys_page_size & (sys_page_size - 1)) == 0);
  slice_config_init (&allocator->config);
  allocator->min_page_size = sys_page_size;
#if HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN
  /* allow allocation of pages up to 8KB (with 8KB alignment).
   * this is useful because many medium to large sized structures
   * fit less than 8 times (see [4]) into 4KB pages.
   * we allow very small page sizes here, to reduce wastage in
   * threads if only small allocations are required (this does
   * bear the risk of increasing allocation times and fragmentation
   * though). */
  allocator->min_page_size = MAX (allocator->min_page_size, 4096);
  allocator->max_page_size = MAX (allocator->min_page_size, 8192);
  allocator->min_page_size = MIN (allocator->min_page_size, 128);
#else
  /* we can only align to system page size */
  allocator->max_page_size = sys_page_size;
#endif
  if (allocator->config.always_malloc)
    {
      allocator->contention_counters = NULL;
      allocator->magazines = NULL;
      allocator->slab_stack = NULL;
    }
  else
    {
      allocator->contention_counters = g_new0 (guint, MAX_SLAB_INDEX (allocator));
      allocator->magazines = g_new0 (ChunkLink*, MAX_SLAB_INDEX (allocator));
      allocator->slab_stack = g_new0 (SlabInfo*, MAX_SLAB_INDEX (allocator));
    }

  allocator->mutex_counter = 0;
  allocator->stamp_counter = MAX_STAMP_COUNTER; /* force initial update */
  allocator->last_stamp = 0;
  allocator->color_accu = 0;
  magazine_cache_update_stamp();
  /* values cached for performance reasons */
  allocator->max_slab_chunk_size_for_magazine_cache = MAX_SLAB_CHUNK_SIZE (allocator);
  if (allocator->config.always_malloc || allocator->config.bypass_magazines)
    allocator->max_slab_chunk_size_for_magazine_cache = 0; /* non-optimized cases */
}

static inline guint
allocator_categorize (gsize aligned_chunk_size)
{
  /* speed up the likely path */
  if (G_LIKELY (aligned_chunk_size && aligned_chunk_size <= allocator->max_slab_chunk_size_for_magazine_cache))
    return 1; /* use magazine cache */

  if (!allocator->config.always_malloc &&
      aligned_chunk_size &&
      aligned_chunk_size <= MAX_SLAB_CHUNK_SIZE (allocator))
    {
      if (allocator->config.bypass_magazines)
        return 2; /* use slab allocator, see [2] */
      return 1;   /* use magazine cache */
    }
  return 0;       /* use malloc() */
}

static void
g_mutex_lock_a (GMutex *mutex,
                guint  *contention_counter)
{
  gboolean contention = FALSE;
  if (!g_mutex_trylock (mutex))
    {
      g_mutex_lock (mutex);
      contention = TRUE;
    }
  if (contention)
    {
      allocator->mutex_counter++;
      if (allocator->mutex_counter >= 1) /* quickly adapt to contention */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MIN (*contention_counter + 1, MAX_MAGAZINE_SIZE);
        }
    }
  else /* !contention */
    {
      allocator->mutex_counter--;
      if (allocator->mutex_counter < -11) /* moderately recover magazine sizes */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MAX (*contention_counter, 1) - 1;
        }
    }
}

static inline ThreadMemory*
thread_memory_from_self (void)
{
  ThreadMemory *tmem = g_private_get (&private_thread_memory);
  if (G_UNLIKELY (!tmem))
    {
      static GMutex init_mutex;
      guint n_magazines;

      g_mutex_lock (&init_mutex);
      if G_UNLIKELY (sys_page_size == 0)
        g_slice_init_nomessage ();
      g_mutex_unlock (&init_mutex);

      n_magazines = MAX_SLAB_INDEX (allocator);
      tmem = g_private_set_alloc0 (&private_thread_memory, sizeof (ThreadMemory) + sizeof (Magazine) * 2 * n_magazines);
      tmem->magazine1 = (Magazine*) (tmem + 1);
      tmem->magazine2 = &tmem->magazine1[n_magazines];
    }
  return tmem;
}

static inline ChunkLink*
magazine_chain_pop_head (ChunkLink **magazine_chunks)
{
  /* magazine chains are linked via ChunkLink->next.
   * each ChunkLink->data of the toplevel chain may point to a subchain,
   * linked via ChunkLink->next. ChunkLink->data of the subchains just
   * contains uninitialized junk.
   */
  ChunkLink *chunk = (*magazine_chunks)->data;
  if (G_UNLIKELY (chunk))
    {
      /* allocating from freed list */
      (*magazine_chunks)->data = chunk->next;
    }
  else
    {
      chunk = *magazine_chunks;
      *magazine_chunks = chunk->next;
    }
  return chunk;
}

#if 0 /* useful for debugging */
static guint
magazine_count (ChunkLink *head)
{
  guint count = 0;
  while (head)
    {
      ChunkLink *child;
      count += 1; /* count the toplevel chunk */
      for (child = head->data; child; child = child->next)
        count += 1; /* count subchain chunks */
      head = head->next;
    }
  return count;
}
#endif

static inline gsize
allocator_get_magazine_threshold (Allocator *local_allocator,
                                  guint      ix)
{
  /* the magazine size calculated here has a lower bound of MIN_MAGAZINE_SIZE,
   * which is required by the implementation. also, for moderately sized chunks
   * (say >= 64 bytes), magazine sizes shouldn't be much smaller than the number
   * of chunks available per page/2 to avoid excessive traffic in the magazine
   * cache for small to medium sized structures.
   * the upper bound of the magazine size is effectively provided by
   * MAX_MAGAZINE_SIZE. for larger chunks, this number is scaled down so that
   * the content of a single magazine doesn't exceed ca. 16KB.
   */
  gsize chunk_size = SLAB_CHUNK_SIZE (local_allocator, ix);
  guint threshold = MAX (MIN_MAGAZINE_SIZE, local_allocator->max_page_size / MAX (5 * chunk_size, 5 * 32));
  guint contention_counter = local_allocator->contention_counters[ix];
  if (G_UNLIKELY (contention_counter)) /* single CPU bias */
    {
      /* adapt contention counter thresholds to chunk sizes */
      contention_counter = contention_counter * 64 / chunk_size;
      threshold = MAX (threshold, contention_counter);
    }
  return threshold;
}

/* --- magazine cache --- */
static inline void
magazine_cache_update_stamp (void)
{
  if (allocator->stamp_counter >= MAX_STAMP_COUNTER)
    {
      gint64 now_us = g_get_real_time ();
      allocator->last_stamp = now_us / 1000; /* milliseconds */
      allocator->stamp_counter = 0;
    }
  else
    allocator->stamp_counter++;
}

static inline ChunkLink*
magazine_chain_prepare_fields (ChunkLink *magazine_chunks)
{
  ChunkLink *chunk1, *chunk2, *chunk3, *chunk4;
  /* checked upon initialization: mem_assert (MIN_MAGAZINE_SIZE >= 4); */
  /* ensure a magazine with at least 4 unused data pointers */
  chunk1 = magazine_chain_pop_head (&magazine_chunks);
  chunk2 = magazine_chain_pop_head (&magazine_chunks);
  chunk3 = magazine_chain_pop_head (&magazine_chunks);
  chunk4 = magazine_chain_pop_head (&magazine_chunks);
  chunk4->next = magazine_chunks;
  chunk3->next = chunk4;
  chunk2->next = chunk3;
  chunk1->next = chunk2;
  return chunk1;
}

/* access the data fields of the first four links of a specially prepared magazine chain */
#define magazine_chain_prev(mc)       ((mc)->data)
#define magazine_chain_stamp(mc)      ((mc)->next->data)
#define magazine_chain_uint_stamp(mc) GPOINTER_TO_UINT ((mc)->next->data)
#define magazine_chain_next(mc)       ((mc)->next->next->data)
#define magazine_chain_count(mc)      ((mc)->next->next->next->data)
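
/* Illustration of a prepared magazine chain (>= 4 chunks are guaranteed by
 * MIN_MAGAZINE_SIZE), where the data fields of the first four links are
 * repurposed while the magazine sits in the depot:
 *
 *   mc -> mc->next -> mc->next->next -> mc->next->next->next -> rest...
 *   data: prev       stamp             next                    count
 */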

static void
magazine_cache_trim (Allocator *local_allocator,
                     guint      ix,
                     guint      stamp)
{
  /* g_mutex_lock (local_allocator->mutex); done by caller */
  /* trim magazine cache from tail */
  ChunkLink *current = magazine_chain_prev (local_allocator->magazines[ix]);
  ChunkLink *trash = NULL;
  while (!G_APPROX_VALUE (stamp, magazine_chain_uint_stamp (current),
                          local_allocator->config.working_set_msecs))
    {
      /* unlink */
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      /* clear special fields, put on trash stack */
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      magazine_chain_prev (current) = trash;
      trash = current;
      /* fixup list head if required */
      if (current == local_allocator->magazines[ix])
        {
          local_allocator->magazines[ix] = NULL;
          break;
        }
      current = prev;
    }
  g_mutex_unlock (&local_allocator->magazine_mutex);
  /* free trash */
  if (trash)
    {
      const gsize chunk_size = SLAB_CHUNK_SIZE (local_allocator, ix);
      g_mutex_lock (&local_allocator->slab_mutex);
      while (trash)
        {
          ChunkLink *current = trash;
          trash = magazine_chain_prev (current);
          magazine_chain_prev (current) = NULL; /* clear special field */
          while (current)
            {
              ChunkLink *chunk = magazine_chain_pop_head (&current);
              slab_allocator_free_chunk (chunk_size, chunk);
            }
        }
      g_mutex_unlock (&local_allocator->slab_mutex);
    }
}

static void
magazine_cache_push_magazine (guint      ix,
                              ChunkLink *magazine_chunks,
                              gsize      count) /* must be >= MIN_MAGAZINE_SIZE */
{
  ChunkLink *current = magazine_chain_prepare_fields (magazine_chunks);
  ChunkLink *next, *prev;
  g_mutex_lock (&allocator->magazine_mutex);
  /* add magazine at head */
  next = allocator->magazines[ix];
  if (next)
    prev = magazine_chain_prev (next);
  else
    next = prev = current;
  magazine_chain_next (prev) = current;
  magazine_chain_prev (next) = current;
  magazine_chain_prev (current) = prev;
  magazine_chain_next (current) = next;
  magazine_chain_count (current) = (gpointer) count;
  magazine_cache_update_stamp();
  magazine_chain_stamp (current) = GUINT_TO_POINTER (allocator->last_stamp);
  allocator->magazines[ix] = current;
  /* free old magazines beyond a certain threshold */
  magazine_cache_trim (allocator, ix, allocator->last_stamp);
  /* g_mutex_unlock (allocator->mutex); was done by magazine_cache_trim() */
}

static ChunkLink*
magazine_cache_pop_magazine (guint  ix,
                             gsize *countp)
{
  g_mutex_lock_a (&allocator->magazine_mutex, &allocator->contention_counters[ix]);
  if (!allocator->magazines[ix])
    {
      guint magazine_threshold = allocator_get_magazine_threshold (allocator, ix);
      gsize i, chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      ChunkLink *chunk, *head;
      g_mutex_unlock (&allocator->magazine_mutex);
      g_mutex_lock (&allocator->slab_mutex);
      head = slab_allocator_alloc_chunk (chunk_size);
      head->data = NULL;
      chunk = head;
      for (i = 1; i < magazine_threshold; i++)
        {
          chunk->next = slab_allocator_alloc_chunk (chunk_size);
          chunk = chunk->next;
          chunk->data = NULL;
        }
      chunk->next = NULL;
      g_mutex_unlock (&allocator->slab_mutex);
      *countp = i;
      return head;
    }
  else
    {
      ChunkLink *current = allocator->magazines[ix];
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      /* unlink */
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      allocator->magazines[ix] = next == current ? NULL : next;
      g_mutex_unlock (&allocator->magazine_mutex);
      /* clear special fields and hand out */
      *countp = (gsize) magazine_chain_count (current);
      magazine_chain_prev (current) = NULL;
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      return current;
    }
}

/* --- thread magazines --- */
static void
private_thread_memory_cleanup (gpointer data)
{
  ThreadMemory *tmem = data;
  const guint n_magazines = MAX_SLAB_INDEX (allocator);
  guint ix;
  for (ix = 0; ix < n_magazines; ix++)
    {
      Magazine *mags[2];
      guint j;
      mags[0] = &tmem->magazine1[ix];
      mags[1] = &tmem->magazine2[ix];
      for (j = 0; j < 2; j++)
        {
          Magazine *mag = mags[j];
          if (mag->count >= MIN_MAGAZINE_SIZE)
            magazine_cache_push_magazine (ix, mag->chunks, mag->count);
          else
            {
              const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
              g_mutex_lock (&allocator->slab_mutex);
              while (mag->chunks)
                {
                  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
                  slab_allocator_free_chunk (chunk_size, chunk);
                }
              g_mutex_unlock (&allocator->slab_mutex);
            }
        }
    }
  g_free (tmem);
}

static void
thread_memory_magazine1_reload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  mem_assert (mag->chunks == NULL); /* ensure that we may reset mag->count */
  mag->chunks = magazine_cache_pop_magazine (ix, &mag->count);
}

static void
thread_memory_magazine2_unload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine2[ix];
  magazine_cache_push_magazine (ix, mag->chunks, mag->count);
  mag->chunks = NULL;
  mag->count = 0;
}

static inline void
thread_memory_swap_magazines (ThreadMemory *tmem,
                              guint         ix)
{
  Magazine xmag = tmem->magazine1[ix];
  tmem->magazine1[ix] = tmem->magazine2[ix];
  tmem->magazine2[ix] = xmag;
}

static inline gboolean
thread_memory_magazine1_is_empty (ThreadMemory *tmem,
                                  guint         ix)
{
  return tmem->magazine1[ix].chunks == NULL;
}

static inline gboolean
thread_memory_magazine2_is_full (ThreadMemory *tmem,
                                 guint         ix)
{
  return tmem->magazine2[ix].count >= allocator_get_magazine_threshold (allocator, ix);
}

static inline gpointer
thread_memory_magazine1_alloc (ThreadMemory *tmem,
                               guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
  if (G_LIKELY (mag->count > 0))
    mag->count--;
  return chunk;
}

static inline void
thread_memory_magazine2_free (ThreadMemory *tmem,
                              guint         ix,
                              gpointer      mem)
{
  Magazine *mag = &tmem->magazine2[ix];
  ChunkLink *chunk = mem;
  chunk->next = mag->chunks;
  mag->chunks = chunk;
  mag->count++;
}

/* --- API functions --- */

/**
 * g_slice_new:
 * @type: the type to allocate, typically a structure name
 *
 * A convenience macro to allocate a block of memory from the
 * slice allocator.
 *
 * It calls g_slice_alloc() with `sizeof (@type)` and casts the
 * returned pointer to a pointer of the given type, avoiding a type
 * cast in the source code. Note that the underlying slice allocation
 * mechanism can be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
 * This can never return %NULL as the minimum allocation size from
 * `sizeof (@type)` is 1 byte.
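 *
 * A minimal usage sketch (the struct is illustrative):
 *
 * |[<!-- language="C" -->
 * typedef struct { int x, y; } Point;
 *
 * Point *p = g_slice_new (Point);
 * p->x = 1;
 * p->y = 2;
 * g_slice_free (Point, p);
 * ]|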
 *
 * Returns: (not nullable): a pointer to the allocated block, cast to a pointer
 *     to @type
 *
 * Since: 2.10
 */

/**
 * g_slice_new0:
 * @type: the type to allocate, typically a structure name
 *
 * A convenience macro to allocate a block of memory from the
 * slice allocator and set the memory to 0.
 *
 * It calls g_slice_alloc0() with `sizeof (@type)`
 * and casts the returned pointer to a pointer of the given type,
 * avoiding a type cast in the source code.
 * Note that the underlying slice allocation mechanism can
 * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
 * This can never return %NULL as the minimum allocation size from
 * `sizeof (@type)` is 1 byte.
 *
 * Returns: (not nullable): a pointer to the allocated block, cast to a pointer
 *     to @type
 *
 * Since: 2.10
 */

/**
 * g_slice_dup:
 * @type: the type to duplicate, typically a structure name
 * @mem: (not nullable): the memory to copy into the allocated block
 *
 * A convenience macro to duplicate a block of memory using
 * the slice allocator.
 *
 * It calls g_slice_copy() with `sizeof (@type)`
 * and casts the returned pointer to a pointer of the given type,
 * avoiding a type cast in the source code.
 * Note that the underlying slice allocation mechanism can
 * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
 * This can never return %NULL.
 *
 * Returns: (not nullable): a pointer to the allocated block, cast to a pointer
 *     to @type
 *
 * Since: 2.14
 */

/**
 * g_slice_free:
 * @type: the type of the block to free, typically a structure name
 * @mem: a pointer to the block to free
 *
 * A convenience macro to free a block of memory that has
 * been allocated from the slice allocator.
 *
 * It calls g_slice_free1() using `sizeof (type)`
 * as the block size.
 * Note that the exact release behaviour can be changed with the
 * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable, also see
 * [`G_SLICE`][G_SLICE] for related debugging options.
 *
 * If @mem is %NULL, this macro does nothing.
 *
 * Since: 2.10
 */

/**
 * g_slice_free_chain:
 * @type: the type of the @mem_chain blocks
 * @mem_chain: a pointer to the first block of the chain
 * @next: the field name of the next pointer in @type
 *
 * Frees a linked list of memory blocks of structure type @type.
 *
 * The memory blocks must be equal-sized, allocated via
 * g_slice_alloc() or g_slice_alloc0() and linked together by
 * a @next pointer (similar to #GSList). The name of the
 * @next field in @type is passed as third argument.
 * Note that the exact release behaviour can be changed with the
 * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable, also see
 * [`G_SLICE`][G_SLICE] for related debugging options.
 *
 * If @mem_chain is %NULL, this function does nothing.
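 *
 * A short sketch, with a hypothetical singly linked node type:
 *
 * |[<!-- language="C" -->
 * typedef struct _Node Node;
 * struct _Node { Node *next; int value; };
 *
 * Node *head = g_slice_new (Node);
 * head->next = g_slice_new (Node);
 * head->next->next = NULL;
 * g_slice_free_chain (Node, head, next);
 * ]|
 *
 * Since: 2.10
 */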

/**
 * g_slice_alloc:
 * @block_size: the number of bytes to allocate
 *
 * Allocates a block of memory from the slice allocator.
 *
 * The block address handed out can be expected to be aligned
 * to at least `1 * sizeof (void*)`, though in general slices
 * are `2 * sizeof (void*)` bytes aligned; if a `malloc()`
 * fallback implementation is used instead, the alignment may
 * be reduced in a libc dependent fashion.
 *
 * Note that the underlying slice allocation mechanism can
 * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
 * Returns: a pointer to the allocated memory block, which will
 *     be %NULL if and only if @block_size is 0
 *
 * Since: 2.10
 */
gpointer
g_slice_alloc (gsize mem_size)
{
  ThreadMemory *tmem;
  gsize chunk_size;
  gpointer mem;
  guint acat;

  /* This gets the private structure for this thread. If the private
   * structure does not yet exist, it is created.
   *
   * This has a side effect of causing GSlice to be initialised, so it
   * must come first.
   */
  tmem = thread_memory_from_self ();

  chunk_size = P2ALIGN (mem_size);
  acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1)) /* allocate through magazine layer */
    {
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
            thread_memory_magazine1_reload (tmem, ix);
        }
      mem = thread_memory_magazine1_alloc (tmem, ix);
    }
  else if (acat == 2) /* allocate through slab allocator */
    {
      g_mutex_lock (&allocator->slab_mutex);
      mem = slab_allocator_alloc_chunk (chunk_size);
      g_mutex_unlock (&allocator->slab_mutex);
    }
  else /* delegate to system malloc */
    mem = g_malloc (mem_size);
  if (G_UNLIKELY (allocator->config.debug_blocks))
    smc_notify_alloc (mem, mem_size);

  TRACE (GLIB_SLICE_ALLOC((void*)mem, mem_size));

  return mem;
}

/**
 * g_slice_alloc0:
 * @block_size: the number of bytes to allocate
 *
 * Allocates a block of memory via g_slice_alloc() and initializes
 * the returned memory to 0. Note that the underlying slice allocation
 * mechanism can be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
 * Returns: a pointer to the allocated block, which will be %NULL if and only
 *     if @block_size is 0
 *
 * Since: 2.10
 */
gpointer
g_slice_alloc0 (gsize mem_size)
{
  gpointer mem = g_slice_alloc (mem_size);
  if (mem)
    memset (mem, 0, mem_size);
  return mem;
}

/**
 * g_slice_copy:
 * @block_size: the number of bytes to allocate
 * @mem_block: the memory to copy
 *
 * Allocates a block of memory from the slice allocator
 * and copies @block_size bytes into it from @mem_block.
 *
 * @mem_block must be non-%NULL if @block_size is non-zero.
 *
 * Returns: a pointer to the allocated memory block, which will be %NULL if and
 *     only if @block_size is 0
 *
 * Since: 2.14
 */
gpointer
g_slice_copy (gsize         mem_size,
              gconstpointer mem_block)
{
  gpointer mem = g_slice_alloc (mem_size);
  if (mem)
    memcpy (mem, mem_block, mem_size);
  return mem;
}

/**
 * g_slice_free1:
 * @block_size: the size of the block
 * @mem_block: a pointer to the block to free
 *
 * Frees a block of memory.
 *
 * The memory must have been allocated via g_slice_alloc() or
 * g_slice_alloc0() and the @block_size has to match the size
 * specified upon allocation. Note that the exact release behaviour
 * can be changed with the [`G_DEBUG=gc-friendly`][G_DEBUG] environment
 * variable, also see [`G_SLICE`][G_SLICE] for related debugging options.
 *
 * If @mem_block is %NULL, this function does nothing.
 *
 * Since: 2.10
 */
void
g_slice_free1 (gsize    mem_size,
               gpointer mem_block)
{
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_UNLIKELY (!mem_block))
    return;
  if (G_UNLIKELY (allocator->config.debug_blocks) &&
      !smc_notify_free (mem_block, mem_size))
    abort();
  if (G_LIKELY (acat == 1)) /* free through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            thread_memory_magazine2_unload (tmem, ix);
        }
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, chunk_size);
      thread_memory_magazine2_free (tmem, ix, mem_block);
    }
  else if (acat == 2) /* free through slab allocator */
    {
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, chunk_size);
      g_mutex_lock (&allocator->slab_mutex);
      slab_allocator_free_chunk (chunk_size, mem_block);
      g_mutex_unlock (&allocator->slab_mutex);
    }
  else /* delegate to system malloc */
    {
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, mem_size);
      g_free (mem_block);
    }
  TRACE (GLIB_SLICE_FREE((void*)mem_block, mem_size));
}

/**
 * g_slice_free_chain_with_offset:
 * @block_size: the size of the blocks
 * @mem_chain: a pointer to the first block of the chain
 * @next_offset: the offset of the @next field in the blocks
 *
 * Frees a linked list of memory blocks of equal size.
 *
 * The memory blocks must be equal-sized, allocated via
 * g_slice_alloc() or g_slice_alloc0() and linked together by a
 * @next pointer (similar to #GSList). The offset of the @next
 * field in each block is passed as third argument (@next_offset).
 * Note that the exact release behaviour can be changed with the
 * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable, also see
 * [`G_SLICE`][G_SLICE] for related debugging options.
 *
 * If @mem_chain is %NULL, this function does nothing.
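 *
 * A sketch of a typical call (the Node type is hypothetical; given a chain
 * `head` of equal-sized Node blocks, the offset is usually expressed via
 * G_STRUCT_OFFSET):
 *
 * |[<!-- language="C" -->
 * typedef struct _Node Node;
 * struct _Node { Node *next; int value; };
 *
 * g_slice_free_chain_with_offset (sizeof (Node), head,
 *                                 G_STRUCT_OFFSET (Node, next));
 * ]|
 *
 * Since: 2.10
 */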
void
g_slice_free_chain_with_offset (gsize    mem_size,
                                gpointer mem_chain,
                                gsize    next_offset)
{
  gpointer slice = mem_chain;
  /* while the thread magazines and the magazine cache are implemented so that
   * they can easily be extended to allow for free lists containing more free
   * lists for the first level nodes, which would allow O(1) freeing in this
   * function, the benefit of such an extension is questionable, because:
   * - the magazine size counts will become mere lower bounds which confuses
   *   the code adapting to lock contention;
   * - freeing a single node to the thread magazines is very fast, so this
   *   O(list_length) operation is multiplied by a fairly small factor;
   * - memory usage histograms on larger applications seem to indicate that
   *   the amount of released multi node lists is negligible in comparison
   *   to single node releases.
   * - the major performance bottleneck, namely g_private_get() or
   *   g_mutex_lock()/g_mutex_unlock() has already been moved out of the
   *   inner loop for freeing chained slices.
   */
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1)) /* free through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      while (slice)
        {
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          if (G_UNLIKELY (allocator->config.debug_blocks) &&
              !smc_notify_free (current, mem_size))
            abort();
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            {
              thread_memory_swap_magazines (tmem, ix);
              if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
                thread_memory_magazine2_unload (tmem, ix);
            }
          if (G_UNLIKELY (g_mem_gc_friendly))
            memset (current, 0, chunk_size);
          thread_memory_magazine2_free (tmem, ix, current);
        }
    }
  else if (acat == 2) /* free through slab allocator */
    {
      g_mutex_lock (&allocator->slab_mutex);
      while (slice)
        {
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          if (G_UNLIKELY (allocator->config.debug_blocks) &&
              !smc_notify_free (current, mem_size))
            abort();
          if (G_UNLIKELY (g_mem_gc_friendly))
            memset (current, 0, chunk_size);
          slab_allocator_free_chunk (chunk_size, current);
        }
      g_mutex_unlock (&allocator->slab_mutex);
    }
  else /* delegate to system malloc */
    while (slice)
      {
        guint8 *current = slice;
        slice = *(gpointer*) (current + next_offset);
        if (G_UNLIKELY (allocator->config.debug_blocks) &&
            !smc_notify_free (current, mem_size))
          abort();
        if (G_UNLIKELY (g_mem_gc_friendly))
          memset (current, 0, mem_size);
        g_free (current);
      }
}

/* --- single page allocator --- */
static void
allocator_slab_stack_push (Allocator *local_allocator,
                           guint      ix,
                           SlabInfo  *sinfo)
{
  /* insert slab at slab ring head */
  if (!local_allocator->slab_stack[ix])
    {
      sinfo->next = sinfo;
      sinfo->prev = sinfo;
    }
  else
    {
      SlabInfo *next = local_allocator->slab_stack[ix], *prev = next->prev;
      next->prev = sinfo;
      prev->next = sinfo;
      sinfo->next = next;
      sinfo->prev = prev;
    }
  local_allocator->slab_stack[ix] = sinfo;
}

static gsize
allocator_aligned_page_size (Allocator *local_allocator,
                             gsize      n_bytes)
{
  gsize val = (gsize) 1 << g_bit_storage (n_bytes - 1);
  val = MAX (val, local_allocator->min_page_size);
  return val;
}

static void
allocator_add_slab (Allocator *local_allocator,
                    guint      ix,
                    gsize      chunk_size)
{
  ChunkLink *chunk;
  SlabInfo *sinfo;
  gsize addr, padding, n_chunks, color = 0;
  gsize page_size;
  int errsv;
  gpointer aligned_memory;
  guint8 *mem;
  guint i;

  page_size = allocator_aligned_page_size (local_allocator, SLAB_BPAGE_SIZE (local_allocator, chunk_size));
  /* allocate 1 page for the chunks and the slab */
  aligned_memory = allocator_memalign (page_size, page_size - NATIVE_MALLOC_PADDING);
  errsv = errno;
  mem = aligned_memory;
  if (!mem)
    {
      const gchar *syserr = strerror (errsv);
      mem_error ("failed to allocate %u bytes (alignment: %u): %s\n",
                 (guint) (page_size - NATIVE_MALLOC_PADDING), (guint) page_size, syserr);
    }
  /* mask page address */
  addr = ((gsize) mem / page_size) * page_size;
  /* assert alignment */
  mem_assert (aligned_memory == (gpointer) addr);
  /* basic slab info setup */
  sinfo = (SlabInfo*) (mem + page_size - SLAB_INFO_SIZE);
  sinfo->n_allocated = 0;
  sinfo->chunks = NULL;
  /* figure cache colorization */
  n_chunks = ((guint8*) sinfo - mem) / chunk_size;
  padding = ((guint8*) sinfo - mem) - n_chunks * chunk_size;
  if (padding)
    {
      color = (local_allocator->color_accu * P2ALIGNMENT) % padding;
      local_allocator->color_accu += local_allocator->config.color_increment;
    }
  /* add chunks to free list */
  chunk = (ChunkLink*) (mem + color);
  sinfo->chunks = chunk;
  for (i = 0; i < n_chunks - 1; i++)
    {
      chunk->next = (ChunkLink*) ((guint8*) chunk + chunk_size);
      chunk = chunk->next;
    }
  chunk->next = NULL; /* last chunk */
  /* add slab to slab ring */
  allocator_slab_stack_push (local_allocator, ix, sinfo);
}

static gpointer
slab_allocator_alloc_chunk (gsize chunk_size)
{
  ChunkLink *chunk;
  guint ix = SLAB_INDEX (allocator, chunk_size);
  /* ensure non-empty slab */
  if (!allocator->slab_stack[ix] || !allocator->slab_stack[ix]->chunks)
    allocator_add_slab (allocator, ix, chunk_size);
  /* allocate chunk */
  chunk = allocator->slab_stack[ix]->chunks;
  allocator->slab_stack[ix]->chunks = chunk->next;
  allocator->slab_stack[ix]->n_allocated++;
  /* rotate empty slabs */
  if (!allocator->slab_stack[ix]->chunks)
    allocator->slab_stack[ix] = allocator->slab_stack[ix]->next;
  return chunk;
}

static void
slab_allocator_free_chunk (gsize    chunk_size,
                           gpointer mem)
{
  ChunkLink *chunk;
  gboolean was_empty;
  guint ix = SLAB_INDEX (allocator, chunk_size);
  gsize page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
  gsize addr = ((gsize) mem / page_size) * page_size;
  /* mask page address */
  guint8 *page = (guint8*) addr;
  SlabInfo *sinfo = (SlabInfo*) (page + page_size - SLAB_INFO_SIZE);
  /* assert valid chunk count */
  mem_assert (sinfo->n_allocated > 0);
  /* add chunk to free list */
  was_empty = sinfo->chunks == NULL;
  chunk = (ChunkLink*) mem;
  chunk->next = sinfo->chunks;
  sinfo->chunks = chunk;
  sinfo->n_allocated--;
  /* keep slab ring partially sorted, empty slabs at end */
  if (was_empty)
    {
      /* unlink slab */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      next->prev = prev;
      prev->next = next;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* insert slab at head */
      allocator_slab_stack_push (allocator, ix, sinfo);
    }
  /* eagerly free complete unused slabs */
  if (!sinfo->n_allocated)
    {
      /* unlink slab */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      next->prev = prev;
      prev->next = next;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* free slab */
      allocator_memfree (page_size, page);
    }
}

/* --- memalign implementation --- */
#ifdef HAVE_MALLOC_H
#include <malloc.h> /* memalign() */
#endif

/* from config.h:
 * define HAVE_POSIX_MEMALIGN 1 // if free(posix_memalign(3)) works, <stdlib.h>
 * define HAVE_MEMALIGN       1 // if free(memalign(3)) works, <malloc.h>
 * define HAVE_VALLOC         1 // if free(valloc(3)) works, <stdlib.h> or <malloc.h>
 * if none is provided, we implement malloc(3)-based alloc-only page alignment
 */

#if !(HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC)
G_GNUC_BEGIN_IGNORE_DEPRECATIONS
static GTrashStack *compat_valloc_trash = NULL;
G_GNUC_END_IGNORE_DEPRECATIONS
#endif

static gpointer
allocator_memalign (gsize alignment,
                    gsize memsize)
{
  gpointer aligned_memory = NULL;
  gint err = ENOMEM;
#if HAVE_POSIX_MEMALIGN
  err = posix_memalign (&aligned_memory, alignment, memsize);
#elif HAVE_MEMALIGN
  errno = 0;
  aligned_memory = memalign (alignment, memsize);
  err = errno;
#elif HAVE_VALLOC
  errno = 0;
  aligned_memory = valloc (memsize);
  err = errno;
#else
  /* simplistic non-freeing page allocator */
  mem_assert (alignment == sys_page_size);
  mem_assert (memsize <= sys_page_size);
  if (!compat_valloc_trash)
    {
      const guint n_pages = 16;
      guint8 *mem = malloc (n_pages * sys_page_size);
      err = errno;
      if (mem)
        {
          gint i = n_pages;
          guint8 *amem = (guint8*) ALIGN ((gsize) mem, sys_page_size);
          if (amem != mem)
            i--; /* mem wasn't page aligned */
          G_GNUC_BEGIN_IGNORE_DEPRECATIONS
          while (--i >= 0)
            g_trash_stack_push (&compat_valloc_trash, amem + i * sys_page_size);
          G_GNUC_END_IGNORE_DEPRECATIONS
        }
    }
  G_GNUC_BEGIN_IGNORE_DEPRECATIONS
  aligned_memory = g_trash_stack_pop (&compat_valloc_trash);
  G_GNUC_END_IGNORE_DEPRECATIONS
#endif
  if (!aligned_memory)
    errno = err;
  return aligned_memory;
}

static void
allocator_memfree (gsize    memsize,
                   gpointer mem)
{
#if HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC
  free (mem);
#else
  mem_assert (memsize <= sys_page_size);
  G_GNUC_BEGIN_IGNORE_DEPRECATIONS
  g_trash_stack_push (&compat_valloc_trash, mem);
  G_GNUC_END_IGNORE_DEPRECATIONS
#endif
}

static void
mem_error (const char *format,
           ...)
{
  const char *pname;
  va_list args;
  /* at least, put out "MEMORY-ERROR", in case we segfault during the rest of the function */
  fputs ("\n***MEMORY-ERROR***: ", stderr);
  pname = g_get_prgname();
  g_fprintf (stderr, "%s[%ld]: GSlice: ", pname ? pname : "", (long)getpid());
  va_start (args, format);
  g_vfprintf (stderr, format, args);
  va_end (args);
  fputs ("\n", stderr);
  abort();
}

/* --- g-slice memory checker tree --- */
typedef size_t SmcKType; /* key type */
typedef size_t SmcVType; /* value type */
typedef struct {
  SmcKType key;
  SmcVType value;
} SmcEntry;
static void     smc_tree_insert (SmcKType  key,
                                 SmcVType  value);
static gboolean smc_tree_lookup (SmcKType  key,
                                 SmcVType *value_p);
static gboolean smc_tree_remove (SmcKType  key);

/* --- g-slice memory checker implementation --- */
static void
smc_notify_alloc (void   *pointer,
                  size_t  size)
{
  size_t address = (size_t) pointer;
  if (pointer)
    smc_tree_insert (address, size);
}

static void
smc_notify_ignore (void *pointer)
{
  size_t address = (size_t) pointer;
  if (pointer)
    smc_tree_remove (address);
}

static int
smc_notify_free (void   *pointer,
                 size_t  size)
{
  size_t address = (size_t) pointer;
  SmcVType real_size;
  gboolean found_one;

  if (!pointer)
    return 1; /* ignore */
  found_one = smc_tree_lookup (address, &real_size);
  if (!found_one)
    {
      g_fprintf (stderr, "GSlice: MemChecker: attempt to release non-allocated block: %p size=%" G_GSIZE_FORMAT "\n", pointer, size);
      return 0;
    }
  if (real_size != size && (real_size || size))
    {
      g_fprintf (stderr, "GSlice: MemChecker: attempt to release block with invalid size: %p size=%" G_GSIZE_FORMAT " invalid-size=%" G_GSIZE_FORMAT "\n", pointer, real_size, size);
      return 0;
    }
  if (!smc_tree_remove (address))
    {
      g_fprintf (stderr, "GSlice: MemChecker: attempt to release non-allocated block: %p size=%" G_GSIZE_FORMAT "\n", pointer, size);
      return 0;
    }
  return 1; /* all fine */
}

/* --- g-slice memory checker tree implementation --- */
#define SMC_TRUNK_COUNT    (4093 /* 16381 */)        /* prime, to distribute trunk collisions (big, allocated just once) */
#define SMC_BRANCH_COUNT   (511)                     /* prime, to distribute branch collisions */
#define SMC_TRUNK_EXTENT   (SMC_BRANCH_COUNT * 2039) /* key address space per trunk, should distribute uniformly across BRANCH_COUNT */
#define SMC_TRUNK_HASH(k)  ((k / SMC_TRUNK_EXTENT) % SMC_TRUNK_COUNT) /* generate new trunk hash per megabyte (roughly) */
#define SMC_BRANCH_HASH(k) (k % SMC_BRANCH_COUNT)
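
/* Illustration: the two-level hash splits a block address so that nearby
 * allocations (within one SMC_TRUNK_EXTENT, i.e. 511 * 2039 = 1041929 bytes,
 * roughly 1MB) share a trunk while spreading across the 511 branches; for a
 * key k, the entry lives in
 * smc_tree_root[(k / SMC_TRUNK_EXTENT) % 4093][k % 511].
 */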

typedef struct {
  SmcEntry     *entries;
  unsigned int  n_entries;
} SmcBranch;

static SmcBranch **smc_tree_root = NULL;

static void
smc_tree_abort (int errval)
{
  const char *syserr = strerror (errval);
  mem_error ("MemChecker: failure in debugging tree: %s", syserr);
}

static inline SmcEntry*
smc_tree_branch_grow_L (SmcBranch   *branch,
                        unsigned int index)
{
  unsigned int old_size = branch->n_entries * sizeof (branch->entries[0]);
  unsigned int new_size = old_size + sizeof (branch->entries[0]);
  SmcEntry *entry;
  mem_assert (index <= branch->n_entries);
  branch->entries = (SmcEntry*) realloc (branch->entries, new_size);
  if (!branch->entries)
    smc_tree_abort (errno);
  entry = branch->entries + index;
  memmove (entry + 1, entry, (branch->n_entries - index) * sizeof (entry[0]));
  branch->n_entries += 1;
  return entry;
}

static inline SmcEntry*
smc_tree_branch_lookup_nearest_L (SmcBranch *branch,
                                  SmcKType   key)
{
  unsigned int n_nodes = branch->n_entries, offs = 0;
  SmcEntry *check = branch->entries;
  int cmp = 0;
  while (offs < n_nodes)
    {
      unsigned int i = (offs + n_nodes) >> 1;
      check = branch->entries + i;
      cmp = key < check->key ? -1 : key != check->key;
      if (cmp == 0)
        return check; /* return exact match */
      else if (cmp < 0)
        n_nodes = i;
      else /* (cmp > 0) */
        offs = i + 1;
    }
  /* check points at last mismatch, cmp > 0 indicates greater key */
  return cmp > 0 ? check + 1 : check; /* return insertion position for inexact match */
}

static void
smc_tree_insert (SmcKType key,
                 SmcVType value)
{
  unsigned int ix0, ix1;
  SmcEntry *entry;

  g_mutex_lock (&smc_tree_mutex);
  ix0 = SMC_TRUNK_HASH (key);
  ix1 = SMC_BRANCH_HASH (key);
  if (!smc_tree_root)
    {
      smc_tree_root = calloc (SMC_TRUNK_COUNT, sizeof (smc_tree_root[0]));
      if (!smc_tree_root)
        smc_tree_abort (errno);
    }
  if (!smc_tree_root[ix0])
    {
      smc_tree_root[ix0] = calloc (SMC_BRANCH_COUNT, sizeof (smc_tree_root[0][0]));
      if (!smc_tree_root[ix0])
        smc_tree_abort (errno);
    }
  entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
  if (!entry ||                                                                       /* need create */
      entry >= smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries || /* need append */
      entry->key != key)                                                              /* need insert */
    entry = smc_tree_branch_grow_L (&smc_tree_root[ix0][ix1], entry - smc_tree_root[ix0][ix1].entries);
  entry->key = key;
  entry->value = value;
  g_mutex_unlock (&smc_tree_mutex);
}

static gboolean
smc_tree_lookup (SmcKType  key,
                 SmcVType *value_p)
{
  SmcEntry *entry = NULL;
  unsigned int ix0 = SMC_TRUNK_HASH (key), ix1 = SMC_BRANCH_HASH (key);
  gboolean found_one = FALSE;
  *value_p = 0;
  g_mutex_lock (&smc_tree_mutex);
  if (smc_tree_root && smc_tree_root[ix0])
    {
      entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
      if (entry &&
          entry < smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries &&
          entry->key == key)
        {
          found_one = TRUE;
          *value_p = entry->value;
        }
    }
  g_mutex_unlock (&smc_tree_mutex);
  return found_one;
}

static gboolean
smc_tree_remove (SmcKType key)
{
  unsigned int ix0 = SMC_TRUNK_HASH (key), ix1 = SMC_BRANCH_HASH (key);
  gboolean found_one = FALSE;
  g_mutex_lock (&smc_tree_mutex);
  if (smc_tree_root && smc_tree_root[ix0])
    {
      SmcEntry *entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
      if (entry &&
          entry < smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries &&
          entry->key == key)
        {
          unsigned int i = entry - smc_tree_root[ix0][ix1].entries;
          smc_tree_root[ix0][ix1].n_entries -= 1;
          memmove (entry, entry + 1, (smc_tree_root[ix0][ix1].n_entries - i) * sizeof (entry[0]));
          if (!smc_tree_root[ix0][ix1].n_entries)
            {
              /* avoid useless pressure on the memory system */
              free (smc_tree_root[ix0][ix1].entries);
              smc_tree_root[ix0][ix1].entries = NULL;
            }
          found_one = TRUE;
        }
    }
  g_mutex_unlock (&smc_tree_mutex);
  return found_one;
}

#ifdef G_ENABLE_DEBUG
void
g_slice_debug_tree_statistics (void)
{
  g_mutex_lock (&smc_tree_mutex);
  if (smc_tree_root)
    {
      unsigned int i, j, t = 0, o = 0, b = 0, su = 0, ex = 0, en = 4294967295u;
      double tf, bf;
      for (i = 0; i < SMC_TRUNK_COUNT; i++)
        if (smc_tree_root[i])
          {
            t++;
            for (j = 0; j < SMC_BRANCH_COUNT; j++)
              if (smc_tree_root[i][j].n_entries)
                {
                  b++;
                  su += smc_tree_root[i][j].n_entries;
                  en = MIN (en, smc_tree_root[i][j].n_entries);
                  ex = MAX (ex, smc_tree_root[i][j].n_entries);
                }
              else if (smc_tree_root[i][j].entries)
                o++; /* formerly used, now empty */
          }
      tf = MAX (t, 1.0); /* max(1) to be a valid divisor */
      bf = MAX (b, 1.0); /* max(1) to be a valid divisor */
      g_fprintf (stderr, "GSlice: MemChecker: %u trunks, %u branches, %u old branches\n", t, b, o);
      g_fprintf (stderr, "GSlice: MemChecker: %f branches per trunk, %.2f%% utilization\n",
                 b / tf,
                 100.0 - (SMC_BRANCH_COUNT - b / tf) / (0.01 * SMC_BRANCH_COUNT));
      g_fprintf (stderr, "GSlice: MemChecker: %f entries per branch, %u minimum, %u maximum\n",
                 su / bf, en, ex);
    }
  else
    g_fprintf (stderr, "GSlice: MemChecker: root=NULL\n");
  g_mutex_unlock (&smc_tree_mutex);
}

/* sample statistics (beast + GSLice + 24h scripted core & GUI activity):
 *  PID %CPU %MEM    VSZ    RSS COMMAND
 * 8887 30.3 45.8 456068 414856 beast-0.7.1 empty.bse
 * $ cat /proc/8887/statm # total-program-size resident-set-size shared-pages text/code data/stack library dirty-pages
 * 114017 103714 2354 344 0 108676 0
 * $ cat /proc/8887/status
 * (gdb) print g_slice_debug_tree_statistics ()
 * GSlice: MemChecker: 422 trunks, 213068 branches, 0 old branches
 * GSlice: MemChecker: 504.900474 branches per trunk, 98.81% utilization
 * GSlice: MemChecker: 4.965039 entries per branch, 1 minimum, 37 maximum
 */
#endif /* G_ENABLE_DEBUG */