/* GLIB sliced memory - fast concurrent memory chunk allocator
 * Copyright (C) 2005 Tim Janik
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include "glibconfig.h"

#if defined HAVE_POSIX_MEMALIGN && defined POSIX_MEMALIGN_WITH_COMPLIANT_ALLOCS
#  define HAVE_COMPLIANT_POSIX_MEMALIGN 1
#endif

#if defined(HAVE_COMPLIANT_POSIX_MEMALIGN) && !defined(_XOPEN_SOURCE)
#define _XOPEN_SOURCE 600       /* posix_memalign() */
#endif
#include <stdlib.h>             /* posix_memalign() */
#include <string.h>             /* memset(), memcpy() */
#include <errno.h>
#include <unistd.h>             /* sysconf() */
#include <stdio.h>              /* fputs/fprintf */

#include "gmem.h"               /* gslice.h */
#include "gstrfuncs.h"
#include "gtrashstack.h"
#include "gtestutils.h"
#include "glib_trace.h"
/**
 * SECTION:memory_slices
 * @title: Memory Slices
 * @short_description: efficient way to allocate groups of equal-sized
 *     chunks of memory
 *
 * Memory slices provide a space-efficient and multi-processing scalable
 * way to allocate equal-sized pieces of memory, just like the original
 * #GMemChunks (from GLib 2.8), while avoiding their excessive
 * memory-waste, scalability and performance problems.
 *
 * To achieve these goals, the slice allocator uses a sophisticated,
 * layered design that has been inspired by Bonwick's slab allocator:
 * <ulink url="http://citeseer.ist.psu.edu/bonwick94slab.html">[Bonwick94]</ulink> Jeff Bonwick, The slab allocator: An object-caching kernel
 * memory allocator. USENIX 1994, and
 * <ulink url="http://citeseer.ist.psu.edu/bonwick01magazines.html">[Bonwick01]</ulink> Bonwick and Jonathan Adams, Magazines and vmem: Extending the
 * slab allocator to many CPUs and arbitrary resources. USENIX 2001.
 *
 * It uses posix_memalign() to optimize allocations of many equally-sized
 * chunks, and has per-thread free lists (the so-called magazine layer)
 * to quickly satisfy allocation requests of already known structure sizes.
 * This is accompanied by extra caching logic to keep freed memory around
 * for some time before returning it to the system. Memory that is unused
 * due to alignment constraints is used for cache colorization (random
 * distribution of chunk addresses) to improve CPU cache utilization. The
 * caching layer of the slice allocator adapts itself to high lock contention
 * to improve scalability.
 *
 * The slice allocator can allocate blocks as small as two pointers, and
 * unlike malloc(), it does not reserve extra space per block. For large block
 * sizes, g_slice_new() and g_slice_alloc() will automatically delegate to the
 * system malloc() implementation. For newly written code it is recommended
 * to use the new <literal>g_slice</literal> API instead of g_malloc() and
 * friends, as long as objects are not resized during their lifetime and the
 * object size used at allocation time is still available when freeing.
 *
 * <example>
 * <title>Using the slice allocator</title>
 * <programlisting>
 * gchar *mem[10000];
 * gint i, j;
 *
 * /* Allocate 10000 blocks. */
 * for (i = 0; i < 10000; i++)
 *   {
 *     mem[i] = g_slice_alloc (50);
 *
 *     /* Fill in the memory with some junk. */
 *     for (j = 0; j < 50; j++)
 *       mem[i][j] = i * j;
 *   }
 *
 * /* Now free all of the blocks. */
 * for (i = 0; i < 10000; i++)
 *   g_slice_free1 (50, mem[i]);
 * </programlisting></example>
 *
 * <example>
 * <title>Using the slice allocator with data structures</title>
 * <programlisting>
 * GRealArray *array;
 *
 * /* Allocate one block, using the g_slice_new() macro. */
 * array = g_slice_new (GRealArray);
 *
 * /* We can now use array just like a normal pointer to a structure. */
 * array->data            = NULL;
 * array->len             = 0;
 * array->alloc           = 0;
 * array->zero_terminated = (zero_terminated ? 1 : 0);
 * array->clear           = (clear ? 1 : 0);
 * array->elt_size        = elt_size;
 *
 * /* We can free the block, so it can be reused. */
 * g_slice_free (GRealArray, array);
 * </programlisting></example>
 */
/* the GSlice allocator is split up into 4 layers, roughly modelled after the slab
 * allocator and magazine extensions as outlined in:
 * + [Bonwick94] Jeff Bonwick, The slab allocator: An object-caching kernel
 *   memory allocator. USENIX 1994, http://citeseer.ist.psu.edu/bonwick94slab.html
 * + [Bonwick01] Bonwick and Jonathan Adams, Magazines and vmem: Extending the
 *   slab allocator to many CPUs and arbitrary resources.
 *   USENIX 2001, http://citeseer.ist.psu.edu/bonwick01magazines.html
 * the layers are:
 * - the thread magazines. for each (aligned) chunk size, a magazine (a list)
 *   of recently freed and soon to be allocated chunks is maintained per thread.
 *   this way, most alloc/free requests can be quickly satisfied from per-thread
 *   free lists which only require one g_private_get() call to retrieve the
 *   thread handle.
 * - the magazine cache. allocating and freeing chunks to/from threads only
 *   occurs at magazine sizes from a global depot of magazines. the depot
 *   maintains a 15 second working set of allocated magazines, so full
 *   magazines are not allocated and released too often.
 *   the chunk size dependent magazine sizes automatically adapt (within limits,
 *   see [3]) to lock contention to properly scale performance across a variety
 *   of parallel loads.
 * - the slab allocator. this allocator allocates slabs (blocks of memory) close
 *   to the system page size or multiples thereof which have to be page aligned.
 *   the blocks are divided into smaller chunks which are used to satisfy
 *   allocations from the upper layers. the space provided by the remainder of
 *   the chunk size division is used for cache colorization (random distribution
 *   of chunk addresses) to improve processor cache utilization. multiple slabs
 *   with the same chunk size are kept in a partially sorted ring to allow O(1)
 *   freeing and allocation of chunks (as long as the allocation of an entirely
 *   new slab can be avoided).
 * - the page allocator. on most modern systems, posix_memalign(3) or
 *   memalign(3) should be available, so this is used to allocate blocks with
 *   system page size based alignments and sizes or multiples thereof.
 *   if no memalign variant is provided, valloc() is used instead and
 *   block sizes are limited to the system page size (no multiples thereof).
 *   as a fallback, on systems without even valloc(), a malloc(3)-based page
 *   allocator with alloc-only behaviour is used.
 *
 * NOTES:
 * [1] some systems' memalign(3) implementations may rely on boundary tagging for
 *     the handed out memory chunks. to avoid excessive page-wise fragmentation,
 *     we reserve 2 * sizeof (void*) per block size for the system's memalign(3),
 *     specified in NATIVE_MALLOC_PADDING.
 * [2] using the slab allocator alone already provides for a fast and efficient
 *     allocator, it doesn't properly scale beyond single-threaded uses though.
 *     also, the slab allocator implements eager free(3)-ing, i.e. does not
 *     provide any form of caching or working set maintenance. so if used alone,
 *     it's vulnerable to thrashing for sequences of balanced (alloc, free) pairs
 *     at certain thresholds.
 * [3] magazine sizes are bound by an implementation specific minimum size and
 *     a chunk size specific maximum to limit magazine storage sizes to roughly
 *     16KB.
 * [4] allocating ca. 8 chunks per block/page keeps a good balance between
 *     external and internal fragmentation (<= 12.5%), since the unusable
 *     remainder of a slab is always smaller than one chunk out of at least
 *     eight. [Bonwick94]
 */
/* --- macros and constants --- */
#define LARGEALIGNMENT          (256)
#define P2ALIGNMENT             (2 * sizeof (gsize))                            /* fits 2 pointers (assumed to be 2 * GLIB_SIZEOF_SIZE_T below) */
#define ALIGN(size, base)       ((base) * (gsize) (((size) + (base) - 1) / (base)))
#define NATIVE_MALLOC_PADDING   P2ALIGNMENT                                     /* per-page padding left for native malloc(3) see [1] */
#define SLAB_INFO_SIZE          P2ALIGN (sizeof (SlabInfo) + NATIVE_MALLOC_PADDING)
#define MAX_MAGAZINE_SIZE       (256)                                           /* see [3] and allocator_get_magazine_threshold() for this */
#define MIN_MAGAZINE_SIZE       (4)
#define MAX_STAMP_COUNTER       (7)                                             /* distributes the load of gettimeofday() */
#define MAX_SLAB_CHUNK_SIZE(al) (((al)->max_page_size - SLAB_INFO_SIZE) / 8)    /* we want at least 8 chunks per page, see [4] */
#define MAX_SLAB_INDEX(al)      (SLAB_INDEX (al, MAX_SLAB_CHUNK_SIZE (al)) + 1)
#define SLAB_INDEX(al, asize)   ((asize) / P2ALIGNMENT - 1)                     /* asize must be P2ALIGNMENT aligned */
#define SLAB_CHUNK_SIZE(al, ix) (((ix) + 1) * P2ALIGNMENT)
#define SLAB_BPAGE_SIZE(al,csz) (8 * (csz) + SLAB_INFO_SIZE)

/* optimized version of ALIGN (size, P2ALIGNMENT) */
#if     GLIB_SIZEOF_SIZE_T * 2 == 8  /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0x7) & ~(gsize) 0x7)
#elif   GLIB_SIZEOF_SIZE_T * 2 == 16 /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0xf) & ~(gsize) 0xf)
#else
#define P2ALIGN(size)   ALIGN (size, P2ALIGNMENT)
#endif
/* special helpers to avoid gmessage.c dependency */
static void mem_error (const char *format, ...) G_GNUC_PRINTF (1,2);
#define mem_assert(cond)    do { if (G_LIKELY (cond)) ; else mem_error ("assertion failed: %s", #cond); } while (0)
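
/* Illustrative only (our annotation, never compiled): the size/index
 * round-trip performed by the macros above, assuming a typical 64-bit
 * system where P2ALIGNMENT == 16. The figures are for demonstration.
 */
#if 0
static void
p2align_example (void)
{
  gsize chunk_size = P2ALIGN (50);                /* (50 + 15) & ~15 == 64 */
  guint ix = SLAB_INDEX (allocator, chunk_size);  /* 64 / 16 - 1 == 3 */
  /* mapping an index back yields the original aligned chunk size */
  mem_assert (SLAB_CHUNK_SIZE (allocator, ix) == chunk_size);
}
#endif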
/* --- structures --- */
typedef struct _ChunkLink      ChunkLink;
typedef struct _SlabInfo       SlabInfo;
typedef struct _CachedMagazine CachedMagazine;
struct _ChunkLink {
  ChunkLink *next;
  ChunkLink *data;
};
struct _SlabInfo {
  ChunkLink *chunks;
  guint      n_allocated;
  SlabInfo  *next, *prev;
};
typedef struct {
  ChunkLink *chunks;
  gsize      count;                     /* approximate chunk list length */
} Magazine;
typedef struct {
  Magazine   *magazine1;                /* array of MAX_SLAB_INDEX (allocator) */
  Magazine   *magazine2;                /* array of MAX_SLAB_INDEX (allocator) */
} ThreadMemory;
typedef struct {
  gboolean always_malloc;
  gboolean bypass_magazines;
  gboolean debug_blocks;
  gsize    working_set_msecs;
  guint    color_increment;
} SliceConfig;
typedef struct {
  /* const after initialization */
  gsize         min_page_size, max_page_size;
  SliceConfig   config;
  gsize         max_slab_chunk_size_for_magazine_cache;
  /* magazine cache */
  GMutex        magazine_mutex;
  ChunkLink   **magazines;              /* array of MAX_SLAB_INDEX (allocator) */
  guint        *contention_counters;    /* array of MAX_SLAB_INDEX (allocator) */
  gint          mutex_counter;
  guint         stamp_counter;
  guint         last_stamp;
  /* slab allocator */
  GMutex        slab_mutex;
  SlabInfo    **slab_stack;             /* array of MAX_SLAB_INDEX (allocator) */
  guint         color_accu;
} Allocator;
/* --- g-slice prototypes --- */
static gpointer     slab_allocator_alloc_chunk       (gsize      chunk_size);
static void         slab_allocator_free_chunk        (gsize      chunk_size,
                                                      gpointer   mem);
static void         private_thread_memory_cleanup    (gpointer   data);
static gpointer     allocator_memalign               (gsize      alignment,
                                                      gsize      memsize);
static void         allocator_memfree                (gsize      memsize,
                                                      gpointer   mem);
static inline void  magazine_cache_update_stamp      (void);
static inline gsize allocator_get_magazine_threshold (Allocator *allocator,
                                                      guint      ix);

/* --- g-slice memory checker --- */
static void     smc_notify_alloc  (void   *pointer,
                                   size_t  size);
static int      smc_notify_free   (void   *pointer,
                                   size_t  size);

/* --- variables --- */
static GPrivate    private_thread_memory = G_PRIVATE_INIT (private_thread_memory_cleanup);
static gsize       sys_page_size = 0;
static Allocator   allocator[1] = { { 0, }, };
static SliceConfig slice_config = {
  FALSE,        /* always_malloc */
  FALSE,        /* bypass_magazines */
  FALSE,        /* debug_blocks */
  15 * 1000,    /* working_set_msecs */
  1,            /* color increment, alt: 0x7fffffff */
};
static GMutex      smc_tree_mutex; /* mutex for G_SLICE=debug-blocks */
/* --- auxiliary functions --- */
void
g_slice_set_config (GSliceConfig ckey,
                    gint64       value)
{
  g_return_if_fail (sys_page_size == 0);
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      slice_config.always_malloc = value != 0;
      break;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      slice_config.bypass_magazines = value != 0;
      break;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      slice_config.working_set_msecs = value;
      break;
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      slice_config.color_increment = value;
      break;
    default: ;
    }
}
gint64
g_slice_get_config (GSliceConfig ckey)
{
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      return slice_config.always_malloc;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      return slice_config.bypass_magazines;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      return slice_config.working_set_msecs;
    case G_SLICE_CONFIG_CHUNK_SIZES:
      return MAX_SLAB_INDEX (allocator);
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      return slice_config.color_increment;
    default:
      return 0;
    }
}
gint64*
g_slice_get_config_state (GSliceConfig ckey,
                          gint64       address,
                          guint       *n_values)
{
  guint i = 0;
  gint64 array[64];
  g_return_val_if_fail (n_values != NULL, NULL);
  switch (ckey)
    {
    case G_SLICE_CONFIG_CONTENTION_COUNTER:
      array[i++] = SLAB_CHUNK_SIZE (allocator, address);
      array[i++] = allocator->contention_counters[address];
      array[i++] = allocator_get_magazine_threshold (allocator, address);
      *n_values = i;
      return g_memdup (array, sizeof (array[0]) * *n_values);
    default:
      return NULL;
    }
}
static void
slice_config_init (SliceConfig *config)
{
  const gchar *val;

  *config = slice_config;

  val = getenv ("G_SLICE");
  if (val != NULL)
    {
      gint flags;
      const GDebugKey keys[] = {
        { "always-malloc", 1 << 0 },
        { "debug-blocks",  1 << 1 },
      };
      flags = g_parse_debug_string (val, keys, G_N_ELEMENTS (keys));
      if (flags & (1 << 0))
        config->always_malloc = TRUE;
      if (flags & (1 << 1))
        config->debug_blocks = TRUE;
    }
  else
    {
      /* G_SLICE was not specified, so check if valgrind is running and
       * disable ourselves if it is.
       *
       * This way it's possible to force gslice to be enabled under
       * valgrind just by setting G_SLICE to the empty string.
       */
      if (RUNNING_ON_VALGRIND)
        config->always_malloc = TRUE;
    }
}
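
/* A hedged usage sketch (our annotation): G_SLICE is read only once, at
 * allocator initialization, so it must be set before the process performs
 * its first slice allocation, e.g.:
 */
#if 0
int
main (int argc, char *argv[])
{
  /* hypothetical example; normally G_SLICE is set in the user's environment */
  g_setenv ("G_SLICE", "always-malloc,debug-blocks", TRUE);
  gpointer mem = g_slice_alloc (128);   /* the first call locks in the config */
  g_slice_free1 (128, mem);
  return 0;
}
#endif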
static void
g_slice_init_nomessage (void)
{
  /* we may not use g_error() or friends here */
  mem_assert (sys_page_size == 0);
  mem_assert (MIN_MAGAZINE_SIZE >= 4);

#ifdef G_OS_WIN32
  {
    SYSTEM_INFO system_info;
    GetSystemInfo (&system_info);
    sys_page_size = system_info.dwPageSize;
  }
#else
  sys_page_size = sysconf (_SC_PAGESIZE); /* = sysconf (_SC_PAGE_SIZE); = getpagesize(); */
#endif
  mem_assert (sys_page_size >= 2 * LARGEALIGNMENT);
  mem_assert ((sys_page_size & (sys_page_size - 1)) == 0);
  slice_config_init (&allocator->config);
  allocator->min_page_size = sys_page_size;
#if HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN
  /* allow allocation of pages up to 8KB (with 8KB alignment).
   * this is useful because many medium to large sized structures
   * fit less than 8 times (see [4]) into 4KB pages.
   * we allow very small page sizes here, to reduce wastage in
   * threads if only small allocations are required (this does
   * bear the risk of increasing allocation times and fragmentation
   * though).
   */
  allocator->min_page_size = MAX (allocator->min_page_size, 4096);
  allocator->max_page_size = MAX (allocator->min_page_size, 8192);
  allocator->min_page_size = MIN (allocator->min_page_size, 128);
#else
  /* we can only align to system page size */
  allocator->max_page_size = sys_page_size;
#endif
  if (allocator->config.always_malloc)
    {
      allocator->contention_counters = NULL;
      allocator->magazines = NULL;
      allocator->slab_stack = NULL;
    }
  else
    {
      allocator->contention_counters = g_new0 (guint, MAX_SLAB_INDEX (allocator));
      allocator->magazines = g_new0 (ChunkLink*, MAX_SLAB_INDEX (allocator));
      allocator->slab_stack = g_new0 (SlabInfo*, MAX_SLAB_INDEX (allocator));
    }

  allocator->mutex_counter = 0;
  allocator->stamp_counter = MAX_STAMP_COUNTER; /* force initial update */
  allocator->last_stamp = 0;
  allocator->color_accu = 0;
  magazine_cache_update_stamp();
  /* values cached for performance reasons */
  allocator->max_slab_chunk_size_for_magazine_cache = MAX_SLAB_CHUNK_SIZE (allocator);
  if (allocator->config.always_malloc || allocator->config.bypass_magazines)
    allocator->max_slab_chunk_size_for_magazine_cache = 0;      /* non-optimized cases */
}
static inline guint
allocator_categorize (gsize aligned_chunk_size)
{
  /* speed up the likely path */
  if (G_LIKELY (aligned_chunk_size && aligned_chunk_size <= allocator->max_slab_chunk_size_for_magazine_cache))
    return 1;           /* use magazine cache */

  if (!allocator->config.always_malloc &&
      aligned_chunk_size &&
      aligned_chunk_size <= MAX_SLAB_CHUNK_SIZE (allocator))
    {
      if (allocator->config.bypass_magazines)
        return 2;       /* use slab allocator, see [2] */
      return 1;         /* use magazine cache */
    }
  return 0;             /* use malloc() */
}
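
/* Illustrative only (our annotation, never compiled): how sizes map to the
 * categories above, assuming the default configuration (no always-malloc,
 * magazines enabled).
 */
#if 0
static void
categorize_example (void)
{
  gsize small = P2ALIGN (24);             /* e.g. a small list node */
  gsize huge  = P2ALIGN (64 * 1024);      /* larger than any slab chunk */
  mem_assert (allocator_categorize (small) == 1);  /* magazine cache */
  mem_assert (allocator_categorize (huge) == 0);   /* delegated to g_malloc */
}
#endif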
static void
g_mutex_lock_a (GMutex *mutex,
                guint  *contention_counter)
{
  gboolean contention = FALSE;
  if (!g_mutex_trylock (mutex))
    {
      g_mutex_lock (mutex);
      contention = TRUE;
    }
  if (contention)
    {
      allocator->mutex_counter++;
      if (allocator->mutex_counter >= 1)        /* quickly adapt to contention */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MIN (*contention_counter + 1, MAX_MAGAZINE_SIZE);
        }
    }
  else /* !contention */
    {
      allocator->mutex_counter--;
      if (allocator->mutex_counter < -11)       /* moderately recover magazine sizes */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MAX (*contention_counter, 1) - 1;
        }
    }
}
static inline ThreadMemory*
thread_memory_from_self (void)
{
  ThreadMemory *tmem = g_private_get (&private_thread_memory);
  if (G_UNLIKELY (!tmem))
    {
      static GMutex init_mutex;
      guint n_magazines;

      g_mutex_lock (&init_mutex);
      if G_UNLIKELY (sys_page_size == 0)
        g_slice_init_nomessage ();
      g_mutex_unlock (&init_mutex);

      n_magazines = MAX_SLAB_INDEX (allocator);
      tmem = g_malloc0 (sizeof (ThreadMemory) + sizeof (Magazine) * 2 * n_magazines);
      tmem->magazine1 = (Magazine*) (tmem + 1);
      tmem->magazine2 = &tmem->magazine1[n_magazines];
      g_private_set (&private_thread_memory, tmem);
    }
  return tmem;
}
static inline ChunkLink*
magazine_chain_pop_head (ChunkLink **magazine_chunks)
{
  /* magazine chains are linked via ChunkLink->next.
   * each ChunkLink->data of the toplevel chain may point to a subchain,
   * linked via ChunkLink->next. ChunkLink->data of the subchains just
   * contains uninitialized junk.
   */
  ChunkLink *chunk = (*magazine_chunks)->data;
  if (G_UNLIKELY (chunk))
    {
      /* allocating from freed list */
      (*magazine_chunks)->data = chunk->next;
    }
  else
    {
      chunk = *magazine_chunks;
      *magazine_chunks = chunk->next;
    }
  return chunk;
}
#if 0 /* useful for debugging */
magazine_count (ChunkLink *head)
  ChunkLink *child = head->data;
  for (child = head->data; child; child = child->next)
static inline gsize
allocator_get_magazine_threshold (Allocator *allocator,
                                  guint      ix)
{
  /* the magazine size calculated here has a lower bound of MIN_MAGAZINE_SIZE,
   * which is required by the implementation. also, for moderately sized chunks
   * (say >= 64 bytes), magazine sizes shouldn't be much smaller than the number
   * of chunks available per page/2 to avoid excessive traffic in the magazine
   * cache for small to medium sized structures.
   * the upper bound of the magazine size is effectively provided by
   * MAX_MAGAZINE_SIZE. for larger chunks, this number is scaled down so that
   * the content of a single magazine doesn't exceed ca. 16KB.
   */
  gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
  guint threshold = MAX (MIN_MAGAZINE_SIZE, allocator->max_page_size / MAX (5 * chunk_size, 5 * 32));
  guint contention_counter = allocator->contention_counters[ix];
  if (G_UNLIKELY (contention_counter))  /* single CPU bias */
    {
      /* adapt contention counter thresholds to chunk sizes */
      contention_counter = contention_counter * 64 / chunk_size;
      threshold = MAX (threshold, contention_counter);
    }
  return threshold;
}
/* --- magazine cache --- */
static inline void
magazine_cache_update_stamp (void)
{
  if (allocator->stamp_counter >= MAX_STAMP_COUNTER)
    {
      GTimeVal tv;
      g_get_current_time (&tv);
      allocator->last_stamp = tv.tv_sec * 1000 + tv.tv_usec / 1000; /* milliseconds */
      allocator->stamp_counter = 0;
    }
  else
    allocator->stamp_counter++;
}
static inline ChunkLink*
magazine_chain_prepare_fields (ChunkLink *magazine_chunks)
{
  ChunkLink *chunk1;
  ChunkLink *chunk2;
  ChunkLink *chunk3;
  ChunkLink *chunk4;
  /* checked upon initialization: mem_assert (MIN_MAGAZINE_SIZE >= 4); */
  /* ensure a magazine with at least 4 unused data pointers */
  chunk1 = magazine_chain_pop_head (&magazine_chunks);
  chunk2 = magazine_chain_pop_head (&magazine_chunks);
  chunk3 = magazine_chain_pop_head (&magazine_chunks);
  chunk4 = magazine_chain_pop_head (&magazine_chunks);
  chunk4->next = magazine_chunks;
  chunk3->next = chunk4;
  chunk2->next = chunk3;
  chunk1->next = chunk2;
  return chunk1;
}
/* access the first 3 fields of a specially prepared magazine chain */
#define magazine_chain_prev(mc)         ((mc)->data)
#define magazine_chain_stamp(mc)        ((mc)->next->data)
#define magazine_chain_uint_stamp(mc)   GPOINTER_TO_UINT ((mc)->next->data)
#define magazine_chain_next(mc)         ((mc)->next->next->data)
#define magazine_chain_count(mc)        ((mc)->next->next->next->data)
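
/* Layout sketch (our annotation, derived from the accessors above): after
 * magazine_chain_prepare_fields(), the data fields of the first four links
 * are repurposed as bookkeeping slots, while the remaining links (and their
 * subchains) hold the actual free chunks:
 *
 *   head -> [data: prev] -> [data: stamp] -> [data: next] -> [data: count] -> chunks...
 *
 * e.g. magazine_chain_count (mc) yields the chunk count that
 * magazine_cache_push_magazine() stored for the magazine mc.
 */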
static void
magazine_cache_trim (Allocator *allocator,
                     guint      ix,
                     guint      stamp)
{
  /* g_mutex_lock (allocator->mutex); done by caller */
  /* trim magazine cache from tail */
  ChunkLink *current = magazine_chain_prev (allocator->magazines[ix]);
  ChunkLink *trash = NULL;
  while (ABS (stamp - magazine_chain_uint_stamp (current)) >= allocator->config.working_set_msecs)
    {
      /* unlink */
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      /* clear special fields, put on trash stack */
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      magazine_chain_prev (current) = trash;
      trash = current;
      /* fixup list head if required */
      if (current == allocator->magazines[ix])
        {
          allocator->magazines[ix] = NULL;
          break;
        }
      current = prev;
    }
  g_mutex_unlock (&allocator->magazine_mutex);
  /* free trash */
  if (trash)
    {
      const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      g_mutex_lock (&allocator->slab_mutex);
      while (trash)
        {
          ChunkLink *current = trash;
          trash = magazine_chain_prev (current);
          magazine_chain_prev (current) = NULL; /* clear special field */
          while (current)
            {
              ChunkLink *chunk = magazine_chain_pop_head (&current);
              slab_allocator_free_chunk (chunk_size, chunk);
            }
        }
      g_mutex_unlock (&allocator->slab_mutex);
    }
}
static void
magazine_cache_push_magazine (guint      ix,
                              ChunkLink *magazine_chunks,
                              gsize      count) /* must be >= MIN_MAGAZINE_SIZE */
{
  ChunkLink *current = magazine_chain_prepare_fields (magazine_chunks);
  ChunkLink *next, *prev;
  g_mutex_lock (&allocator->magazine_mutex);
  /* add magazine at head */
  next = allocator->magazines[ix];
  if (next)
    prev = magazine_chain_prev (next);
  else
    next = prev = current;
  magazine_chain_next (prev) = current;
  magazine_chain_prev (next) = current;
  magazine_chain_prev (current) = prev;
  magazine_chain_next (current) = next;
  magazine_chain_count (current) = (gpointer) count;
  /* stamp magazine */
  magazine_cache_update_stamp();
  magazine_chain_stamp (current) = GUINT_TO_POINTER (allocator->last_stamp);
  allocator->magazines[ix] = current;
  /* free old magazines beyond a certain threshold */
  magazine_cache_trim (allocator, ix, allocator->last_stamp);
  /* g_mutex_unlock (allocator->mutex); was done by magazine_cache_trim() */
}
static ChunkLink*
magazine_cache_pop_magazine (guint  ix,
                             gsize *countp)
{
  g_mutex_lock_a (&allocator->magazine_mutex, &allocator->contention_counters[ix]);
  if (!allocator->magazines[ix])
    {
      guint magazine_threshold = allocator_get_magazine_threshold (allocator, ix);
      gsize i, chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      ChunkLink *chunk, *head;
      g_mutex_unlock (&allocator->magazine_mutex);
      g_mutex_lock (&allocator->slab_mutex);
      head = slab_allocator_alloc_chunk (chunk_size);
      head->data = NULL;
      chunk = head;
      for (i = 1; i < magazine_threshold; i++)
        {
          chunk->next = slab_allocator_alloc_chunk (chunk_size);
          chunk = chunk->next;
          chunk->data = NULL;
        }
      chunk->next = NULL;
      g_mutex_unlock (&allocator->slab_mutex);
      *countp = i;
      return head;
    }
  else
    {
      ChunkLink *current = allocator->magazines[ix];
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      /* unlink */
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      allocator->magazines[ix] = next == current ? NULL : next;
      g_mutex_unlock (&allocator->magazine_mutex);
      /* clear special fields and hand out */
      *countp = (gsize) magazine_chain_count (current);
      magazine_chain_prev (current) = NULL;
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      return current;
    }
}
/* --- thread magazines --- */
static void
private_thread_memory_cleanup (gpointer data)
{
  ThreadMemory *tmem = data;
  const guint n_magazines = MAX_SLAB_INDEX (allocator);
  guint ix;
  for (ix = 0; ix < n_magazines; ix++)
    {
      Magazine *mags[2];
      guint j;
      mags[0] = &tmem->magazine1[ix];
      mags[1] = &tmem->magazine2[ix];
      for (j = 0; j < 2; j++)
        {
          Magazine *mag = mags[j];
          if (mag->count >= MIN_MAGAZINE_SIZE) /* keep magazine for later reuse */
            magazine_cache_push_magazine (ix, mag->chunks, mag->count);
          else
            {
              const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
              g_mutex_lock (&allocator->slab_mutex);
              while (mag->chunks)
                {
                  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
                  slab_allocator_free_chunk (chunk_size, chunk);
                }
              g_mutex_unlock (&allocator->slab_mutex);
            }
        }
    }
  g_free (tmem);
}
static void
thread_memory_magazine1_reload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  mem_assert (mag->chunks == NULL); /* ensure that we may reset mag->count */
  mag->count = 0;
  mag->chunks = magazine_cache_pop_magazine (ix, &mag->count);
}

static void
thread_memory_magazine2_unload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine2[ix];
  magazine_cache_push_magazine (ix, mag->chunks, mag->count);
  mag->chunks = NULL;
  mag->count = 0;
}

static inline void
thread_memory_swap_magazines (ThreadMemory *tmem,
                              guint         ix)
{
  Magazine xmag = tmem->magazine1[ix];
  tmem->magazine1[ix] = tmem->magazine2[ix];
  tmem->magazine2[ix] = xmag;
}
static inline gboolean
thread_memory_magazine1_is_empty (ThreadMemory *tmem,
                                  guint         ix)
{
  return tmem->magazine1[ix].chunks == NULL;
}

static inline gboolean
thread_memory_magazine2_is_full (ThreadMemory *tmem,
                                 guint         ix)
{
  return tmem->magazine2[ix].count >= allocator_get_magazine_threshold (allocator, ix);
}

static inline gpointer
thread_memory_magazine1_alloc (ThreadMemory *tmem,
                               guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
  if (G_LIKELY (mag->count > 0))
    mag->count--;
  return chunk;
}

static inline void
thread_memory_magazine2_free (ThreadMemory *tmem,
                              guint         ix,
                              gpointer      mem)
{
  Magazine *mag = &tmem->magazine2[ix];
  ChunkLink *chunk = mem;
  chunk->data = NULL;
  chunk->next = mag->chunks;
  mag->chunks = chunk;
  mag->count++;
}
/* --- API functions --- */

/**
 * g_slice_new:
 * @type: the type to allocate, typically a structure name
 *
 * A convenience macro to allocate a block of memory from the
 * slice allocator.
 *
 * It calls g_slice_alloc() with <literal>sizeof (@type)</literal>
 * and casts the returned pointer to a pointer of the given type,
 * avoiding a type cast in the source code.
 * Note that the underlying slice allocation mechanism can
 * be changed with the <link linkend="G_SLICE">G_SLICE=always-malloc</link>
 * environment variable.
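 *
 * A minimal usage sketch (our illustration; MyPoint is a hypothetical
 * type, not part of GLib):
 * <informalexample><programlisting>
 * typedef struct { double x, y; } MyPoint;
 * MyPoint *p = g_slice_new (MyPoint);
 * p->x = 1.0;
 * p->y = 2.0;
 * g_slice_free (MyPoint, p);
 * </programlisting></informalexample>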
 *
 * Returns: a pointer to the allocated block, cast to a pointer to @type
 */
/**
 * g_slice_new0:
 * @type: the type to allocate, typically a structure name
 *
 * A convenience macro to allocate a block of memory from the
 * slice allocator and set the memory to 0.
 *
 * It calls g_slice_alloc0() with <literal>sizeof (@type)</literal>
 * and casts the returned pointer to a pointer of the given type,
 * avoiding a type cast in the source code.
 * Note that the underlying slice allocation mechanism can
 * be changed with the <link linkend="G_SLICE">G_SLICE=always-malloc</link>
 * environment variable.
 *
 * Returns: a pointer to the allocated block, cast to a pointer to @type
 */
/**
 * g_slice_dup:
 * @type: the type to duplicate, typically a structure name
 * @mem: the memory to copy into the allocated block
 *
 * A convenience macro to duplicate a block of memory using
 * the slice allocator.
 *
 * It calls g_slice_copy() with <literal>sizeof (@type)</literal>
 * and casts the returned pointer to a pointer of the given type,
 * avoiding a type cast in the source code.
 * Note that the underlying slice allocation mechanism can
 * be changed with the <link linkend="G_SLICE">G_SLICE=always-malloc</link>
 * environment variable.
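 *
 * A hedged usage sketch (our illustration; MyPoint is a hypothetical type):
 * <informalexample><programlisting>
 * MyPoint origin = { 0.0, 0.0 };
 * MyPoint *copy = g_slice_dup (MyPoint, &origin);  /* shallow struct copy */
 * g_slice_free (MyPoint, copy);
 * </programlisting></informalexample>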
 *
 * Returns: a pointer to the allocated block, cast to a pointer to @type
 */
/**
 * g_slice_free:
 * @type: the type of the block to free, typically a structure name
 * @mem: a pointer to the block to free
 *
 * A convenience macro to free a block of memory that has
 * been allocated from the slice allocator.
 *
 * It calls g_slice_free1() using <literal>sizeof (type)</literal>
 * as the block size.
 * Note that the exact release behaviour can be changed with the
 * <link linkend="G_DEBUG">G_DEBUG=gc-friendly</link> environment
 * variable, also see <link linkend="G_SLICE">G_SLICE</link> for
 * related debugging options.
 */
/**
 * g_slice_free_chain:
 * @type: the type of the @mem_chain blocks
 * @mem_chain: a pointer to the first block of the chain
 * @next: the field name of the next pointer in @type
 *
 * Frees a linked list of memory blocks of structure type @type.
 *
 * The memory blocks must be equal-sized, allocated via
 * g_slice_alloc() or g_slice_alloc0() and linked together by
 * a @next pointer (similar to #GSList). The name of the
 * @next field in @type is passed as third argument.
 * Note that the exact release behaviour can be changed with the
 * <link linkend="G_DEBUG">G_DEBUG=gc-friendly</link> environment
 * variable, also see <link linkend="G_SLICE">G_SLICE</link> for
 * related debugging options.
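 *
 * A hedged usage sketch (our illustration; MyList is a hypothetical
 * GSList-like type with a @next field):
 * <informalexample><programlisting>
 * typedef struct _MyList MyList;
 * struct _MyList { MyList *next; int payload; };
 * MyList *head = g_slice_new0 (MyList);
 * head->next = g_slice_new0 (MyList);
 * g_slice_free_chain (MyList, head, next);  /* frees both nodes */
 * </programlisting></informalexample>
 */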
/**
 * g_slice_alloc:
 * @block_size: the number of bytes to allocate
 *
 * Allocates a block of memory from the slice allocator.
 * The block address handed out can be expected to be aligned
 * to at least <literal>1 * sizeof (void*)</literal>, though in general
 * slices are <literal>2 * sizeof (void*)</literal> bytes aligned;
 * if a malloc() fallback implementation is used instead,
 * the alignment may be reduced in a libc dependent fashion.
 * Note that the underlying slice allocation mechanism can
 * be changed with the <link linkend="G_SLICE">G_SLICE=always-malloc</link>
 * environment variable.
 *
 * Returns: a pointer to the allocated memory block
 */
gpointer
g_slice_alloc (gsize mem_size)
{
  ThreadMemory *tmem;
  gsize chunk_size;
  gpointer mem;
  guint acat;

  /* This gets the private structure for this thread.  If the private
   * structure does not yet exist, it is created.
   *
   * This has a side effect of causing GSlice to be initialised, so it
   * must come first.
   */
  tmem = thread_memory_from_self ();

  chunk_size = P2ALIGN (mem_size);
  acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1))     /* allocate through magazine layer */
    {
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
            thread_memory_magazine1_reload (tmem, ix);
        }
      mem = thread_memory_magazine1_alloc (tmem, ix);
    }
  else if (acat == 2)           /* allocate through slab allocator */
    {
      g_mutex_lock (&allocator->slab_mutex);
      mem = slab_allocator_alloc_chunk (chunk_size);
      g_mutex_unlock (&allocator->slab_mutex);
    }
  else                          /* delegate to system malloc */
    mem = g_malloc (mem_size);
  if (G_UNLIKELY (allocator->config.debug_blocks))
    smc_notify_alloc (mem, mem_size);

  TRACE (GLIB_SLICE_ALLOC((void*)mem, mem_size));

  return mem;
}
/**
 * g_slice_alloc0:
 * @block_size: the number of bytes to allocate
 *
 * Allocates a block of memory via g_slice_alloc() and initializes
 * the returned memory to 0. Note that the underlying slice allocation
 * mechanism can be changed with the
 * <link linkend="G_SLICE">G_SLICE=always-malloc</link>
 * environment variable.
 *
 * Returns: a pointer to the allocated block
 */
gpointer
g_slice_alloc0 (gsize mem_size)
{
  gpointer mem = g_slice_alloc (mem_size);
  if (mem)
    memset (mem, 0, mem_size);
  return mem;
}
/**
 * g_slice_copy:
 * @block_size: the number of bytes to allocate
 * @mem_block: the memory to copy
 *
 * Allocates a block of memory from the slice allocator
 * and copies @block_size bytes into it from @mem_block.
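 *
 * A hedged usage sketch (our illustration):
 * <informalexample><programlisting>
 * const guint8 pattern[4] = { 1, 2, 3, 4 };
 * guint8 *copy = g_slice_copy (sizeof (pattern), pattern);
 * /* ... use copy ... */
 * g_slice_free1 (sizeof (pattern), copy);
 * </programlisting></informalexample>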
 *
 * Returns: a pointer to the allocated memory block
 */
gpointer
g_slice_copy (gsize         mem_size,
              gconstpointer mem_block)
{
  gpointer mem = g_slice_alloc (mem_size);
  if (mem)
    memcpy (mem, mem_block, mem_size);
  return mem;
}
/**
 * g_slice_free1:
 * @block_size: the size of the block
 * @mem_block: a pointer to the block to free
 *
 * Frees a block of memory.
 *
 * The memory must have been allocated via g_slice_alloc() or
 * g_slice_alloc0() and the @block_size has to match the size
 * specified upon allocation. Note that the exact release behaviour
 * can be changed with the
 * <link linkend="G_DEBUG">G_DEBUG=gc-friendly</link> environment
 * variable, also see <link linkend="G_SLICE">G_SLICE</link> for
 * related debugging options.
 */
void
g_slice_free1 (gsize    mem_size,
               gpointer mem_block)
{
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_UNLIKELY (!mem_block))
    return;
  if (G_UNLIKELY (allocator->config.debug_blocks) &&
      !smc_notify_free (mem_block, mem_size))
    abort();
  if (G_LIKELY (acat == 1))             /* allocate through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            thread_memory_magazine2_unload (tmem, ix);
        }
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, chunk_size);
      thread_memory_magazine2_free (tmem, ix, mem_block);
    }
  else if (acat == 2)                   /* allocate through slab allocator */
    {
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, chunk_size);
      g_mutex_lock (&allocator->slab_mutex);
      slab_allocator_free_chunk (chunk_size, mem_block);
      g_mutex_unlock (&allocator->slab_mutex);
    }
  else                                  /* delegate to system malloc */
    {
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, mem_size);
      g_free (mem_block);
    }
  TRACE (GLIB_SLICE_FREE((void*)mem_block, mem_size));
}
/**
 * g_slice_free_chain_with_offset:
 * @block_size: the size of the blocks
 * @mem_chain: a pointer to the first block of the chain
 * @next_offset: the offset of the @next field in the blocks
 *
 * Frees a linked list of memory blocks, each of size @block_size.
 *
 * The memory blocks must be equal-sized, allocated via
 * g_slice_alloc() or g_slice_alloc0() and linked together by a
 * @next pointer (similar to #GSList). The offset of the @next
 * field in each block is passed as third argument.
 * Note that the exact release behaviour can be changed with the
 * <link linkend="G_DEBUG">G_DEBUG=gc-friendly</link> environment
 * variable, also see <link linkend="G_SLICE">G_SLICE</link> for
 * related debugging options.
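 *
 * A hedged usage sketch (our illustration; MyNode is a hypothetical type,
 * and G_STRUCT_OFFSET computes the byte offset of its @next field):
 * <informalexample><programlisting>
 * typedef struct _MyNode MyNode;
 * struct _MyNode { int payload; MyNode *next; };
 * MyNode *chain = g_slice_new0 (MyNode);
 * chain->next = g_slice_new0 (MyNode);
 * g_slice_free_chain_with_offset (sizeof (MyNode), chain,
 *                                 G_STRUCT_OFFSET (MyNode, next));
 * </programlisting></informalexample>
 */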
void
g_slice_free_chain_with_offset (gsize    mem_size,
                                gpointer mem_chain,
                                gsize    next_offset)
{
  gpointer slice = mem_chain;
  /* while the thread magazines and the magazine cache are implemented so that
   * they can easily be extended to allow for free lists containing more free
   * lists for the first level nodes, which would allow O(1) freeing in this
   * function, the benefit of such an extension is questionable, because:
   * - the magazine size counts will become mere lower bounds which confuses
   *   the code adapting to lock contention;
   * - freeing a single node to the thread magazines is very fast, so this
   *   O(list_length) operation is multiplied by a fairly small factor;
   * - memory usage histograms on larger applications seem to indicate that
   *   the amount of released multi node lists is negligible in comparison
   *   to single node releases.
   * - the major performance bottleneck, namely g_private_get() or
   *   g_mutex_lock()/g_mutex_unlock() has already been moved out of the
   *   inner loop for freeing chained slices.
   */
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1))             /* allocate through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      while (slice)
        {
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          if (G_UNLIKELY (allocator->config.debug_blocks) &&
              !smc_notify_free (current, mem_size))
            abort();
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            {
              thread_memory_swap_magazines (tmem, ix);
              if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
                thread_memory_magazine2_unload (tmem, ix);
            }
          if (G_UNLIKELY (g_mem_gc_friendly))
            memset (current, 0, chunk_size);
          thread_memory_magazine2_free (tmem, ix, current);
        }
    }
  else if (acat == 2)                   /* allocate through slab allocator */
    {
      g_mutex_lock (&allocator->slab_mutex);
      while (slice)
        {
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          if (G_UNLIKELY (allocator->config.debug_blocks) &&
              !smc_notify_free (current, mem_size))
            abort();
          if (G_UNLIKELY (g_mem_gc_friendly))
            memset (current, 0, chunk_size);
          slab_allocator_free_chunk (chunk_size, current);
        }
      g_mutex_unlock (&allocator->slab_mutex);
    }
  else                                  /* delegate to system malloc */
    while (slice)
      {
        guint8 *current = slice;
        slice = *(gpointer*) (current + next_offset);
        if (G_UNLIKELY (allocator->config.debug_blocks) &&
            !smc_notify_free (current, mem_size))
          abort();
        if (G_UNLIKELY (g_mem_gc_friendly))
          memset (current, 0, mem_size);
        g_free (current);
      }
}
/* --- single page allocator --- */
static void
allocator_slab_stack_push (Allocator *allocator,
                           guint      ix,
                           SlabInfo  *sinfo)
{
  /* insert slab at slab ring head */
  if (!allocator->slab_stack[ix])
    {
      sinfo->next = sinfo;
      sinfo->prev = sinfo;
    }
  else
    {
      SlabInfo *next = allocator->slab_stack[ix], *prev = next->prev;
      next->prev = sinfo;
      prev->next = sinfo;
      sinfo->next = next;
      sinfo->prev = prev;
    }
  allocator->slab_stack[ix] = sinfo;
}
static gsize
allocator_aligned_page_size (Allocator *allocator,
                             gsize      n_bytes)
{
  gsize val = 1 << g_bit_storage (n_bytes - 1);
  val = MAX (val, allocator->min_page_size);
  return val;
}
static void
allocator_add_slab (Allocator *allocator,
                    guint      ix,
                    gsize      chunk_size)
{
  ChunkLink *chunk;
  SlabInfo *sinfo;
  gsize addr, padding, n_chunks, color = 0;
  gsize page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
  /* allocate 1 page for the chunks and the slab */
  gpointer aligned_memory = allocator_memalign (page_size, page_size - NATIVE_MALLOC_PADDING);
  guint8 *mem = aligned_memory;
  guint i;
  if (!mem)
    {
      const gchar *syserr = strerror (errno);
      mem_error ("failed to allocate %u bytes (alignment: %u): %s\n",
                 (guint) (page_size - NATIVE_MALLOC_PADDING), (guint) page_size, syserr);
    }
  /* mask page address */
  addr = ((gsize) mem / page_size) * page_size;
  /* assert alignment */
  mem_assert (aligned_memory == (gpointer) addr);
  /* basic slab info setup */
  sinfo = (SlabInfo*) (mem + page_size - SLAB_INFO_SIZE);
  sinfo->n_allocated = 0;
  sinfo->chunks = NULL;
  /* figure cache colorization */
  n_chunks = ((guint8*) sinfo - mem) / chunk_size;
  padding = ((guint8*) sinfo - mem) - n_chunks * chunk_size;
  if (padding)
    {
      color = (allocator->color_accu * P2ALIGNMENT) % padding;
      allocator->color_accu += allocator->config.color_increment;
    }
  /* add chunks to free list */
  chunk = (ChunkLink*) (mem + color);
  sinfo->chunks = chunk;
  for (i = 0; i < n_chunks - 1; i++)
    {
      chunk->next = (ChunkLink*) ((guint8*) chunk + chunk_size);
      chunk = chunk->next;
    }
  chunk->next = NULL;   /* last chunk */
  /* add slab to slab ring */
  allocator_slab_stack_push (allocator, ix, sinfo);
}
static gpointer
slab_allocator_alloc_chunk (gsize chunk_size)
{
  ChunkLink *chunk;
  guint ix = SLAB_INDEX (allocator, chunk_size);
  /* ensure non-empty slab */
  if (!allocator->slab_stack[ix] || !allocator->slab_stack[ix]->chunks)
    allocator_add_slab (allocator, ix, chunk_size);
  /* allocate chunk */
  chunk = allocator->slab_stack[ix]->chunks;
  allocator->slab_stack[ix]->chunks = chunk->next;
  allocator->slab_stack[ix]->n_allocated++;
  /* rotate empty slabs */
  if (!allocator->slab_stack[ix]->chunks)
    allocator->slab_stack[ix] = allocator->slab_stack[ix]->next;
  return chunk;
}
static void
slab_allocator_free_chunk (gsize    chunk_size,
                           gpointer mem)
{
  ChunkLink *chunk;
  gboolean was_empty;
  guint ix = SLAB_INDEX (allocator, chunk_size);
  gsize page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
  gsize addr = ((gsize) mem / page_size) * page_size;
  /* mask page address */
  guint8 *page = (guint8*) addr;
  SlabInfo *sinfo = (SlabInfo*) (page + page_size - SLAB_INFO_SIZE);
  /* assert valid chunk count */
  mem_assert (sinfo->n_allocated > 0);
  /* add chunk to free list */
  was_empty = sinfo->chunks == NULL;
  chunk = (ChunkLink*) mem;
  chunk->next = sinfo->chunks;
  sinfo->chunks = chunk;
  sinfo->n_allocated--;
  /* keep slab ring partially sorted, empty slabs at end */
  if (was_empty)
    {
      /* unlink slab */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      next->prev = prev;
      prev->next = next;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* insert slab at head */
      allocator_slab_stack_push (allocator, ix, sinfo);
    }
  /* eagerly free complete unused slabs */
  if (!sinfo->n_allocated)
    {
      /* unlink slab */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      next->prev = prev;
      prev->next = next;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* free slab */
      allocator_memfree (page_size, page);
    }
}
/* --- memalign implementation --- */
#ifdef HAVE_MALLOC_H
#include <malloc.h>             /* memalign() */
#endif

/* from config.h:
 * define HAVE_POSIX_MEMALIGN           1 // if free(posix_memalign(3)) works, <stdlib.h>
 * define HAVE_COMPLIANT_POSIX_MEMALIGN 1 // if free(posix_memalign(3)) works for sizes != 2^n, <stdlib.h>
 * define HAVE_MEMALIGN                 1 // if free(memalign(3)) works, <malloc.h>
 * define HAVE_VALLOC                   1 // if free(valloc(3)) works, <stdlib.h> or <malloc.h>
 * if none is provided, we implement malloc(3)-based alloc-only page alignment
 */

#if !(HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC)
static GTrashStack *compat_valloc_trash = NULL;
#endif

static gpointer
allocator_memalign (gsize alignment,
                    gsize memsize)
{
  gpointer aligned_memory = NULL;
  gint err = ENOMEM;
#if HAVE_COMPLIANT_POSIX_MEMALIGN
  err = posix_memalign (&aligned_memory, alignment, memsize);
#elif HAVE_MEMALIGN
  errno = 0;
  aligned_memory = memalign (alignment, memsize);
  err = errno;
#elif HAVE_VALLOC
  errno = 0;
  aligned_memory = valloc (memsize);
  err = errno;
#else
  /* simplistic non-freeing page allocator */
  mem_assert (alignment == sys_page_size);
  mem_assert (memsize <= sys_page_size);
  if (!compat_valloc_trash)
    {
      const guint n_pages = 16;
      guint8 *mem = malloc (n_pages * sys_page_size);
      err = errno;
      if (mem)
        {
          gint i = n_pages;
          guint8 *amem = (guint8*) ALIGN ((gsize) mem, sys_page_size);
          if (amem != mem)
            i--;        /* mem wasn't page aligned */
          while (--i >= 0)
            g_trash_stack_push (&compat_valloc_trash, amem + i * sys_page_size);
        }
    }
  aligned_memory = g_trash_stack_pop (&compat_valloc_trash);
#endif
  if (!aligned_memory)
    errno = err;
  return aligned_memory;
}
static void
allocator_memfree (gsize    memsize,
                   gpointer mem)
{
#if HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC
  free (mem);
#else
  mem_assert (memsize <= sys_page_size);
  g_trash_stack_push (&compat_valloc_trash, mem);
#endif
}
static void
mem_error (const char *format,
           ...)
{
  const char *pname;
  va_list args;
  /* at least, put out "MEMORY-ERROR", in case we segfault during the rest of the function */
  fputs ("\n***MEMORY-ERROR***: ", stderr);
  pname = g_get_prgname();
  fprintf (stderr, "%s[%ld]: GSlice: ", pname ? pname : "", (long)getpid());
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  fputs ("\n", stderr);
  abort();
  _exit (1);
}
/* --- g-slice memory checker tree --- */
typedef size_t SmcKType;                /* key type */
typedef size_t SmcVType;                /* value type */
typedef struct {
  SmcKType key;
  SmcVType value;
} SmcEntry;
static void     smc_tree_insert (SmcKType  key,
                                 SmcVType  value);
static gboolean smc_tree_lookup (SmcKType  key,
                                 SmcVType *value_p);
static gboolean smc_tree_remove (SmcKType  key);
/* --- g-slice memory checker implementation --- */
static void
smc_notify_alloc (void   *pointer,
                  size_t  size)
{
  size_t address = (size_t) pointer;
  if (pointer)
    smc_tree_insert (address, size);
}

static void
smc_notify_ignore (void *pointer)
{
  size_t address = (size_t) pointer;
  if (pointer)
    smc_tree_remove (address);
}

static int
smc_notify_free (void   *pointer,
                 size_t  size)
{
  size_t address = (size_t) pointer;
  SmcVType real_size;
  gboolean found_one;

  if (!pointer)
    return 1; /* ignore */
  found_one = smc_tree_lookup (address, &real_size);
  if (!found_one)
    {
      fprintf (stderr, "GSlice: MemChecker: attempt to release non-allocated block: %p size=%" G_GSIZE_FORMAT "\n", pointer, size);
      return 0;
    }
  if (real_size != size && (real_size || size))
    {
      fprintf (stderr, "GSlice: MemChecker: attempt to release block with invalid size: %p size=%" G_GSIZE_FORMAT " invalid-size=%" G_GSIZE_FORMAT "\n", pointer, real_size, size);
      return 0;
    }
  if (!smc_tree_remove (address))
    {
      fprintf (stderr, "GSlice: MemChecker: attempt to release non-allocated block: %p size=%" G_GSIZE_FORMAT "\n", pointer, size);
      return 0;
    }
  return 1; /* all fine */
}
/* --- g-slice memory checker tree implementation --- */
#define SMC_TRUNK_COUNT     (4093 /* 16381 */)          /* prime, to distribute trunk collisions (big, allocated just once) */
#define SMC_BRANCH_COUNT    (511)                       /* prime, to distribute branch collisions */
#define SMC_TRUNK_EXTENT    (SMC_BRANCH_COUNT * 2039)   /* key address space per trunk, should distribute uniformly across BRANCH_COUNT */
#define SMC_TRUNK_HASH(k)   ((k / SMC_TRUNK_EXTENT) % SMC_TRUNK_COUNT)  /* generate new trunk hash per megabyte (roughly) */
#define SMC_BRANCH_HASH(k)  (k % SMC_BRANCH_COUNT)

typedef struct {
  SmcEntry    *entries;
  unsigned int n_entries;
} SmcBranch;

static SmcBranch **smc_tree_root = NULL;
static void
smc_tree_abort (int errval)
{
  const char *syserr = strerror (errval);
  mem_error ("MemChecker: failure in debugging tree: %s", syserr);
}
static inline SmcEntry*
smc_tree_branch_grow_L (SmcBranch   *branch,
                        unsigned int index)
{
  unsigned int old_size = branch->n_entries * sizeof (branch->entries[0]);
  unsigned int new_size = old_size + sizeof (branch->entries[0]);
  SmcEntry *entry;
  mem_assert (index <= branch->n_entries);
  branch->entries = (SmcEntry*) realloc (branch->entries, new_size);
  if (!branch->entries)
    smc_tree_abort (errno);
  entry = branch->entries + index;
  memmove (entry + 1, entry, (branch->n_entries - index) * sizeof (entry[0]));
  branch->n_entries += 1;
  return entry;
}
static inline SmcEntry*
smc_tree_branch_lookup_nearest_L (SmcBranch *branch,
                                  SmcKType   key)
{
  unsigned int n_nodes = branch->n_entries, offs = 0;
  SmcEntry *check = branch->entries;
  int cmp = 0;
  while (offs < n_nodes)
    {
      unsigned int i = (offs + n_nodes) >> 1;
      check = branch->entries + i;
      cmp = key < check->key ? -1 : key != check->key;
      if (cmp == 0)
        return check;                   /* return exact match */
      else if (cmp < 0)
        n_nodes = i;
      else /* (cmp > 0) */
        offs = i + 1;
    }
  /* check points at last mismatch, cmp > 0 indicates greater key */
  return cmp > 0 ? check + 1 : check;   /* return insertion position for inexact match */
}
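
/* Illustrative only (our annotation, never compiled): how the two hashes
 * and the branch search above combine to test whether a key is recorded.
 * Like the _L functions, this assumes the caller holds smc_tree_mutex.
 */
#if 0
static gboolean
smc_example_contains_L (SmcKType key)
{
  unsigned int ix0 = SMC_TRUNK_HASH (key);   /* one of SMC_TRUNK_COUNT trunks */
  unsigned int ix1 = SMC_BRANCH_HASH (key);  /* one of SMC_BRANCH_COUNT branches */
  SmcEntry *entry;
  if (!smc_tree_root || !smc_tree_root[ix0])
    return FALSE;
  entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
  return entry &&
         entry < smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries &&
         entry->key == key;
}
#endif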
static void
smc_tree_insert (SmcKType key,
                 SmcVType value)
{
  unsigned int ix0, ix1;
  SmcEntry *entry;

  g_mutex_lock (&smc_tree_mutex);
  ix0 = SMC_TRUNK_HASH (key);
  ix1 = SMC_BRANCH_HASH (key);
  if (!smc_tree_root)
    {
      smc_tree_root = calloc (SMC_TRUNK_COUNT, sizeof (smc_tree_root[0]));
      if (!smc_tree_root)
        smc_tree_abort (errno);
    }
  if (!smc_tree_root[ix0])
    {
      smc_tree_root[ix0] = calloc (SMC_BRANCH_COUNT, sizeof (smc_tree_root[0][0]));
      if (!smc_tree_root[ix0])
        smc_tree_abort (errno);
    }
  entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
  if (!entry ||                                                                       /* need create */
      entry >= smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries || /* need append */
      entry->key != key)                                                              /* need insert */
    entry = smc_tree_branch_grow_L (&smc_tree_root[ix0][ix1], entry - smc_tree_root[ix0][ix1].entries);
  entry->key = key;
  entry->value = value;
  g_mutex_unlock (&smc_tree_mutex);
}
static gboolean
smc_tree_lookup (SmcKType  key,
                 SmcVType *value_p)
{
  SmcEntry *entry = NULL;
  unsigned int ix0 = SMC_TRUNK_HASH (key), ix1 = SMC_BRANCH_HASH (key);
  gboolean found_one = FALSE;
  *value_p = 0;
  g_mutex_lock (&smc_tree_mutex);
  if (smc_tree_root && smc_tree_root[ix0])
    {
      entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
      if (entry &&
          entry < smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries &&
          entry->key == key)
        {
          found_one = TRUE;
          *value_p = entry->value;
        }
    }
  g_mutex_unlock (&smc_tree_mutex);
  return found_one;
}
static gboolean
smc_tree_remove (SmcKType key)
{
  unsigned int ix0 = SMC_TRUNK_HASH (key), ix1 = SMC_BRANCH_HASH (key);
  gboolean found_one = FALSE;
  g_mutex_lock (&smc_tree_mutex);
  if (smc_tree_root && smc_tree_root[ix0])
    {
      SmcEntry *entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
      if (entry &&
          entry < smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries &&
          entry->key == key)
        {
          unsigned int i = entry - smc_tree_root[ix0][ix1].entries;
          smc_tree_root[ix0][ix1].n_entries -= 1;
          memmove (entry, entry + 1, (smc_tree_root[ix0][ix1].n_entries - i) * sizeof (entry[0]));
          if (!smc_tree_root[ix0][ix1].n_entries)
            {
              /* avoid useless pressure on the memory system */
              free (smc_tree_root[ix0][ix1].entries);
              smc_tree_root[ix0][ix1].entries = NULL;
            }
          found_one = TRUE;
        }
    }
  g_mutex_unlock (&smc_tree_mutex);
  return found_one;
}
#ifdef G_ENABLE_DEBUG
void
g_slice_debug_tree_statistics (void)
{
  g_mutex_lock (&smc_tree_mutex);
  if (smc_tree_root)
    {
      unsigned int i, j, t = 0, o = 0, b = 0, su = 0, ex = 0, en = 4294967295u;
      double tf, bf;
      for (i = 0; i < SMC_TRUNK_COUNT; i++)
        if (smc_tree_root[i])
          {
            t++;
            for (j = 0; j < SMC_BRANCH_COUNT; j++)
              if (smc_tree_root[i][j].n_entries)
                {
                  b++;
                  su += smc_tree_root[i][j].n_entries;
                  en = MIN (en, smc_tree_root[i][j].n_entries);
                  ex = MAX (ex, smc_tree_root[i][j].n_entries);
                }
              else if (smc_tree_root[i][j].entries)
                o++; /* formerly used, now empty */
          }
      en = b ? en : 0;
      tf = MAX (t, 1.0); /* max(1) to be a valid divisor */
      bf = MAX (b, 1.0); /* max(1) to be a valid divisor */
      fprintf (stderr, "GSlice: MemChecker: %u trunks, %u branches, %u old branches\n", t, b, o);
      fprintf (stderr, "GSlice: MemChecker: %f branches per trunk, %.2f%% utilization\n",
               b / tf,
               100.0 - (SMC_BRANCH_COUNT - b / tf) / (0.01 * SMC_BRANCH_COUNT));
      fprintf (stderr, "GSlice: MemChecker: %f entries per branch, %u minimum, %u maximum\n",
               su / bf, en, ex);
    }
  else
    fprintf (stderr, "GSlice: MemChecker: root=NULL\n");
  g_mutex_unlock (&smc_tree_mutex);
}
/* sample statistics (beast + GSlice + 24h scripted core & GUI activity):
 *  PID %CPU %MEM   VSZ    RSS    COMMAND
 * 8887 30.3 45.8  456068 414856  beast-0.7.1 empty.bse
 * $ cat /proc/8887/statm # total-program-size resident-set-size shared-pages text/code data/stack library dirty-pages
 * 114017 103714 2354 344 0 108676 0
 * $ cat /proc/8887/status
 * (gdb) print g_slice_debug_tree_statistics ()
 * GSlice: MemChecker: 422 trunks, 213068 branches, 0 old branches
 * GSlice: MemChecker: 504.900474 branches per trunk, 98.81% utilization
 * GSlice: MemChecker: 4.965039 entries per branch, 1 minimum, 37 maximum
 */
#endif /* G_ENABLE_DEBUG */