/* GLIB sliced memory - fast concurrent memory chunk allocator
 * Copyright (C) 2005 Tim Janik
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#if defined HAVE_POSIX_MEMALIGN && defined POSIX_MEMALIGN_WITH_COMPLIANT_ALLOCS
#  define HAVE_COMPLIANT_POSIX_MEMALIGN 1
#endif

#ifdef HAVE_COMPLIANT_POSIX_MEMALIGN
#define _XOPEN_SOURCE 600       /* posix_memalign() */
#endif
#include <stdlib.h>             /* posix_memalign() */

#include "gmem.h"               /* gslice.h */
#include "gthreadprivate.h"
#include "glib_trace.h"
#include <unistd.h>             /* sysconf() */
#include <stdio.h>              /* fputs/fprintf */
/* the GSlice allocator is split up into 4 layers, roughly modelled after the slab
 * allocator and magazine extensions as outlined in:
 * + [Bonwick94] Jeff Bonwick, The slab allocator: An object-caching kernel
 *   memory allocator. USENIX 1994, http://citeseer.ist.psu.edu/bonwick94slab.html
 * + [Bonwick01] Bonwick and Jonathan Adams, Magazines and vmem: Extending the
 *   slab allocator to many CPUs and arbitrary resources.
 *   USENIX 2001, http://citeseer.ist.psu.edu/bonwick01magazines.html
 * - the thread magazines. for each (aligned) chunk size, a magazine (a list)
 *   of recently freed and soon to be allocated chunks is maintained per thread.
 *   this way, most alloc/free requests can be quickly satisfied from per-thread
 *   free lists which only require one g_private_get() call to retrieve the
 * - the magazine cache. allocating and freeing chunks to/from threads only
 *   occurs at magazine sizes from a global depot of magazines. the depot
 *   maintains a 15 second working set of allocated magazines, so full
 *   magazines are not allocated and released too often.
 *   the chunk size dependent magazine sizes automatically adapt (within limits,
 *   see [3]) to lock contention to properly scale performance across a variety
 * - the slab allocator. this allocator allocates slabs (blocks of memory) close
 *   to the system page size or multiples thereof which have to be page aligned.
 *   the blocks are divided into smaller chunks which are used to satisfy
 *   allocations from the upper layers. the space provided by the remainder of
 *   the chunk size division is used for cache colorization (random distribution
 *   of chunk addresses) to improve processor cache utilization. multiple slabs
 *   with the same chunk size are kept in a partially sorted ring to allow O(1)
 *   freeing and allocation of chunks (as long as the allocation of an entirely
 *   new slab can be avoided).
 * - the page allocator. on most modern systems, posix_memalign(3) or
 *   memalign(3) should be available, so this is used to allocate blocks with
 *   system page size based alignments and sizes or multiples thereof.
 *   if no memalign variant is provided, valloc() is used instead and
 *   block sizes are limited to the system page size (no multiples thereof).
 *   as a fallback, on systems without even valloc(), a malloc(3)-based page
 *   allocator with alloc-only behaviour is used.
 * [1] some systems' memalign(3) implementations may rely on boundary tagging for
 *     the handed out memory chunks. to avoid excessive page-wise fragmentation,
 *     we reserve 2 * sizeof (void*) per block size for the system's memalign(3),
 *     specified in NATIVE_MALLOC_PADDING.
 * [2] using the slab allocator alone already provides for a fast and efficient
 *     allocator, it doesn't properly scale beyond single-threaded uses though.
 *     also, the slab allocator implements eager free(3)-ing, i.e. does not
 *     provide any form of caching or working set maintenance. so if used alone,
 *     it's vulnerable to thrashing for sequences of balanced (alloc, free) pairs
 *     at certain thresholds.
 * [3] magazine sizes are bound by an implementation specific minimum size and
 *     a chunk size specific maximum to limit magazine storage sizes to roughly
 * [4] allocating ca. 8 chunks per block/page keeps a good balance between
 *     external and internal fragmentation (<= 12.5%). [Bonwick94]
 */
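
/* A minimal caller-side sketch of the public API implemented here
 * (illustrative only; MyNode is a hypothetical type):
 *
 *   typedef struct { int id; gpointer link; } MyNode;
 *
 *   MyNode  *node = g_slice_new0 (MyNode);   // zero-filled, sized by type
 *   gpointer buf  = g_slice_alloc (128);     // raw sized allocation
 *   g_slice_free (MyNode, node);             // size rederived from the type
 *   g_slice_free1 (128, buf);                // size must match the alloc
 */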
/* --- macros and constants --- */
#define LARGEALIGNMENT          (256)
#define P2ALIGNMENT             (2 * sizeof (gsize))                            /* fits 2 pointers (assumed to be 2 * GLIB_SIZEOF_SIZE_T below) */
#define ALIGN(size, base)       ((base) * (gsize) (((size) + (base) - 1) / (base)))
#define NATIVE_MALLOC_PADDING   P2ALIGNMENT     /* per-page padding left for native malloc(3) see [1] */
#define SLAB_INFO_SIZE          P2ALIGN (sizeof (SlabInfo) + NATIVE_MALLOC_PADDING)
#define MAX_MAGAZINE_SIZE       (256)           /* see [3] and allocator_get_magazine_threshold() for this */
#define MIN_MAGAZINE_SIZE       (4)
#define MAX_STAMP_COUNTER       (7)             /* distributes the load of gettimeofday() */
#define MAX_SLAB_CHUNK_SIZE(al) (((al)->max_page_size - SLAB_INFO_SIZE) / 8)    /* we want at least 8 chunks per page, see [4] */
#define MAX_SLAB_INDEX(al)      (SLAB_INDEX (al, MAX_SLAB_CHUNK_SIZE (al)) + 1)
#define SLAB_INDEX(al, asize)   ((asize) / P2ALIGNMENT - 1)                     /* asize must be P2ALIGNMENT aligned */
#define SLAB_CHUNK_SIZE(al, ix) (((ix) + 1) * P2ALIGNMENT)
#define SLAB_BPAGE_SIZE(al,csz) (8 * (csz) + SLAB_INFO_SIZE)
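
/* worked example (assuming a 64-bit system, i.e. P2ALIGNMENT == 16):
 * a 24 byte request is padded to ALIGN (24, 16) == 32 bytes, which maps to
 * SLAB_INDEX (allocator, 32) == 32 / 16 - 1 == 1 and back via
 * SLAB_CHUNK_SIZE (allocator, 1) == (1 + 1) * 16 == 32.
 */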
/* optimized version of ALIGN (size, P2ALIGNMENT) */
#if     GLIB_SIZEOF_SIZE_T * 2 == 8  /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0x7) & ~(gsize) 0x7)
#elif   GLIB_SIZEOF_SIZE_T * 2 == 16 /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0xf) & ~(gsize) 0xf)
#else
#define P2ALIGN(size)   ALIGN (size, P2ALIGNMENT)
#endif
/* special helpers to avoid gmessage.c dependency */
static void mem_error (const char *format, ...) G_GNUC_PRINTF (1,2);
#define mem_assert(cond)    do { if (G_LIKELY (cond)) ; else mem_error ("assertion failed: %s", #cond); } while (0)
/* --- structures --- */
typedef struct _ChunkLink      ChunkLink;
typedef struct _SlabInfo       SlabInfo;
typedef struct _CachedMagazine CachedMagazine;
  SlabInfo *next, *prev;
  gsize     count;                     /* approximate chunk list length */
  Magazine *magazine1;                 /* array of MAX_SLAB_INDEX (allocator) */
  Magazine *magazine2;                 /* array of MAX_SLAB_INDEX (allocator) */
  gboolean always_malloc;
  gboolean bypass_magazines;
  gboolean debug_blocks;
  gsize    working_set_msecs;
  guint    color_increment;
  /* const after initialization */
  gsize         min_page_size, max_page_size;
  gsize         max_slab_chunk_size_for_magazine_cache;
  GMutex       *magazine_mutex;
  ChunkLink   **magazines;             /* array of MAX_SLAB_INDEX (allocator) */
  guint        *contention_counters;   /* array of MAX_SLAB_INDEX (allocator) */
  SlabInfo    **slab_stack;            /* array of MAX_SLAB_INDEX (allocator) */
/* --- g-slice prototypes --- */
static gpointer     slab_allocator_alloc_chunk       (gsize      chunk_size);
static void         slab_allocator_free_chunk        (gsize      chunk_size,
static void         private_thread_memory_cleanup    (gpointer   data);
static gpointer     allocator_memalign               (gsize      alignment,
static void         allocator_memfree                (gsize      memsize,
static inline void  magazine_cache_update_stamp      (void);
static inline gsize allocator_get_magazine_threshold (Allocator *allocator,
/* --- g-slice memory checker --- */
static void     smc_notify_alloc  (void   *pointer,
static int      smc_notify_free   (void   *pointer,
/* --- variables --- */
static GPrivate    *private_thread_memory = NULL;
static gsize        sys_page_size = 0;
static Allocator    allocator[1] = { { 0, }, };
static SliceConfig  slice_config = {
  FALSE,        /* always_malloc */
  FALSE,        /* bypass_magazines */
  FALSE,        /* debug_blocks */
  15 * 1000,    /* working_set_msecs */
  1,            /* color increment, alt: 0x7fffffff */
};
static GMutex      *smc_tree_mutex = NULL; /* mutex for G_SLICE=debug-blocks */
/* --- auxiliary functions --- */
g_slice_set_config (GSliceConfig ckey,
  g_return_if_fail (sys_page_size == 0);
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      slice_config.always_malloc = value != 0;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      slice_config.bypass_magazines = value != 0;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      slice_config.working_set_msecs = value;
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      slice_config.color_increment = value;
g_slice_get_config (GSliceConfig ckey)
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      return slice_config.always_malloc;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      return slice_config.bypass_magazines;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      return slice_config.working_set_msecs;
    case G_SLICE_CONFIG_CHUNK_SIZES:
      return MAX_SLAB_INDEX (allocator);
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      return slice_config.color_increment;
g_slice_get_config_state (GSliceConfig ckey,
  g_return_val_if_fail (n_values != NULL, NULL);
    case G_SLICE_CONFIG_CONTENTION_COUNTER:
      array[i++] = SLAB_CHUNK_SIZE (allocator, address);
      array[i++] = allocator->contention_counters[address];
      array[i++] = allocator_get_magazine_threshold (allocator, address);
      return g_memdup (array, sizeof (array[0]) * *n_values);
slice_config_init (SliceConfig *config)
  /* don't use g_malloc/g_message here */
  const gchar *val = _g_getenv_nomalloc ("G_SLICE", buffer);
  const GDebugKey keys[] = {
    { "always-malloc", 1 << 0 },
    { "debug-blocks",  1 << 1 },
  };
  gint flags = !val ? 0 : g_parse_debug_string (val, keys, G_N_ELEMENTS (keys));
  *config = slice_config;
  if (flags & (1 << 0))         /* always-malloc */
    config->always_malloc = TRUE;
  if (flags & (1 << 1))         /* debug-blocks */
    config->debug_blocks = TRUE;
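
/* example invocations matching the debug keys parsed above ("myprog" being
 * a placeholder for any GLib-based program):
 *
 *   $ G_SLICE=always-malloc ./myprog        # route all slices to g_malloc
 *   $ G_SLICE=debug-blocks ./myprog         # enable the memory checker
 *   $ G_SLICE=always-malloc:debug-blocks ./myprog
 */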
g_slice_init_nomessage (void)
  /* we may not use g_error() or friends here */
  mem_assert (sys_page_size == 0);
  mem_assert (MIN_MAGAZINE_SIZE >= 4);
#ifdef G_OS_WIN32
  SYSTEM_INFO system_info;
  GetSystemInfo (&system_info);
  sys_page_size = system_info.dwPageSize;
#else
  sys_page_size = sysconf (_SC_PAGESIZE); /* = sysconf (_SC_PAGE_SIZE); = getpagesize(); */
#endif
  mem_assert (sys_page_size >= 2 * LARGEALIGNMENT);
  mem_assert ((sys_page_size & (sys_page_size - 1)) == 0);
  slice_config_init (&allocator->config);
  allocator->min_page_size = sys_page_size;
#if HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN
  /* allow allocation of pages up to 8KB (with 8KB alignment).
   * this is useful because many medium to large sized structures
   * fit less than 8 times (see [4]) into 4KB pages.
   * we allow very small page sizes here, to reduce wastage in
   * threads if only small allocations are required (this does
   * bear the risk of increasing allocation times and fragmentation
   */
  allocator->min_page_size = MAX (allocator->min_page_size, 4096);
  allocator->max_page_size = MAX (allocator->min_page_size, 8192);
  allocator->min_page_size = MIN (allocator->min_page_size, 128);
#else
  /* we can only align to system page size */
  allocator->max_page_size = sys_page_size;
#endif
  if (allocator->config.always_malloc)
      allocator->contention_counters = NULL;
      allocator->magazines = NULL;
      allocator->slab_stack = NULL;
      allocator->contention_counters = g_new0 (guint, MAX_SLAB_INDEX (allocator));
      allocator->magazines = g_new0 (ChunkLink*, MAX_SLAB_INDEX (allocator));
      allocator->slab_stack = g_new0 (SlabInfo*, MAX_SLAB_INDEX (allocator));
  allocator->magazine_mutex = NULL;     /* _g_slice_thread_init_nomessage() */
  allocator->mutex_counter = 0;
  allocator->stamp_counter = MAX_STAMP_COUNTER; /* force initial update */
  allocator->last_stamp = 0;
  allocator->slab_mutex = NULL;         /* _g_slice_thread_init_nomessage() */
  allocator->color_accu = 0;
  magazine_cache_update_stamp();
  /* values cached for performance reasons */
  allocator->max_slab_chunk_size_for_magazine_cache = MAX_SLAB_CHUNK_SIZE (allocator);
  if (allocator->config.always_malloc || allocator->config.bypass_magazines)
    allocator->max_slab_chunk_size_for_magazine_cache = 0;      /* non-optimized cases */
  /* at this point, g_mem_gc_friendly() should be initialized, this
   * should have been accomplished by the above g_malloc/g_new calls
allocator_categorize (gsize aligned_chunk_size)
  /* speed up the likely path */
  if (G_LIKELY (aligned_chunk_size && aligned_chunk_size <= allocator->max_slab_chunk_size_for_magazine_cache))
    return 1;           /* use magazine cache */
  /* the above will fail (max_slab_chunk_size_for_magazine_cache == 0) if the
   * allocator is still uninitialized, or if we are not configured to use the
   */
    g_slice_init_nomessage ();
  if (!allocator->config.always_malloc &&
      aligned_chunk_size &&
      aligned_chunk_size <= MAX_SLAB_CHUNK_SIZE (allocator))
      if (allocator->config.bypass_magazines)
        return 2;       /* use slab allocator, see [2] */
      return 1;         /* use magazine cache */
  return 0;             /* use malloc() */
_g_slice_thread_init_nomessage (void)
  /* we may not use g_error() or friends here */
    g_slice_init_nomessage();
    /* g_slice_init_nomessage() has been called already, probably due
     * to a g_slice_alloc1() before g_thread_init().
     */
  private_thread_memory = g_private_new (private_thread_memory_cleanup);
  allocator->magazine_mutex = g_mutex_new();
  allocator->slab_mutex = g_mutex_new();
  if (allocator->config.debug_blocks)
    smc_tree_mutex = g_mutex_new();
g_mutex_lock_a (GMutex *mutex,
                guint  *contention_counter)
  gboolean contention = FALSE;
  if (!g_mutex_trylock (mutex))
      g_mutex_lock (mutex);
      allocator->mutex_counter++;
      if (allocator->mutex_counter >= 1)        /* quickly adapt to contention */
          allocator->mutex_counter = 0;
          *contention_counter = MIN (*contention_counter + 1, MAX_MAGAZINE_SIZE);
  else /* !contention */
      allocator->mutex_counter--;
      if (allocator->mutex_counter < -11)       /* moderately recover magazine sizes */
          allocator->mutex_counter = 0;
          *contention_counter = MAX (*contention_counter, 1) - 1;
static inline ThreadMemory*
thread_memory_from_self (void)
  ThreadMemory *tmem = g_private_get (private_thread_memory);
  if (G_UNLIKELY (!tmem))
      static ThreadMemory *single_thread_memory = NULL;   /* remember single-thread info for multi-threaded case */
      if (single_thread_memory && g_thread_supported ())
          g_mutex_lock (allocator->slab_mutex);
          if (single_thread_memory)
              /* GSlice has been used before g_thread_init(), and now
               * we are running threaded. to cope with it, use the saved
               * thread memory structure from when we weren't threaded.
               */
              tmem = single_thread_memory;
              single_thread_memory = NULL;      /* slab_mutex protected when multi-threaded */
          g_mutex_unlock (allocator->slab_mutex);
      const guint n_magazines = MAX_SLAB_INDEX (allocator);
      tmem = g_malloc0 (sizeof (ThreadMemory) + sizeof (Magazine) * 2 * n_magazines);
      tmem->magazine1 = (Magazine*) (tmem + 1);
      tmem->magazine2 = &tmem->magazine1[n_magazines];
      /* g_private_get/g_private_set works in the single-threaded xor the multi-
       * threaded case. but not *across* g_thread_init(), after multi-thread
       * initialization it returns NULL for previously set single-thread data.
       */
      g_private_set (private_thread_memory, tmem);
      /* save single-thread thread memory structure, in case we need to
       * pick it up again after multi-thread initialization happened.
       */
      if (!single_thread_memory && !g_thread_supported ())
        single_thread_memory = tmem;            /* no slab_mutex created yet */
static inline ChunkLink*
magazine_chain_pop_head (ChunkLink **magazine_chunks)
  /* magazine chains are linked via ChunkLink->next.
   * each ChunkLink->data of the toplevel chain may point to a subchain,
   * linked via ChunkLink->next. ChunkLink->data of the subchains just
   * contains uninitialized junk.
   */
  ChunkLink *chunk = (*magazine_chunks)->data;
  if (G_UNLIKELY (chunk))
      /* allocating from freed list */
      (*magazine_chunks)->data = chunk->next;
      chunk = *magazine_chunks;
      *magazine_chunks = chunk->next;
#if 0 /* useful for debugging */
magazine_count (ChunkLink *head)
  ChunkLink *child = head->data;
      for (child = head->data; child; child = child->next)
allocator_get_magazine_threshold (Allocator *allocator,
  /* the magazine size calculated here has a lower bound of MIN_MAGAZINE_SIZE,
   * which is required by the implementation. also, for moderately sized chunks
   * (say >= 64 bytes), magazine sizes shouldn't be much smaller than the number
   * of chunks available per page/2 to avoid excessive traffic in the magazine
   * cache for small to medium sized structures.
   * the upper bound of the magazine size is effectively provided by
   * MAX_MAGAZINE_SIZE. for larger chunks, this number is scaled down so that
   * the content of a single magazine doesn't exceed ca. 16KB.
   */
  gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
  guint threshold = MAX (MIN_MAGAZINE_SIZE, allocator->max_page_size / MAX (5 * chunk_size, 5 * 32));
  guint contention_counter = allocator->contention_counters[ix];
  if (G_UNLIKELY (contention_counter))  /* single CPU bias */
      /* adapt contention counter thresholds to chunk sizes */
      contention_counter = contention_counter * 64 / chunk_size;
      threshold = MAX (threshold, contention_counter);
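
/* worked example of the uncontended threshold (assuming max_page_size ==
 * 8192): for chunk_size == 64, MAX (5 * 64, 5 * 32) == 320, so a magazine
 * holds MAX (4, 8192 / 320) == 25 chunks (~1.6KB of payload); for
 * chunk_size == 1024 the quotient drops to 1 and MIN_MAGAZINE_SIZE wins.
 */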
/* --- magazine cache --- */
magazine_cache_update_stamp (void)
  if (allocator->stamp_counter >= MAX_STAMP_COUNTER)
      g_get_current_time (&tv);
      allocator->last_stamp = tv.tv_sec * 1000 + tv.tv_usec / 1000; /* milliseconds */
      allocator->stamp_counter = 0;
    allocator->stamp_counter++;
static inline ChunkLink*
magazine_chain_prepare_fields (ChunkLink *magazine_chunks)
  /* checked upon initialization: mem_assert (MIN_MAGAZINE_SIZE >= 4); */
  /* ensure a magazine with at least 4 unused data pointers */
  chunk1 = magazine_chain_pop_head (&magazine_chunks);
  chunk2 = magazine_chain_pop_head (&magazine_chunks);
  chunk3 = magazine_chain_pop_head (&magazine_chunks);
  chunk4 = magazine_chain_pop_head (&magazine_chunks);
  chunk4->next = magazine_chunks;
  chunk3->next = chunk4;
  chunk2->next = chunk3;
  chunk1->next = chunk2;
/* access the first 4 fields of a specially prepared magazine chain */
#define magazine_chain_prev(mc)         ((mc)->data)
#define magazine_chain_stamp(mc)        ((mc)->next->data)
#define magazine_chain_uint_stamp(mc)   GPOINTER_TO_UINT ((mc)->next->data)
#define magazine_chain_next(mc)         ((mc)->next->next->data)
#define magazine_chain_count(mc)        ((mc)->next->next->next->data)
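
/* illustrative layout of a magazine chain as produced by
 * magazine_chain_prepare_fields() above; the data pointers of the first
 * four toplevel links are repurposed for depot bookkeeping:
 *
 *   link1->data = prev magazine    link1->next = link2
 *   link2->data = time stamp       link2->next = link3
 *   link3->data = next magazine    link3->next = link4
 *   link4->data = chunk count      link4->next = remaining chunks
 */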
magazine_cache_trim (Allocator *allocator,
  /* g_mutex_lock (allocator->mutex); done by caller */
  /* trim magazine cache from tail */
  ChunkLink *current = magazine_chain_prev (allocator->magazines[ix]);
  ChunkLink *trash = NULL;
  while (ABS (stamp - magazine_chain_uint_stamp (current)) >= allocator->config.working_set_msecs)
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      /* clear special fields, put on trash stack */
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      magazine_chain_prev (current) = trash;
      /* fixup list head if required */
      if (current == allocator->magazines[ix])
          allocator->magazines[ix] = NULL;
  g_mutex_unlock (allocator->magazine_mutex);
      const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      g_mutex_lock (allocator->slab_mutex);
          trash = magazine_chain_prev (current);
          magazine_chain_prev (current) = NULL;  /* clear special field */
          ChunkLink *chunk = magazine_chain_pop_head (&current);
          slab_allocator_free_chunk (chunk_size, chunk);
      g_mutex_unlock (allocator->slab_mutex);
magazine_cache_push_magazine (guint      ix,
                              ChunkLink *magazine_chunks,
                              gsize      count) /* must be >= MIN_MAGAZINE_SIZE */
  ChunkLink *current = magazine_chain_prepare_fields (magazine_chunks);
  ChunkLink *next, *prev;
  g_mutex_lock (allocator->magazine_mutex);
  /* add magazine at head */
  next = allocator->magazines[ix];
      prev = magazine_chain_prev (next);
    next = prev = current;
  magazine_chain_next (prev) = current;
  magazine_chain_prev (next) = current;
  magazine_chain_prev (current) = prev;
  magazine_chain_next (current) = next;
  magazine_chain_count (current) = (gpointer) count;
  magazine_cache_update_stamp();
  magazine_chain_stamp (current) = GUINT_TO_POINTER (allocator->last_stamp);
  allocator->magazines[ix] = current;
  /* free old magazines beyond a certain threshold */
  magazine_cache_trim (allocator, ix, allocator->last_stamp);
  /* g_mutex_unlock (allocator->mutex); was done by magazine_cache_trim() */
magazine_cache_pop_magazine (guint  ix,
  g_mutex_lock_a (allocator->magazine_mutex, &allocator->contention_counters[ix]);
  if (!allocator->magazines[ix])
      guint magazine_threshold = allocator_get_magazine_threshold (allocator, ix);
      gsize i, chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      ChunkLink *chunk, *head;
      g_mutex_unlock (allocator->magazine_mutex);
      g_mutex_lock (allocator->slab_mutex);
      head = slab_allocator_alloc_chunk (chunk_size);
      for (i = 1; i < magazine_threshold; i++)
          chunk->next = slab_allocator_alloc_chunk (chunk_size);
      g_mutex_unlock (allocator->slab_mutex);
      ChunkLink *current = allocator->magazines[ix];
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      allocator->magazines[ix] = next == current ? NULL : next;
      g_mutex_unlock (allocator->magazine_mutex);
      /* clear special fields and hand out */
      *countp = (gsize) magazine_chain_count (current);
      magazine_chain_prev (current) = NULL;
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
/* --- thread magazines --- */
private_thread_memory_cleanup (gpointer data)
  ThreadMemory *tmem = data;
  const guint n_magazines = MAX_SLAB_INDEX (allocator);
  for (ix = 0; ix < n_magazines; ix++)
      mags[0] = &tmem->magazine1[ix];
      mags[1] = &tmem->magazine2[ix];
      for (j = 0; j < 2; j++)
          Magazine *mag = mags[j];
          if (mag->count >= MIN_MAGAZINE_SIZE)
            magazine_cache_push_magazine (ix, mag->chunks, mag->count);
              const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
              g_mutex_lock (allocator->slab_mutex);
                  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
                  slab_allocator_free_chunk (chunk_size, chunk);
              g_mutex_unlock (allocator->slab_mutex);
thread_memory_magazine1_reload (ThreadMemory *tmem,
  Magazine *mag = &tmem->magazine1[ix];
  mem_assert (mag->chunks == NULL); /* ensure that we may reset mag->count */
  mag->chunks = magazine_cache_pop_magazine (ix, &mag->count);
thread_memory_magazine2_unload (ThreadMemory *tmem,
  Magazine *mag = &tmem->magazine2[ix];
  magazine_cache_push_magazine (ix, mag->chunks, mag->count);
thread_memory_swap_magazines (ThreadMemory *tmem,
  Magazine xmag = tmem->magazine1[ix];
  tmem->magazine1[ix] = tmem->magazine2[ix];
  tmem->magazine2[ix] = xmag;
static inline gboolean
thread_memory_magazine1_is_empty (ThreadMemory *tmem,
  return tmem->magazine1[ix].chunks == NULL;
static inline gboolean
thread_memory_magazine2_is_full (ThreadMemory *tmem,
  return tmem->magazine2[ix].count >= allocator_get_magazine_threshold (allocator, ix);
static inline gpointer
thread_memory_magazine1_alloc (ThreadMemory *tmem,
  Magazine *mag = &tmem->magazine1[ix];
  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
  if (G_LIKELY (mag->count > 0))
thread_memory_magazine2_free (ThreadMemory *tmem,
  Magazine *mag = &tmem->magazine2[ix];
  ChunkLink *chunk = mem;
  chunk->next = mag->chunks;
/* --- API functions --- */
g_slice_alloc (gsize mem_size)
  chunk_size = P2ALIGN (mem_size);
  acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1))     /* allocate through magazine layer */
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
            thread_memory_magazine1_reload (tmem, ix);
      mem = thread_memory_magazine1_alloc (tmem, ix);
  else if (acat == 2)           /* allocate through slab allocator */
      g_mutex_lock (allocator->slab_mutex);
      mem = slab_allocator_alloc_chunk (chunk_size);
      g_mutex_unlock (allocator->slab_mutex);
  else                          /* delegate to system malloc */
    mem = g_malloc (mem_size);
  if (G_UNLIKELY (allocator->config.debug_blocks))
    smc_notify_alloc (mem, mem_size);
  TRACE (GLIB_SLICE_ALLOC((void*)mem, mem_size));
g_slice_alloc0 (gsize mem_size)
  gpointer mem = g_slice_alloc (mem_size);
    memset (mem, 0, mem_size);
g_slice_copy (gsize         mem_size,
              gconstpointer mem_block)
  gpointer mem = g_slice_alloc (mem_size);
    memcpy (mem, mem_block, mem_size);
g_slice_free1 (gsize mem_size,
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_UNLIKELY (!mem_block))
  if (G_UNLIKELY (allocator->config.debug_blocks) &&
      !smc_notify_free (mem_block, mem_size))
  if (G_LIKELY (acat == 1))     /* free through magazine layer */
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            thread_memory_magazine2_unload (tmem, ix);
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, chunk_size);
      thread_memory_magazine2_free (tmem, ix, mem_block);
  else if (acat == 2)           /* free through slab allocator */
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, chunk_size);
      g_mutex_lock (allocator->slab_mutex);
      slab_allocator_free_chunk (chunk_size, mem_block);
      g_mutex_unlock (allocator->slab_mutex);
  else                          /* delegate to system malloc */
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, mem_size);
  TRACE (GLIB_SLICE_FREE((void*)mem_block, mem_size));
g_slice_free_chain_with_offset (gsize mem_size,
  gpointer slice = mem_chain;
  /* while the thread magazines and the magazine cache are implemented so that
   * they can easily be extended to allow for free lists containing more free
   * lists for the first level nodes, which would allow O(1) freeing in this
   * function, the benefit of such an extension is questionable, because:
   * - the magazine size counts will become mere lower bounds which confuses
   *   the code adapting to lock contention;
   * - freeing a single node to the thread magazines is very fast, so this
   *   O(list_length) operation is multiplied by a fairly small factor;
   * - memory usage histograms on larger applications seem to indicate that
   *   the amount of released multi node lists is negligible in comparison
   *   to single node releases;
   * - the major performance bottleneck, namely g_private_get() or
   *   g_mutex_lock()/g_mutex_unlock(), has already been moved out of the
   *   inner loop for freeing chained slices.
   */
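
  /* example caller-side usage via the public wrapper macro (MyNode being a
   * hypothetical list node type with an embedded 'next' pointer):
   *
   *   g_slice_free_chain (MyNode, list, next);
   *
   * which expands to g_slice_free_chain_with_offset (sizeof (MyNode), list,
   * G_STRUCT_OFFSET (MyNode, next)).
   */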
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1))     /* free through magazine layer */
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          if (G_UNLIKELY (allocator->config.debug_blocks) &&
              !smc_notify_free (current, mem_size))
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
              thread_memory_swap_magazines (tmem, ix);
              if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
                thread_memory_magazine2_unload (tmem, ix);
          if (G_UNLIKELY (g_mem_gc_friendly))
            memset (current, 0, chunk_size);
          thread_memory_magazine2_free (tmem, ix, current);
  else if (acat == 2)           /* free through slab allocator */
      g_mutex_lock (allocator->slab_mutex);
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          if (G_UNLIKELY (allocator->config.debug_blocks) &&
              !smc_notify_free (current, mem_size))
          if (G_UNLIKELY (g_mem_gc_friendly))
            memset (current, 0, chunk_size);
          slab_allocator_free_chunk (chunk_size, current);
      g_mutex_unlock (allocator->slab_mutex);
  else                          /* delegate to system malloc */
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          if (G_UNLIKELY (allocator->config.debug_blocks) &&
              !smc_notify_free (current, mem_size))
          if (G_UNLIKELY (g_mem_gc_friendly))
            memset (current, 0, mem_size);
/* --- single page allocator --- */
allocator_slab_stack_push (Allocator *allocator,
  /* insert slab at slab ring head */
  if (!allocator->slab_stack[ix])
      SlabInfo *next = allocator->slab_stack[ix], *prev = next->prev;
  allocator->slab_stack[ix] = sinfo;
allocator_aligned_page_size (Allocator *allocator,
  gsize val = 1 << g_bit_storage (n_bytes - 1);
  val = MAX (val, allocator->min_page_size);
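
  /* worked example: n_bytes == 5000 needs g_bit_storage (4999) == 13 bits,
   * so val == 1 << 13 == 8192, i.e. block sizes are rounded up to the next
   * power of two; sub-page requests are raised to min_page_size by the
   * MAX() above.
   */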
allocator_add_slab (Allocator *allocator,
  gsize addr, padding, n_chunks, color = 0;
  gsize page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
  /* allocate 1 page for the chunks and the slab */
  gpointer aligned_memory = allocator_memalign (page_size, page_size - NATIVE_MALLOC_PADDING);
  guint8 *mem = aligned_memory;
      const gchar *syserr = "unknown error";
      syserr = strerror (errno);
      mem_error ("failed to allocate %u bytes (alignment: %u): %s\n",
                 (guint) (page_size - NATIVE_MALLOC_PADDING), (guint) page_size, syserr);
  /* mask page address */
  addr = ((gsize) mem / page_size) * page_size;
  /* assert alignment */
  mem_assert (aligned_memory == (gpointer) addr);
  /* basic slab info setup */
  sinfo = (SlabInfo*) (mem + page_size - SLAB_INFO_SIZE);
  sinfo->n_allocated = 0;
  sinfo->chunks = NULL;
  /* figure cache colorization */
  n_chunks = ((guint8*) sinfo - mem) / chunk_size;
  padding = ((guint8*) sinfo - mem) - n_chunks * chunk_size;
      color = (allocator->color_accu * P2ALIGNMENT) % padding;
      allocator->color_accu += allocator->config.color_increment;
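      /* e.g. with padding == 48 and P2ALIGNMENT == 16, color_increment == 1
       * places the first chunk of successive slabs at offsets 0, 16, 32,
       * 0, ..., spreading chunk addresses across cache lines.
       */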
  /* add chunks to free list */
  chunk = (ChunkLink*) (mem + color);
  sinfo->chunks = chunk;
  for (i = 0; i < n_chunks - 1; i++)
      chunk->next = (ChunkLink*) ((guint8*) chunk + chunk_size);
      chunk = chunk->next;
  chunk->next = NULL;   /* last chunk */
  /* add slab to slab ring */
  allocator_slab_stack_push (allocator, ix, sinfo);
slab_allocator_alloc_chunk (gsize chunk_size)
  guint ix = SLAB_INDEX (allocator, chunk_size);
  /* ensure non-empty slab */
  if (!allocator->slab_stack[ix] || !allocator->slab_stack[ix]->chunks)
    allocator_add_slab (allocator, ix, chunk_size);
  /* allocate chunk */
  chunk = allocator->slab_stack[ix]->chunks;
  allocator->slab_stack[ix]->chunks = chunk->next;
  allocator->slab_stack[ix]->n_allocated++;
  /* rotate empty slabs */
  if (!allocator->slab_stack[ix]->chunks)
    allocator->slab_stack[ix] = allocator->slab_stack[ix]->next;
slab_allocator_free_chunk (gsize chunk_size,
  guint ix = SLAB_INDEX (allocator, chunk_size);
  gsize page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
  gsize addr = ((gsize) mem / page_size) * page_size;
  /* mask page address */
  guint8 *page = (guint8*) addr;
  SlabInfo *sinfo = (SlabInfo*) (page + page_size - SLAB_INFO_SIZE);
  /* assert valid chunk count */
  mem_assert (sinfo->n_allocated > 0);
  /* add chunk to free list */
  was_empty = sinfo->chunks == NULL;
  chunk = (ChunkLink*) mem;
  chunk->next = sinfo->chunks;
  sinfo->chunks = chunk;
  sinfo->n_allocated--;
  /* keep slab ring partially sorted, empty slabs at end */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* insert slab at head */
      allocator_slab_stack_push (allocator, ix, sinfo);
  /* eagerly free complete unused slabs */
  if (!sinfo->n_allocated)
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      allocator_memfree (page_size, page);
/* --- memalign implementation --- */
#ifdef HAVE_MALLOC_H
#include <malloc.h>     /* memalign() */
#endif

/*
 * define HAVE_POSIX_MEMALIGN           1 // if free(posix_memalign(3)) works, <stdlib.h>
 * define HAVE_COMPLIANT_POSIX_MEMALIGN 1 // if free(posix_memalign(3)) works for sizes != 2^n, <stdlib.h>
 * define HAVE_MEMALIGN                 1 // if free(memalign(3)) works, <malloc.h>
 * define HAVE_VALLOC                   1 // if free(valloc(3)) works, <stdlib.h> or <malloc.h>
 * if none is provided, we implement malloc(3)-based alloc-only page alignment
 */

#if !(HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC)
static GTrashStack *compat_valloc_trash = NULL;
#endif

allocator_memalign (gsize alignment,
  gpointer aligned_memory = NULL;
#if     HAVE_COMPLIANT_POSIX_MEMALIGN
  err = posix_memalign (&aligned_memory, alignment, memsize);
  aligned_memory = memalign (alignment, memsize);
  aligned_memory = valloc (memsize);
  /* simplistic non-freeing page allocator */
  mem_assert (alignment == sys_page_size);
  mem_assert (memsize <= sys_page_size);
  if (!compat_valloc_trash)
      const guint n_pages = 16;
      guint8 *mem = malloc (n_pages * sys_page_size);
          guint8 *amem = (guint8*) ALIGN ((gsize) mem, sys_page_size);
            i--;        /* mem wasn't page aligned */
          g_trash_stack_push (&compat_valloc_trash, amem + i * sys_page_size);
  aligned_memory = g_trash_stack_pop (&compat_valloc_trash);
  if (!aligned_memory)
  return aligned_memory;
allocator_memfree (gsize    memsize,
#if     HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC
  mem_assert (memsize <= sys_page_size);
  g_trash_stack_push (&compat_valloc_trash, mem);
mem_error (const char *format,
  /* at least, put out "MEMORY-ERROR", in case we segfault during the rest of the function */
  fputs ("\n***MEMORY-ERROR***: ", stderr);
  pname = g_get_prgname();
  fprintf (stderr, "%s[%ld]: GSlice: ", pname ? pname : "", (long)getpid());
  va_start (args, format);
  vfprintf (stderr, format, args);
  fputs ("\n", stderr);
/* --- g-slice memory checker tree --- */
typedef size_t SmcKType;                /* key type */
typedef size_t SmcVType;                /* value type */
static void     smc_tree_insert (SmcKType  key,
static gboolean smc_tree_lookup (SmcKType  key,
static gboolean smc_tree_remove (SmcKType  key);
/* --- g-slice memory checker implementation --- */
smc_notify_alloc (void   *pointer,
  size_t address = (size_t) pointer;
    smc_tree_insert (address, size);
smc_notify_ignore (void *pointer)
  size_t address = (size_t) pointer;
    smc_tree_remove (address);
smc_notify_free (void   *pointer,
  size_t address = (size_t) pointer;
    return 1;   /* ignore */
  found_one = smc_tree_lookup (address, &real_size);
      fprintf (stderr, "GSlice: MemChecker: attempt to release non-allocated block: %p size=%" G_GSIZE_FORMAT "\n", pointer, size);
  if (real_size != size && (real_size || size))
      fprintf (stderr, "GSlice: MemChecker: attempt to release block with invalid size: %p size=%" G_GSIZE_FORMAT " invalid-size=%" G_GSIZE_FORMAT "\n", pointer, real_size, size);
  if (!smc_tree_remove (address))
      fprintf (stderr, "GSlice: MemChecker: attempt to release non-allocated block: %p size=%" G_GSIZE_FORMAT "\n", pointer, size);
  return 1; /* all fine */
/* --- g-slice memory checker tree implementation --- */
#define SMC_TRUNK_COUNT     (4093 /* 16381 */)          /* prime, to distribute trunk collisions (big, allocated just once) */
#define SMC_BRANCH_COUNT    (511)                       /* prime, to distribute branch collisions */
#define SMC_TRUNK_EXTENT    (SMC_BRANCH_COUNT * 2039)   /* key address space per trunk, should distribute uniformly across BRANCH_COUNT */
#define SMC_TRUNK_HASH(k)   ((k / SMC_TRUNK_EXTENT) % SMC_TRUNK_COUNT)  /* generate new trunk hash per megabyte (roughly) */
#define SMC_BRANCH_HASH(k)  (k % SMC_BRANCH_COUNT)
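
/* worked example: SMC_TRUNK_EXTENT == 511 * 2039 == 1041929 (~1MB), so keys
 * within the same ~1MB address range share a trunk slot via
 * (k / 1041929) % 4093, while k % 511 spreads them across branches.
 */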
  unsigned int n_entries;
static SmcBranch **smc_tree_root = NULL;
smc_tree_abort (int errval)
  const char *syserr = "unknown error";
  syserr = strerror (errval);
  mem_error ("MemChecker: failure in debugging tree: %s", syserr);
static inline SmcEntry*
smc_tree_branch_grow_L (SmcBranch   *branch,
  unsigned int old_size = branch->n_entries * sizeof (branch->entries[0]);
  unsigned int new_size = old_size + sizeof (branch->entries[0]);
  mem_assert (index <= branch->n_entries);
  branch->entries = (SmcEntry*) realloc (branch->entries, new_size);
  if (!branch->entries)
    smc_tree_abort (errno);
  entry = branch->entries + index;
  g_memmove (entry + 1, entry, (branch->n_entries - index) * sizeof (entry[0]));
  branch->n_entries += 1;
static inline SmcEntry*
smc_tree_branch_lookup_nearest_L (SmcBranch *branch,
  unsigned int n_nodes = branch->n_entries, offs = 0;
  SmcEntry *check = branch->entries;
  while (offs < n_nodes)
      unsigned int i = (offs + n_nodes) >> 1;
      check = branch->entries + i;
      cmp = key < check->key ? -1 : key != check->key;
        return check;                   /* return exact match */
      else /* (cmp > 0) */
  /* check points at last mismatch, cmp > 0 indicates greater key */
  return cmp > 0 ? check + 1 : check;   /* return insertion position for inexact match */
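
/* example: for a branch holding keys {10, 20, 30} and key == 25, the loop
 * above ends with check at the entry holding 30 and cmp < 0, so the returned
 * pointer is the insertion position in front of 30, keeping entries sorted.
 */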
smc_tree_insert (SmcKType key,
  unsigned int ix0, ix1;
  g_mutex_lock (smc_tree_mutex);
  ix0 = SMC_TRUNK_HASH (key);
  ix1 = SMC_BRANCH_HASH (key);
      smc_tree_root = calloc (SMC_TRUNK_COUNT, sizeof (smc_tree_root[0]));
        smc_tree_abort (errno);
  if (!smc_tree_root[ix0])
      smc_tree_root[ix0] = calloc (SMC_BRANCH_COUNT, sizeof (smc_tree_root[0][0]));
      if (!smc_tree_root[ix0])
        smc_tree_abort (errno);
  entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
  if (!entry ||                                                                         /* need create */
      entry >= smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries ||   /* need append */
      entry->key != key)                                                                /* need insert */
    entry = smc_tree_branch_grow_L (&smc_tree_root[ix0][ix1], entry - smc_tree_root[ix0][ix1].entries);
  entry->value = value;
  g_mutex_unlock (smc_tree_mutex);
smc_tree_lookup (SmcKType key,
  SmcEntry *entry = NULL;
  unsigned int ix0 = SMC_TRUNK_HASH (key), ix1 = SMC_BRANCH_HASH (key);
  gboolean found_one = FALSE;
  g_mutex_lock (smc_tree_mutex);
  if (smc_tree_root && smc_tree_root[ix0])
      entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
          entry < smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries &&
          *value_p = entry->value;
  g_mutex_unlock (smc_tree_mutex);
smc_tree_remove (SmcKType key)
  unsigned int ix0 = SMC_TRUNK_HASH (key), ix1 = SMC_BRANCH_HASH (key);
  gboolean found_one = FALSE;
  g_mutex_lock (smc_tree_mutex);
  if (smc_tree_root && smc_tree_root[ix0])
      SmcEntry *entry = smc_tree_branch_lookup_nearest_L (&smc_tree_root[ix0][ix1], key);
          entry < smc_tree_root[ix0][ix1].entries + smc_tree_root[ix0][ix1].n_entries &&
          unsigned int i = entry - smc_tree_root[ix0][ix1].entries;
          smc_tree_root[ix0][ix1].n_entries -= 1;
          g_memmove (entry, entry + 1, (smc_tree_root[ix0][ix1].n_entries - i) * sizeof (entry[0]));
          if (!smc_tree_root[ix0][ix1].n_entries)
              /* avoid useless pressure on the memory system */
              free (smc_tree_root[ix0][ix1].entries);
              smc_tree_root[ix0][ix1].entries = NULL;
  g_mutex_unlock (smc_tree_mutex);
#ifdef G_ENABLE_DEBUG
g_slice_debug_tree_statistics (void)
  g_mutex_lock (smc_tree_mutex);
      unsigned int i, j, t = 0, o = 0, b = 0, su = 0, ex = 0, en = 4294967295u;
      for (i = 0; i < SMC_TRUNK_COUNT; i++)
        if (smc_tree_root[i])
            for (j = 0; j < SMC_BRANCH_COUNT; j++)
              if (smc_tree_root[i][j].n_entries)
                  su += smc_tree_root[i][j].n_entries;
                  en = MIN (en, smc_tree_root[i][j].n_entries);
                  ex = MAX (ex, smc_tree_root[i][j].n_entries);
              else if (smc_tree_root[i][j].entries)
                o++; /* formerly used, now empty */
      tf = MAX (t, 1.0); /* max(1) to be a valid divisor */
      bf = MAX (b, 1.0); /* max(1) to be a valid divisor */
      fprintf (stderr, "GSlice: MemChecker: %u trunks, %u branches, %u old branches\n", t, b, o);
      fprintf (stderr, "GSlice: MemChecker: %f branches per trunk, %.2f%% utilization\n",
               100.0 - (SMC_BRANCH_COUNT - b / tf) / (0.01 * SMC_BRANCH_COUNT));
      fprintf (stderr, "GSlice: MemChecker: %f entries per branch, %u minimum, %u maximum\n",
    fprintf (stderr, "GSlice: MemChecker: root=NULL\n");
  g_mutex_unlock (smc_tree_mutex);
/* sample statistics (beast + GSlice + 24h scripted core & GUI activity):
 *  PID %CPU %MEM   VSZ    RSS    COMMAND
 * 8887 30.3 45.8 456068 414856   beast-0.7.1 empty.bse
 * $ cat /proc/8887/statm # total-program-size resident-set-size shared-pages text/code data/stack library dirty-pages
 *   114017 103714 2354 344 0 108676 0
 * $ cat /proc/8887/status
 * (gdb) print g_slice_debug_tree_statistics ()
 * GSlice: MemChecker: 422 trunks, 213068 branches, 0 old branches
 * GSlice: MemChecker: 504.900474 branches per trunk, 98.81% utilization
 * GSlice: MemChecker: 4.965039 entries per branch, 1 minimum, 37 maximum
 */
#endif /* G_ENABLE_DEBUG */

#define __G_SLICE_C__
#include "galiasdef.c"