/* GLIB sliced memory - fast concurrent memory chunk allocator
 * Copyright (C) 2005 Tim Janik
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#ifdef HAVE_POSIX_MEMALIGN
#define _XOPEN_SOURCE 600       /* posix_memalign() */
#endif
#include <stdlib.h>             /* posix_memalign() */

#include "gmem.h"               /* gslice.h */
#include "gthreadinit.h"
#include <string.h>             /* memset(), strerror() */
#include <errno.h>
#include <stdio.h>              /* fputs(), vfprintf() */
#include <stdarg.h>             /* va_list */
#include <unistd.h>             /* sysconf() */

#if defined HAVE_POSIX_MEMALIGN && defined POSIX_MEMALIGN_WITH_COMPLIANT_ALLOCS
#  define HAVE_COMPLIANT_POSIX_MEMALIGN 1
#endif

/* the GSlice allocator is split up into 4 layers, roughly modelled after the slab
 * allocator and magazine extensions as outlined in:
 * + [Bonwick94] Jeff Bonwick, The slab allocator: An object-caching kernel
 *   memory allocator. USENIX 1994, http://citeseer.ist.psu.edu/bonwick94slab.html
 * + [Bonwick01] Bonwick and Jonathan Adams, Magazines and vmem: Extending the
 *   slab allocator to many CPUs and arbitrary resources.
 *   USENIX 2001, http://citeseer.ist.psu.edu/bonwick01magazines.html
 * the layers are:
 * - the thread magazines. for each (aligned) chunk size, a magazine (a list)
 *   of recently freed and soon to be allocated chunks is maintained per thread.
 *   this way, most alloc/free requests can be quickly satisfied from per-thread
 *   free lists which only require one g_private_get() call to retrieve the
 *   thread-specific data.
 * - the magazine cache. allocating and freeing chunks to/from threads only
 *   occurs at magazine sizes from a global depot of magazines. the depot
 *   maintains a 15 second working set of allocated magazines, so full
 *   magazines are not allocated and released too often.
 *   the chunk size dependent magazine sizes automatically adapt (within limits,
 *   see [3]) to lock contention to properly scale performance across a variety
 *   of machines.
 * - the slab allocator. this allocator allocates slabs (blocks of memory) close
 *   to the system page size or multiples thereof which have to be page aligned.
 *   the blocks are divided into smaller chunks which are used to satisfy
 *   allocations from the upper layers. the space provided by the remainder of
 *   the chunk size division is used for cache colorization (random distribution
 *   of chunk addresses) to improve processor cache utilization. multiple slabs
 *   with the same chunk size are kept in a partially sorted ring to allow O(1)
 *   freeing and allocation of chunks (as long as the allocation of an entirely
 *   new slab can be avoided).
 * - the page allocator. on most modern systems, posix_memalign(3) or
 *   memalign(3) should be available, so this is used to allocate blocks with
 *   system page size based alignments and sizes or multiples thereof.
 *   if no memalign variant is provided, valloc() is used instead and
 *   block sizes are limited to the system page size (no multiples thereof).
 *   as a fallback, on systems without even valloc(), a malloc(3)-based page
 *   allocator with alloc-only behaviour is used.
 *
 * NOTES:
 * [1] some systems' memalign(3) implementations may rely on boundary tagging for
 *     the handed out memory chunks. to avoid excessive page-wise fragmentation,
 *     we reserve 2 * sizeof (void*) per block size for the system's memalign(3),
 *     specified in NATIVE_MALLOC_PADDING.
 * [2] using the slab allocator alone already provides for a fast and efficient
 *     allocator, but it doesn't properly scale beyond single-threaded uses.
 *     also, the slab allocator implements eager free(3)-ing, i.e. it does not
 *     provide any form of caching or working set maintenance. so if used alone,
 *     it's vulnerable to thrashing for sequences of balanced (alloc, free) pairs
 *     at certain thresholds.
 * [3] magazine sizes are bound by an implementation specific minimum size and
 *     a chunk size specific maximum to limit magazine storage sizes to roughly
 *     16KB.
 * [4] allocating ca. 8 chunks per block/page keeps a good balance between
 *     external and internal fragmentation (<= 12.5%). [Bonwick94]
 */

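/* a minimal usage sketch of the public entry points implemented below
 * (disabled, for illustration only; the size passed to g_slice_free1()
 * must match the corresponding allocation request):
 */
#if 0
typedef struct { int x, y; } Point;
static void
example_usage (void)
{
  Point *p = g_slice_alloc (sizeof (Point));    /* magazine/slab backed */
  Point *q = g_slice_alloc0 (sizeof (Point));   /* zero-initialized variant */
  p->x = q->y = 7;
  g_slice_free1 (sizeof (Point), p);            /* size must match the alloc */
  g_slice_free1 (sizeof (Point), q);
}
#endif
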
/* --- macros and constants --- */
#define LARGEALIGNMENT          (256)
#define P2ALIGNMENT             (2 * sizeof (gsize))    /* fits 2 pointers (assumed to be 2 * GLIB_SIZEOF_SIZE_T below) */
#define ALIGN(size, base)       ((base) * (gsize) (((size) + (base) - 1) / (base)))
#define NATIVE_MALLOC_PADDING   P2ALIGNMENT             /* per-page padding left for native malloc(3) see [1] */
#define SLAB_INFO_SIZE          P2ALIGN (sizeof (SlabInfo) + NATIVE_MALLOC_PADDING)
#define MAX_MAGAZINE_SIZE       (256)                   /* see [3] and allocator_get_magazine_threshold() for this */
#define MIN_MAGAZINE_SIZE       (4)
#define MAX_STAMP_COUNTER       (7)                     /* distributes the load of gettimeofday() */
#define MAX_SLAB_CHUNK_SIZE(al) (((al)->max_page_size - SLAB_INFO_SIZE) / 8)    /* we want at least 8 chunks per page, see [4] */
#define MAX_SLAB_INDEX(al)      (SLAB_INDEX (al, MAX_SLAB_CHUNK_SIZE (al)) + 1)
#define SLAB_INDEX(al, asize)   ((asize) / P2ALIGNMENT - 1)                     /* asize must be P2ALIGNMENT aligned */
#define SLAB_CHUNK_SIZE(al, ix) (((ix) + 1) * P2ALIGNMENT)
#define SLAB_BPAGE_SIZE(al,csz) (8 * (csz) + SLAB_INFO_SIZE)

/* optimized version of ALIGN (size, P2ALIGNMENT) */
#if     GLIB_SIZEOF_SIZE_T * 2 == 8             /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0x7) & ~(gsize) 0x7)
#elif   GLIB_SIZEOF_SIZE_T * 2 == 16            /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0xf) & ~(gsize) 0xf)
#else
#define P2ALIGN(size)   ALIGN (size, P2ALIGNMENT)
#endif

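/* worked examples (illustrative): with GLIB_SIZEOF_SIZE_T == 8, P2ALIGNMENT
 * is 16 bytes, so P2ALIGN (1) == 16, P2ALIGN (16) == 16 and P2ALIGN (17) == 32.
 * aligned chunk sizes and slab indices then round-trip as follows:
 *   SLAB_INDEX (allocator, 48)     == 48 / 16 - 1  == 2
 *   SLAB_CHUNK_SIZE (allocator, 2) == (2 + 1) * 16 == 48
 */
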
/* special helpers to avoid gmessage.c dependency */
static void mem_error (const char *format, ...) G_GNUC_PRINTF (1,2);
#define mem_assert(cond)    do { if (G_LIKELY (cond)) ; else mem_error ("assertion failed: %s", #cond); } while (0)

/* --- structures --- */
typedef struct _ChunkLink      ChunkLink;
typedef struct _SlabInfo       SlabInfo;
typedef struct _CachedMagazine CachedMagazine;
struct _ChunkLink {
  ChunkLink *next;
  ChunkLink *data;
};
struct _SlabInfo {
  ChunkLink *chunks;
  guint      n_allocated;
  SlabInfo  *next, *prev;
};
typedef struct {
  ChunkLink *chunks;
  gsize      count;                     /* approximate chunk list length */
} Magazine;
typedef struct {
  Magazine  *magazine1;                 /* array of MAX_SLAB_INDEX (allocator) */
  Magazine  *magazine2;                 /* array of MAX_SLAB_INDEX (allocator) */
} ThreadMemory;
typedef struct {
  gboolean always_malloc;
  gboolean bypass_magazines;
  gsize    working_set_msecs;
  guint    color_increment;
} SliceConfig;
typedef struct {
  /* const after initialization */
  gsize        min_page_size, max_page_size;
  SliceConfig  config;
  gsize        max_slab_chunk_size_for_magazine_cache;
  /* magazine cache */
  GMutex      *magazine_mutex;
  ChunkLink  **magazines;               /* array of MAX_SLAB_INDEX (allocator) */
  guint       *contention_counters;     /* array of MAX_SLAB_INDEX (allocator) */
  gint         mutex_counter;
  guint        stamp_counter;
  guint        last_stamp;
  /* slab allocator */
  GMutex      *slab_mutex;
  SlabInfo   **slab_stack;              /* array of MAX_SLAB_INDEX (allocator) */
  guint        color_accu;
} Allocator;

/* --- prototypes --- */
static gpointer     slab_allocator_alloc_chunk       (gsize      chunk_size);
static void         slab_allocator_free_chunk        (gsize      chunk_size,
                                                      gpointer   mem);
static void         private_thread_memory_cleanup    (gpointer   data);
static gpointer     allocator_memalign               (gsize      alignment,
                                                      gsize      memsize);
static void         allocator_memfree                (gsize      memsize,
                                                      gpointer   mem);
static inline void  magazine_cache_update_stamp      (void);
static inline gsize allocator_get_magazine_threshold (Allocator *allocator,
                                                      guint      ix);

/* --- variables --- */
static GPrivate   *private_thread_memory = NULL;
static gsize       sys_page_size = 0;
static Allocator   allocator[1] = { { 0, }, };
static SliceConfig slice_config = {
  FALSE,        /* always_malloc */
  FALSE,        /* bypass_magazines */
  15 * 1000,    /* working_set_msecs */
  1,            /* color increment, alt: 0x7fffffff */
};

/* --- auxiliary functions --- */
void
g_slice_set_config (GSliceConfig ckey,
                    gint64       value)
{
  g_return_if_fail (sys_page_size == 0);
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      slice_config.always_malloc = value != 0;
      break;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      slice_config.bypass_magazines = value != 0;
      break;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      slice_config.working_set_msecs = value;
      break;
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      slice_config.color_increment = value;
      break;
    default: ;
    }
}

gint64
g_slice_get_config (GSliceConfig ckey)
{
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      return slice_config.always_malloc;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      return slice_config.bypass_magazines;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      return slice_config.working_set_msecs;
    case G_SLICE_CONFIG_CHUNK_SIZES:
      return MAX_SLAB_INDEX (allocator);
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      return slice_config.color_increment;
    default:
      return 0;
    }
}

gint64*
g_slice_get_config_state (GSliceConfig ckey,
                          gint64       address,
                          guint       *n_values)
{
  guint i = 0;
  gint64 array[64];
  g_return_val_if_fail (n_values != NULL, NULL);
  *n_values = 0;
  switch (ckey)
    {
    case G_SLICE_CONFIG_CONTENTION_COUNTER:
      array[i++] = SLAB_CHUNK_SIZE (allocator, address);
      array[i++] = allocator->contention_counters[address];
      array[i++] = allocator_get_magazine_threshold (allocator, address);
      *n_values = i;
      return g_memdup (array, sizeof (array[0]) * *n_values);
    default:
      return NULL;
    }
}

static void
slice_config_init (SliceConfig *config)
{
  /* don't use g_malloc/g_message here */
  gchar buffer[1024];
  const gchar *val = _g_getenv_nomalloc ("G_SLICE", buffer);
  static const GDebugKey keys[] = {
    { "always-malloc", 1 << 0 },
  };
  gint flags = !val ? 0 : g_parse_debug_string (val, keys, G_N_ELEMENTS (keys));
  *config = slice_config;
  if (flags & (1 << 0))         /* always-malloc */
    config->always_malloc = TRUE;
}

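/* example: the G_SLICE environment variable parsed above can force the
 * malloc fallback for a whole program run, which is useful e.g. under
 * memory checkers (illustrative shell invocation):
 *   G_SLICE=always-malloc ./my-program
 */
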
static void
g_slice_init_nomessage (void)
{
  /* we may not use g_error() or friends here */
  mem_assert (sys_page_size == 0);
  mem_assert (MIN_MAGAZINE_SIZE >= 4);
#ifdef G_OS_WIN32
  {
    SYSTEM_INFO system_info;
    GetSystemInfo (&system_info);
    sys_page_size = system_info.dwPageSize;
  }
#else
  sys_page_size = sysconf (_SC_PAGESIZE); /* = sysconf (_SC_PAGE_SIZE); = getpagesize(); */
#endif
  mem_assert (sys_page_size >= 2 * LARGEALIGNMENT);
  mem_assert ((sys_page_size & (sys_page_size - 1)) == 0);
  slice_config_init (&allocator->config);
  allocator->min_page_size = sys_page_size;
#if HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN
  /* allow allocation of pages up to 8KB (with 8KB alignment).
   * this is useful because many medium to large sized structures
   * fit less than 8 times (see [4]) into 4KB pages.
   * we allow very small page sizes here, to reduce wastage in
   * threads if only small allocations are required (this does
   * bear the risk of increasing allocation times and fragmentation
   * though).
   */
  allocator->min_page_size = MAX (allocator->min_page_size, 4096);
  allocator->max_page_size = MAX (allocator->min_page_size, 8192);
  allocator->min_page_size = MIN (allocator->min_page_size, 128);
#else
  /* we can only align to system page size */
  allocator->max_page_size = sys_page_size;
#endif
  allocator->magazine_mutex = NULL;     /* _g_slice_thread_init_nomessage() */
  allocator->magazines = g_new0 (ChunkLink*, MAX_SLAB_INDEX (allocator));
  allocator->contention_counters = g_new0 (guint, MAX_SLAB_INDEX (allocator));
  allocator->mutex_counter = 0;
  allocator->stamp_counter = MAX_STAMP_COUNTER; /* force initial update */
  allocator->last_stamp = 0;
  allocator->slab_mutex = NULL;         /* _g_slice_thread_init_nomessage() */
  allocator->slab_stack = g_new0 (SlabInfo*, MAX_SLAB_INDEX (allocator));
  allocator->color_accu = 0;
  magazine_cache_update_stamp();
  /* values cached for performance reasons */
  allocator->max_slab_chunk_size_for_magazine_cache = MAX_SLAB_CHUNK_SIZE (allocator);
  if (allocator->config.always_malloc || allocator->config.bypass_magazines)
    allocator->max_slab_chunk_size_for_magazine_cache = 0;      /* non-optimized cases */
}

static inline guint
allocator_categorize (gsize aligned_chunk_size)
{
  /* speed up the likely path */
  if (G_LIKELY (aligned_chunk_size && aligned_chunk_size <= allocator->max_slab_chunk_size_for_magazine_cache))
    return 1;           /* use magazine cache */

  /* the above will fail (max_slab_chunk_size_for_magazine_cache == 0) if the
   * allocator is still uninitialized, or if we are not configured to use the
   * magazine cache. */
  if (!sys_page_size)
    g_slice_init_nomessage ();
  if (!allocator->config.always_malloc &&
      aligned_chunk_size &&
      aligned_chunk_size <= MAX_SLAB_CHUNK_SIZE (allocator))
    {
      if (allocator->config.bypass_magazines)
        return 2;       /* use slab allocator, see [2] */
      return 1;         /* use magazine cache */
    }
  return 0;             /* use malloc() */
}

void
_g_slice_thread_init_nomessage (void)
{
  /* we may not use g_error() or friends here */
  if (!sys_page_size)
    g_slice_init_nomessage();
  private_thread_memory = g_private_new (private_thread_memory_cleanup);
  allocator->magazine_mutex = g_mutex_new();
  allocator->slab_mutex = g_mutex_new();
}

static inline void
g_mutex_lock_a (GMutex *mutex,
                guint  *contention_counter)
{
  gboolean contention = FALSE;
  if (!g_mutex_trylock (mutex))
    {
      g_mutex_lock (mutex);
      contention = TRUE;
    }
  if (contention)
    {
      allocator->mutex_counter++;
      if (allocator->mutex_counter >= 1)        /* quickly adapt to contention */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MIN (*contention_counter + 1, MAX_MAGAZINE_SIZE);
        }
    }
  else /* !contention */
    {
      allocator->mutex_counter--;
      if (allocator->mutex_counter < -11)       /* moderately recover magazine sizes */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MAX (*contention_counter, 1) - 1;
        }
    }
}

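/* illustrative dynamics: starting from mutex_counter == 0, a single
 * contended lock pushes the counter to 1 and immediately bumps
 * *contention_counter (bounded by MAX_MAGAZINE_SIZE), whereas 12
 * consecutive uncontended locks are needed (0 down past -11) to take one
 * step back, i.e. magazine sizes grow quickly under contention and shrink
 * only moderately once contention ceases.
 */
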
static inline ThreadMemory*
thread_memory_from_self (void)
{
  ThreadMemory *tmem = g_private_get (private_thread_memory);
  if (G_UNLIKELY (!tmem))
    {
      const guint n_magazines = MAX_SLAB_INDEX (allocator);
      tmem = g_malloc0 (sizeof (ThreadMemory) + sizeof (Magazine) * 2 * n_magazines);
      tmem->magazine1 = (Magazine*) (tmem + 1);
      tmem->magazine2 = &tmem->magazine1[n_magazines];
      g_private_set (private_thread_memory, tmem);
    }
  return tmem;
}

static inline ChunkLink*
magazine_chain_pop_head (ChunkLink **magazine_chunks)
{
  /* magazine chains are linked via ChunkLink->next.
   * each ChunkLink->data of the toplevel chain may point to a subchain,
   * linked via ChunkLink->next. ChunkLink->data of the subchains just
   * contains uninitialized junk.
   */
  ChunkLink *chunk = (*magazine_chunks)->data;
  if (G_UNLIKELY (chunk))
    {
      /* allocating from freed list */
      (*magazine_chunks)->data = chunk->next;
    }
  else
    {
      chunk = *magazine_chunks;
      *magazine_chunks = chunk->next;
    }
  return chunk;
}

#if 0 /* useful for debugging */
static guint
magazine_count (ChunkLink *head)
{
  guint count = 0;
  if (!head)
    return 0;
  while (head)
    {
      ChunkLink *child;
      count += 1;
      for (child = head->data; child; child = child->next)
        count += 1;
      head = head->next;
    }
  return count;
}
#endif

static inline gsize
allocator_get_magazine_threshold (Allocator *allocator,
                                  guint      ix)
{
  /* the magazine size calculated here has a lower bound of MIN_MAGAZINE_SIZE,
   * which is required by the implementation. also, for moderately sized chunks
   * (say >= 64 bytes), magazine sizes shouldn't be much smaller than the number
   * of chunks available per page/2 to avoid excessive traffic in the magazine
   * cache for small to medium sized structures.
   * the upper bound of the magazine size is effectively provided by
   * MAX_MAGAZINE_SIZE. for larger chunks, this number is scaled down so that
   * the content of a single magazine doesn't exceed ca. 16KB.
   */
  gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
  guint threshold = MAX (MIN_MAGAZINE_SIZE, allocator->max_page_size / MAX (5 * chunk_size, 5 * 32));
  guint contention_counter = allocator->contention_counters[ix];
  if (G_UNLIKELY (contention_counter))  /* single CPU bias */
    {
      /* adapt contention counter thresholds to chunk sizes */
      contention_counter = contention_counter * 64 / chunk_size;
      threshold = MAX (threshold, contention_counter);
    }
  return threshold;
}

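/* worked example (illustrative, no contention): with max_page_size == 8192
 * and chunk_size == 64, threshold == MAX (4, 8192 / MAX (320, 160)) == 25,
 * i.e. about 25 * 64 == 1600 bytes of chunks per magazine; for chunks
 * smaller than 32 bytes the MAX (..., 5 * 32) term caps the divisor instead.
 */
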
/* --- magazine cache --- */
static inline void
magazine_cache_update_stamp (void)
{
  if (allocator->stamp_counter >= MAX_STAMP_COUNTER)
    {
      GTimeVal tv;
      g_get_current_time (&tv);
      allocator->last_stamp = tv.tv_sec * 1000 + tv.tv_usec / 1000;    /* milliseconds */
      allocator->stamp_counter = 0;
    }
  else
    allocator->stamp_counter++;
}

static inline ChunkLink*
magazine_chain_prepare_fields (ChunkLink *magazine_chunks)
{
  ChunkLink *chunk1;
  ChunkLink *chunk2;
  ChunkLink *chunk3;
  ChunkLink *chunk4;
  /* checked upon initialization: mem_assert (MIN_MAGAZINE_SIZE >= 4); */
  /* ensure a magazine with at least 4 unused data pointers */
  chunk1 = magazine_chain_pop_head (&magazine_chunks);
  chunk2 = magazine_chain_pop_head (&magazine_chunks);
  chunk3 = magazine_chain_pop_head (&magazine_chunks);
  chunk4 = magazine_chain_pop_head (&magazine_chunks);
  chunk4->next = magazine_chunks;
  chunk3->next = chunk4;
  chunk2->next = chunk3;
  chunk1->next = chunk2;
  return chunk1;
}

/* access the first 4 fields of a specially prepared magazine chain */
#define magazine_chain_prev(mc)         ((mc)->data)
#define magazine_chain_stamp(mc)        ((mc)->next->data)
#define magazine_chain_uint_stamp(mc)   GPOINTER_TO_UINT ((mc)->next->data)
#define magazine_chain_next(mc)         ((mc)->next->next->data)
#define magazine_chain_count(mc)        ((mc)->next->next->next->data)

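/* resulting chain layout (illustrative), with C1..C4 being the four chunks
 * set aside by magazine_chain_prepare_fields() and C1 the magazine head:
 *   C1->data: prev magazine    C1->next: C2
 *   C2->data: stamp            C2->next: C3
 *   C3->data: next magazine    C3->next: C4
 *   C4->data: chunk count      C4->next: remaining magazine chunks
 */
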
static void
magazine_cache_trim (Allocator *allocator,
                     guint      ix,
                     guint      stamp)
{
  /* g_mutex_lock (allocator->mutex); done by caller */
  /* trim magazine cache from tail */
  ChunkLink *current = magazine_chain_prev (allocator->magazines[ix]);
  ChunkLink *trash = NULL;
  while (ABS (stamp - magazine_chain_uint_stamp (current)) >= allocator->config.working_set_msecs)
    {
      /* unlink */
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      /* clear special fields, put on trash stack */
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      magazine_chain_prev (current) = trash;
      trash = current;
      /* fixup list head if required */
      if (current == allocator->magazines[ix])
        {
          allocator->magazines[ix] = NULL;
          break;
        }
      current = prev;
    }
  g_mutex_unlock (allocator->magazine_mutex);
  /* free trash */
  if (trash)
    {
      const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      g_mutex_lock (allocator->slab_mutex);
      while (trash)
        {
          current = trash;
          trash = magazine_chain_prev (current);
          magazine_chain_prev (current) = NULL; /* clear special field */
          while (current)
            {
              ChunkLink *chunk = magazine_chain_pop_head (&current);
              slab_allocator_free_chunk (chunk_size, chunk);
            }
        }
      g_mutex_unlock (allocator->slab_mutex);
    }
}

static void
magazine_cache_push_magazine (guint      ix,
                              ChunkLink *magazine_chunks,
                              gsize      count) /* must be >= MIN_MAGAZINE_SIZE */
{
  ChunkLink *current = magazine_chain_prepare_fields (magazine_chunks);
  ChunkLink *next, *prev;
  g_mutex_lock (allocator->magazine_mutex);
  /* add magazine at head */
  next = allocator->magazines[ix];
  if (next)
    prev = magazine_chain_prev (next);
  else
    next = prev = current;
  magazine_chain_next (prev) = current;
  magazine_chain_prev (next) = current;
  magazine_chain_prev (current) = prev;
  magazine_chain_next (current) = next;
  magazine_chain_count (current) = (gpointer) count;
  /* stamp magazine */
  magazine_cache_update_stamp();
  magazine_chain_stamp (current) = GUINT_TO_POINTER (allocator->last_stamp);
  allocator->magazines[ix] = current;
  /* free old magazines beyond a certain threshold */
  magazine_cache_trim (allocator, ix, allocator->last_stamp);
  /* g_mutex_unlock (allocator->mutex); was done by magazine_cache_trim() */
}

static ChunkLink*
magazine_cache_pop_magazine (guint  ix,
                             gsize *countp)
{
  g_mutex_lock_a (allocator->magazine_mutex, &allocator->contention_counters[ix]);
  if (!allocator->magazines[ix])
    {
      guint magazine_threshold = allocator_get_magazine_threshold (allocator, ix);
      gsize i, chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      ChunkLink *chunk, *head;
      g_mutex_unlock (allocator->magazine_mutex);
      g_mutex_lock (allocator->slab_mutex);
      head = slab_allocator_alloc_chunk (chunk_size);
      head->data = NULL;
      chunk = head;
      for (i = 1; i < magazine_threshold; i++)
        {
          chunk->next = slab_allocator_alloc_chunk (chunk_size);
          chunk = chunk->next;
          chunk->data = NULL;
        }
      chunk->next = NULL;
      g_mutex_unlock (allocator->slab_mutex);
      *countp = i;
      return head;
    }
  else
    {
      ChunkLink *current = allocator->magazines[ix];
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      /* unlink */
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      allocator->magazines[ix] = next == current ? NULL : next;
      g_mutex_unlock (allocator->magazine_mutex);
      /* clear special fields and hand out */
      *countp = (gsize) magazine_chain_count (current);
      magazine_chain_prev (current) = NULL;
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      return current;
    }
}

/* --- thread magazines --- */
static void
private_thread_memory_cleanup (gpointer data)
{
  ThreadMemory *tmem = data;
  const guint n_magazines = MAX_SLAB_INDEX (allocator);
  guint ix;
  for (ix = 0; ix < n_magazines; ix++)
    {
      Magazine *mags[2];
      guint j;
      mags[0] = &tmem->magazine1[ix];
      mags[1] = &tmem->magazine2[ix];
      for (j = 0; j < 2; j++)
        {
          Magazine *mag = mags[j];
          if (mag->count >= MIN_MAGAZINE_SIZE)  /* release through magazine cache */
            magazine_cache_push_magazine (ix, mag->chunks, mag->count);
          else                                  /* release through slab allocator */
            {
              const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
              g_mutex_lock (allocator->slab_mutex);
              while (mag->chunks)
                {
                  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
                  slab_allocator_free_chunk (chunk_size, chunk);
                }
              g_mutex_unlock (allocator->slab_mutex);
            }
        }
    }
  g_free (tmem);
}

static void
thread_memory_magazine1_reload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  mem_assert (mag->chunks == NULL); /* ensure that we may reset mag->count */
  mag->count = 0;
  mag->chunks = magazine_cache_pop_magazine (ix, &mag->count);
}

static void
thread_memory_magazine2_unload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine2[ix];
  magazine_cache_push_magazine (ix, mag->chunks, mag->count);
  mag->chunks = NULL;
  mag->count = 0;
}

static inline void
thread_memory_swap_magazines (ThreadMemory *tmem,
                              guint         ix)
{
  Magazine xmag = tmem->magazine1[ix];
  tmem->magazine1[ix] = tmem->magazine2[ix];
  tmem->magazine2[ix] = xmag;
}

static inline gboolean
thread_memory_magazine1_is_empty (ThreadMemory *tmem,
                                  guint         ix)
{
  return tmem->magazine1[ix].chunks == NULL;
}

static inline gboolean
thread_memory_magazine2_is_full (ThreadMemory *tmem,
                                 guint         ix)
{
  return tmem->magazine2[ix].count >= allocator_get_magazine_threshold (allocator, ix);
}

static inline gpointer
thread_memory_magazine1_alloc (ThreadMemory *tmem,
                               guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
  if (G_LIKELY (mag->count > 0))
    mag->count--;
  return chunk;
}

static inline void
thread_memory_magazine2_free (ThreadMemory *tmem,
                              guint         ix,
                              gpointer      mem)
{
  Magazine *mag = &tmem->magazine2[ix];
  ChunkLink *chunk = mem;
  chunk->data = NULL;
  chunk->next = mag->chunks;
  mag->chunks = chunk;
  mag->count++;
}

/* --- API functions --- */
gpointer
g_slice_alloc (gsize mem_size)
{
  gsize chunk_size;
  gpointer mem;
  guint acat;
  chunk_size = P2ALIGN (mem_size);
  acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1))     /* allocate through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
            thread_memory_magazine1_reload (tmem, ix);
        }
      mem = thread_memory_magazine1_alloc (tmem, ix);
    }
  else if (acat == 2)           /* allocate through slab allocator */
    {
      g_mutex_lock (allocator->slab_mutex);
      mem = slab_allocator_alloc_chunk (chunk_size);
      g_mutex_unlock (allocator->slab_mutex);
    }
  else                          /* delegate to system malloc */
    mem = g_malloc (mem_size);
  return mem;
}

gpointer
g_slice_alloc0 (gsize mem_size)
{
  gpointer mem = g_slice_alloc (mem_size);
  if (mem)
    memset (mem, 0, mem_size);
  return mem;
}

void
g_slice_free1 (gsize    mem_size,
               gpointer mem_block)
{
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_UNLIKELY (!mem_block))
    /* pass */;
  else if (G_LIKELY (acat == 1))        /* free through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            thread_memory_magazine2_unload (tmem, ix);
        }
      thread_memory_magazine2_free (tmem, ix, mem_block);
    }
  else if (acat == 2)                   /* free through slab allocator */
    {
      g_mutex_lock (allocator->slab_mutex);
      slab_allocator_free_chunk (chunk_size, mem_block);
      g_mutex_unlock (allocator->slab_mutex);
    }
  else                                  /* delegate to system free */
    g_free (mem_block);
}

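/* note: as with all GSlice release functions, the mem_size passed above must
 * match the size used for the allocation, since it selects the slab index
 * the chunk is returned to; e.g. memory obtained via
 * g_slice_alloc (sizeof (GList)) must be released via
 * g_slice_free1 (sizeof (GList), mem).
 */
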
void
g_slice_free_chain_with_offset (gsize    mem_size,
                                gpointer mem_chain,
                                gsize    next_offset)
{
  gpointer slice = mem_chain;
  /* while the thread magazines and the magazine cache are implemented so that
   * they can easily be extended to allow for free lists containing more free
   * lists for the first level nodes, which would allow O(1) freeing in this
   * function, the benefit of such an extension is questionable, because:
   * - the magazine size counts will become mere lower bounds which confuses
   *   the code adapting to lock contention;
   * - freeing a single node to the thread magazines is very fast, so this
   *   O(list_length) operation is multiplied by a fairly small factor;
   * - memory usage histograms on larger applications seem to indicate that
   *   the amount of released multi node lists is negligible in comparison
   *   to single node releases.
   * - the major performance bottleneck, namely g_private_get() or
   *   g_mutex_lock()/g_mutex_unlock() has already been moved out of the
   *   inner loop for freeing chained slices.
   */
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1))             /* free through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      while (slice)
        {
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            {
              thread_memory_swap_magazines (tmem, ix);
              if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
                thread_memory_magazine2_unload (tmem, ix);
            }
          thread_memory_magazine2_free (tmem, ix, current);
        }
    }
  else if (acat == 2)                   /* free through slab allocator */
    {
      g_mutex_lock (allocator->slab_mutex);
      while (slice)
        {
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          slab_allocator_free_chunk (chunk_size, current);
        }
      g_mutex_unlock (allocator->slab_mutex);
    }
  else                                  /* delegate to system free */
    while (slice)
      {
        guint8 *current = slice;
        slice = *(gpointer*) (current + next_offset);
        g_free (current);
      }
}

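/* usage sketch (disabled, for illustration only): releasing a whole singly
 * linked list of equally sized nodes in one call, with each node linking to
 * its successor through the `next` field:
 */
#if 0
typedef struct Node Node;
struct Node { Node *next; int payload; };
static void
free_node_list (Node *head)
{
  g_slice_free_chain_with_offset (sizeof (Node), head,
                                  G_STRUCT_OFFSET (Node, next));
}
#endif
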
/* --- slab allocator --- */
static void
allocator_slab_stack_push (Allocator *allocator,
                           guint      ix,
                           SlabInfo  *sinfo)
{
  /* insert slab at slab ring head */
  if (!allocator->slab_stack[ix])
    sinfo->next = sinfo->prev = sinfo;
  else
    {
      SlabInfo *next = allocator->slab_stack[ix], *prev = next->prev;
      next->prev = sinfo;
      prev->next = sinfo;
      sinfo->next = next;
      sinfo->prev = prev;
    }
  allocator->slab_stack[ix] = sinfo;
}

static gsize
allocator_aligned_page_size (Allocator *allocator,
                             gsize      n_bytes)
{
  gsize val = 1 << g_bit_storage (n_bytes - 1);
  val = MAX (val, allocator->min_page_size);
  return val;
}

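/* worked example (illustrative): for n_bytes == 1500, g_bit_storage (1499)
 * yields 11, so val == 1 << 11 == 2048, which is then raised to
 * min_page_size if that happens to be larger.
 */
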
static void
allocator_add_slab (Allocator *allocator,
                    guint      ix,
                    gsize      chunk_size)
{
  ChunkLink *chunk;
  SlabInfo *sinfo;
  guint i;
  gsize addr, padding, n_chunks, color = 0;
  gsize page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
  /* allocate 1 page for the chunks and the slab */
  gpointer aligned_memory = allocator_memalign (page_size, page_size - NATIVE_MALLOC_PADDING);
  guint8 *mem = aligned_memory;
  if (!mem)
    {
      const gchar *syserr = "unknown error";
#if HAVE_ERRNO_H
      syserr = strerror (errno);
#endif
      mem_error ("failed to allocate %u bytes (alignment: %u): %s\n",
                 (guint) (page_size - NATIVE_MALLOC_PADDING), (guint) page_size, syserr);
    }
  /* mask page address */
  addr = ((gsize) mem / page_size) * page_size;
  /* assert alignment */
  mem_assert (aligned_memory == (gpointer) addr);
  /* basic slab info setup */
  sinfo = (SlabInfo*) (mem + page_size - SLAB_INFO_SIZE);
  sinfo->n_allocated = 0;
  sinfo->chunks = NULL;
  /* figure cache colorization */
  n_chunks = ((guint8*) sinfo - mem) / chunk_size;
  padding = ((guint8*) sinfo - mem) - n_chunks * chunk_size;
  if (padding)
    {
      color = (allocator->color_accu * P2ALIGNMENT) % padding;
      allocator->color_accu += allocator->config.color_increment;
    }
  /* add chunks to free list */
  chunk = (ChunkLink*) (mem + color);
  sinfo->chunks = chunk;
  for (i = 0; i < n_chunks - 1; i++)
    {
      chunk->next = (ChunkLink*) ((guint8*) chunk + chunk_size);
      chunk = chunk->next;
    }
  chunk->next = NULL;   /* last chunk */
  /* add slab to slab ring */
  allocator_slab_stack_push (allocator, ix, sinfo);
}

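/* colorization sketch (illustrative): the padding computed above is the
 * slack left on the page after n_chunks chunks and the SlabInfo tail. with
 * color_increment == 1, successive slabs of the same chunk size start their
 * first chunk at offsets 0, P2ALIGNMENT, 2 * P2ALIGNMENT, ... (modulo
 * padding), spreading equally indexed chunks of different slabs across
 * different processor cache lines.
 */
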
static gpointer
slab_allocator_alloc_chunk (gsize chunk_size)
{
  ChunkLink *chunk;
  guint ix = SLAB_INDEX (allocator, chunk_size);
  /* ensure non-empty slab */
  if (!allocator->slab_stack[ix] || !allocator->slab_stack[ix]->chunks)
    allocator_add_slab (allocator, ix, chunk_size);
  /* allocate chunk */
  chunk = allocator->slab_stack[ix]->chunks;
  allocator->slab_stack[ix]->chunks = chunk->next;
  allocator->slab_stack[ix]->n_allocated++;
  /* rotate empty slabs */
  if (!allocator->slab_stack[ix]->chunks)
    allocator->slab_stack[ix] = allocator->slab_stack[ix]->next;
  return chunk;
}

static void
slab_allocator_free_chunk (gsize    chunk_size,
                           gpointer mem)
{
  ChunkLink *chunk;
  gboolean was_empty;
  guint ix = SLAB_INDEX (allocator, chunk_size);
  gsize page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
  gsize addr = ((gsize) mem / page_size) * page_size;
  /* mask page address */
  guint8 *page = (guint8*) addr;
  SlabInfo *sinfo = (SlabInfo*) (page + page_size - SLAB_INFO_SIZE);
  /* assert valid chunk count */
  mem_assert (sinfo->n_allocated > 0);
  /* add chunk to free list */
  was_empty = sinfo->chunks == NULL;
  chunk = (ChunkLink*) mem;
  chunk->next = sinfo->chunks;
  sinfo->chunks = chunk;
  sinfo->n_allocated--;
  /* keep slab ring partially sorted, empty slabs at end */
  if (was_empty)
    {
      /* unlink slab */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      next->prev = prev;
      prev->next = next;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* insert slab at head */
      allocator_slab_stack_push (allocator, ix, sinfo);
    }
  /* eagerly free complete unused slabs */
  if (!sinfo->n_allocated)
    {
      /* unlink slab */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      next->prev = prev;
      prev->next = next;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* free slab */
      allocator_memfree (page_size, page);
    }
}

/* --- memalign implementation --- */
#ifdef HAVE_MALLOC_H
#include <malloc.h>             /* memalign() */
#endif

/* from config.h:
 * define HAVE_POSIX_MEMALIGN           1 // if free(posix_memalign(3)) works, <stdlib.h>
 * define HAVE_COMPLIANT_POSIX_MEMALIGN 1 // if free(posix_memalign(3)) works for sizes != 2^n, <stdlib.h>
 * define HAVE_MEMALIGN                 1 // if free(memalign(3)) works, <malloc.h>
 * define HAVE_VALLOC                   1 // if free(valloc(3)) works, <stdlib.h> or <malloc.h>
 * if none is provided, we implement malloc(3)-based alloc-only page alignment
 */

#if !(HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC)
static GTrashStack *compat_valloc_trash = NULL;
#endif

static gpointer
allocator_memalign (gsize alignment,
                    gsize memsize)
{
  gpointer aligned_memory = NULL;
  gint err = ENOMEM;
#if HAVE_COMPLIANT_POSIX_MEMALIGN
  err = posix_memalign (&aligned_memory, alignment, memsize);
#elif HAVE_MEMALIGN
  errno = 0;
  aligned_memory = memalign (alignment, memsize);
  err = errno;
#elif HAVE_VALLOC
  errno = 0;
  aligned_memory = valloc (memsize);
  err = errno;
#else
  /* simplistic non-freeing page allocator */
  mem_assert (alignment == sys_page_size);
  mem_assert (memsize <= sys_page_size);
  if (!compat_valloc_trash)
    {
      const guint n_pages = 16;
      guint8 *mem = malloc (n_pages * sys_page_size);
      err = errno;
      if (mem)
        {
          gint i = n_pages;
          guint8 *amem = (guint8*) ALIGN ((gsize) mem, sys_page_size);
          if (amem != mem)
            i--;        /* mem wasn't page aligned */
          while (--i >= 0)
            g_trash_stack_push (&compat_valloc_trash, amem + i * sys_page_size);
        }
    }
  aligned_memory = g_trash_stack_pop (&compat_valloc_trash);
#endif
  if (!aligned_memory)
    errno = err;
  return aligned_memory;
}

static void
allocator_memfree (gsize    memsize,
                   gpointer mem)
{
#if HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC
  free (mem);
#else
  mem_assert (memsize <= sys_page_size);
  g_trash_stack_push (&compat_valloc_trash, mem);
#endif
}

static void
mem_error (const char *format,
           ...)
{
  const char *pname;
  va_list args;
  /* at least, put out "MEMORY-ERROR", in case we segfault during the rest of the function */
  fputs ("\n***MEMORY-ERROR***: ", stderr);
  pname = g_get_prgname();
  fprintf (stderr, "%s[%u]: GSlice: ", pname ? pname : "", getpid());
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  fputs ("\n", stderr);
  _exit (1);
}

#define __G_SLICE_C__
#include "galiasdef.c"