/* GLIB sliced memory - fast concurrent memory chunk allocator
 * Copyright (C) 2005 Tim Janik
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#define _XOPEN_SOURCE 600       /* posix_memalign() */
#include <stdlib.h>             /* posix_memalign() */
#include <unistd.h>             /* sysconf() */
#include <assert.h>             /* assert() for nomessage phase */

#include "gmem.h"               /* gslice.h */
#include "gthreadinit.h"

/* the GSlice allocator is split up into 4 layers, roughly modelled after the slab
 * allocator and magazine extensions as outlined in:
 * + [Bonwick94] Jeff Bonwick, The slab allocator: An object-caching kernel
 *   memory allocator. USENIX 1994, http://citeseer.ist.psu.edu/bonwick94slab.html
 * + [Bonwick01] Bonwick and Jonathan Adams, Magazines and vmem: Extending the
 *   slab allocator to many CPUs and arbitrary resources.
 *   USENIX 2001, http://citeseer.ist.psu.edu/bonwick01magazines.html
 * - the thread magazines. for each (aligned) chunk size, a magazine (a list)
 *   of recently freed and soon to be allocated chunks is maintained per thread.
 *   this way, most alloc/free requests can be quickly satisfied from per-thread
 *   free lists which only require one g_private_get() call to retrieve the
 *   thread handle.
 * - the magazine cache. allocating and freeing chunks to/from threads only
 *   occurs at magazine sizes from a global depot of magazines. the depot
 *   maintains a 15 second working set of allocated magazines, so full
 *   magazines are not allocated and released too often.
 *   the chunk size dependent magazine sizes automatically adapt (within limits,
 *   see [3]) to lock contention to properly scale performance across a variety
 *   of machines.
 * - the slab allocator. this allocator allocates slabs (blocks of memory) close
 *   to the system page size or multiples thereof which have to be page aligned.
 *   the blocks are divided into smaller chunks which are used to satisfy
 *   allocations from the upper layers. the space provided by the remainder of
 *   the chunk size division is used for cache colorization (random distribution
 *   of chunk addresses) to improve processor cache utilization. multiple slabs
 *   with the same chunk size are kept in a partially sorted ring to allow O(1)
 *   freeing and allocation of chunks (as long as the allocation of an entirely
 *   new slab can be avoided).
 * - the page allocator. on most modern systems, posix_memalign(3) or
 *   memalign(3) should be available, so this is used to allocate blocks with
 *   system page size based alignments and sizes or multiples thereof.
 *   if no memalign variant is provided, valloc() is used instead and
 *   block sizes are limited to the system page size (no multiples thereof).
 *   as a fallback, on systems without even valloc(), a malloc(3)-based page
 *   allocator with alloc-only behaviour is used.
 *
 * NOTES:
 * [1] some systems' memalign(3) implementations may rely on boundary tagging for
 *     the handed out memory chunks. to avoid excessive page-wise fragmentation,
 *     we reserve 2 * sizeof (void*) per block size for the system's memalign(3),
 *     specified in NATIVE_MALLOC_PADDING.
 * [2] using the slab allocator alone already provides for a fast and efficient
 *     allocator; it doesn't properly scale beyond single-threaded use, though.
 *     also, the slab allocator implements eager free(3)-ing, i.e. it does not
 *     provide any form of caching or working set maintenance. so if used alone,
 *     it's vulnerable to thrashing for sequences of balanced (alloc, free) pairs
 *     at certain thresholds.
 * [3] magazine sizes are bound by an implementation specific minimum size and
 *     a chunk size specific maximum to limit magazine storage sizes to roughly
 *     16KB.
 * [4] allocating ca. 8 chunks per block/page keeps a good balance between
 *     external and internal fragmentation (<= 12.5%) [Bonwick94]
 */
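
/* illustrative usage sketch of the public entry points implemented below;
 * guarded out like the debugging helpers elsewhere in this file. the Point
 * type is a made-up example, not part of GLib.
 */
#if 0
typedef struct { gdouble x, y; } Point;
static void
example_slice_roundtrip (void)
{
  /* sizeof (Point) is rounded up to a P2ALIGNMENT multiple and, on the
   * fast path, served from this thread's magazine without taking a lock */
  Point *p = g_slice_alloc0 (sizeof (Point));
  p->x = p->y = 1.0;
  /* the size must be passed back in; chunks carry no per-chunk header */
  g_slice_free1 (sizeof (Point), p);
}
#endif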

/* --- macros and constants --- */
#define LARGEALIGNMENT          (256)
#define P2ALIGNMENT             (2 * sizeof (gsize))    /* fits 2 pointers (assumed to be 2 * GLIB_SIZEOF_SIZE_T below) */
#define ALIGN(size, base)       ((base) * (gsize) (((size) + (base) - 1) / (base)))
#define NATIVE_MALLOC_PADDING   P2ALIGNMENT             /* per-page padding left for native malloc(3), see [1] */
#define SLAB_INFO_SIZE          P2ALIGN (sizeof (SlabInfo) + NATIVE_MALLOC_PADDING)
#define MAX_MAGAZINE_SIZE       (256)                   /* see [3] and allocator_get_magazine_threshold() for this */
#define MIN_MAGAZINE_SIZE       (4)
#define MAX_STAMP_COUNTER       (7)                     /* distributes the load of gettimeofday() */
#define MAX_SLAB_CHUNK_SIZE(al) (((al)->max_page_size - SLAB_INFO_SIZE) / 8)    /* we want at least 8 chunks per page, see [4] */
#define MAX_SLAB_INDEX(al)      (SLAB_INDEX (al, MAX_SLAB_CHUNK_SIZE (al)) + 1)
#define SLAB_INDEX(al, asize)   ((asize) / P2ALIGNMENT - 1)                     /* asize must be P2ALIGNMENT aligned */
#define SLAB_CHUNK_SIZE(al, ix) (((ix) + 1) * P2ALIGNMENT)
#define SLAB_PAGE_SIZE(al,csz)  (ALIGN (8 * (csz) + SLAB_INFO_SIZE, (al)->min_page_size))

/* optimized version of ALIGN (size, P2ALIGNMENT) */
#if     GLIB_SIZEOF_SIZE_T * 2 == 8  /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0x7) & ~(gsize) 0x7)
#elif   GLIB_SIZEOF_SIZE_T * 2 == 16 /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0xf) & ~(gsize) 0xf)
#else
#define P2ALIGN(size)   ALIGN (size, P2ALIGNMENT)
#endif
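
/* worked example of the macro arithmetic above, assuming a 64-bit system
 * (GLIB_SIZEOF_SIZE_T == 8, hence P2ALIGNMENT == 16):
 */
#if 0
static void
example_alignment_math (void)
{
  g_assert (P2ALIGN (1) == 16);                         /* smallest chunk size */
  g_assert (P2ALIGN (24) == 32);                        /* next 16-byte multiple */
  g_assert (SLAB_INDEX (allocator, 16) == 0);           /* chunk size -> slab index */
  g_assert (SLAB_CHUNK_SIZE (allocator, 0) == 16);      /* slab index -> chunk size */
  g_assert (SLAB_CHUNK_SIZE (allocator, 1) == 32);
}
#endif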

/* --- structures --- */
typedef struct _ChunkLink      ChunkLink;
typedef struct _SlabInfo       SlabInfo;
typedef struct _CachedMagazine CachedMagazine;
struct _ChunkLink {
  ChunkLink *next;
  ChunkLink *data;
};
struct _SlabInfo {
  ChunkLink *chunks;
  guint      n_allocated;
  SlabInfo  *next, *prev;
};
typedef struct {
  ChunkLink *chunks;
  gsize      count;                     /* approximate chunk list length */
} Magazine;
typedef struct {
  Magazine *magazine1;                  /* array of MAX_SLAB_INDEX (allocator) */
  Magazine *magazine2;                  /* array of MAX_SLAB_INDEX (allocator) */
} ThreadMemory;
typedef struct {
  gboolean always_malloc;
  gboolean bypass_magazines;
  gboolean always_free;
  gsize    working_set_msecs;
} SliceConfig;
typedef struct {
  /* const after initialization */
  gsize         min_page_size, max_page_size;
  SliceConfig   config;
  guint         max_slab_chunk_size_for_magazine_cache;
  /* magazine cache */
  GMutex       *magazine_mutex;
  ChunkLink   **magazines;              /* array of MAX_SLAB_INDEX (allocator) */
  guint        *contention_counters;    /* array of MAX_SLAB_INDEX (allocator) */
  gint          mutex_counter;
  guint         stamp_counter;
  guint         last_stamp;
  /* slab allocator */
  GMutex       *slab_mutex;
  SlabInfo    **slab_stack;             /* array of MAX_SLAB_INDEX (allocator) */
  guint         color_accu;
} Allocator;

/* --- prototypes --- */
static gpointer     slab_allocator_alloc_chunk  (gsize      chunk_size);
static void         slab_allocator_free_chunk   (gsize      chunk_size,
                                                 gpointer   mem);
static void         private_thread_memory_cleanup (gpointer data);
static gpointer     allocator_memalign          (gsize      alignment,
                                                 gsize      memsize);
static void         allocator_memfree           (gsize      memsize,
                                                 gpointer   mem);
static inline void  magazine_cache_update_stamp (void);
static inline gsize allocator_get_magazine_threshold (Allocator *allocator,
                                                      guint      ix);

/* --- variables --- */
static GPrivate   *private_thread_memory = NULL;
static gsize       sys_page_size = 0;
static Allocator   allocator[1] = { { 0, }, };
static SliceConfig slice_config = {
  FALSE,        /* always_malloc */
  FALSE,        /* bypass_magazines */
  FALSE,        /* always_free */
  15 * 1000,    /* working_set_msecs */
};

/* --- auxiliary functions --- */
void
g_slice_set_config (GSliceConfig ckey,
                    gint64       value)
{
  g_return_if_fail (sys_page_size == 0);
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      slice_config.always_malloc = value != 0;
      break;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      slice_config.bypass_magazines = value != 0;
      break;
    case G_SLICE_CONFIG_ALWAYS_FREE:
      slice_config.always_free = value != 0;
      break;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      slice_config.working_set_msecs = value;
      break;
    }
}

gint64
g_slice_get_config (GSliceConfig ckey)
{
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      return slice_config.always_malloc;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      return slice_config.bypass_magazines;
    case G_SLICE_CONFIG_ALWAYS_FREE:
      return slice_config.always_free;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      return slice_config.working_set_msecs;
    case G_SLICE_CONFIG_CHUNK_SIZES:
      return MAX_SLAB_INDEX (allocator);
    default:
      return 0;
    }
}
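
/* configuration sketch: g_slice_set_config() must run before the first
 * slice allocation in the process, because it bails out once sys_page_size
 * has been initialized. the values below are examples, not recommendations.
 */
#if 0
static void
example_configure_slices (void)
{
  /* route all requests to g_malloc(), e.g. so memory debuggers see
   * every allocation individually */
  g_slice_set_config (G_SLICE_CONFIG_ALWAYS_MALLOC, 1);
  /* or: shrink the magazine working set from the default 15000 msecs */
  g_slice_set_config (G_SLICE_CONFIG_WORKING_SET_MSECS, 5000);
}
#endif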

gint64*
g_slice_get_config_state (GSliceConfig ckey,
                          gint64       address,
                          guint       *n_values)
{
  guint i = 0;
  g_return_val_if_fail (n_values != NULL, NULL);
  switch (ckey)
    {
      gint64 array[64];
    case G_SLICE_CONFIG_CONTENTION_COUNTER:
      array[i++] = SLAB_CHUNK_SIZE (allocator, address);
      array[i++] = allocator->contention_counters[address];
      array[i++] = allocator_get_magazine_threshold (allocator, address);
      *n_values = i;
      return g_memdup (array, sizeof (array[0]) * *n_values);
    default:
      return NULL;
    }
}

static void
g_slice_init_nomessage (void)
{
  /* we may not use g_error() or friends here */
  assert (sys_page_size == 0);
  sys_page_size = sysconf (_SC_PAGESIZE); /* = sysconf (_SC_PAGE_SIZE); = getpagesize(); */
  assert (sys_page_size >= 2 * LARGEALIGNMENT);
  allocator->config = slice_config;
  allocator->min_page_size = sys_page_size;
#if HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN
  /* allow allocation of pages up to 8KB (with 8KB alignment).
   * this is useful because many medium to large sized structures
   * fit less than 8 times (see [4]) into 4KB pages.
   */
  allocator->min_page_size = MAX (allocator->min_page_size, 4096);
  allocator->max_page_size = MAX (allocator->min_page_size, 8192);
#else
  /* we can only align to system page size */
  allocator->max_page_size = sys_page_size;
#endif
  allocator->magazine_mutex = NULL;     /* _g_slice_thread_init_nomessage() */
  allocator->magazines = g_new0 (ChunkLink*, MAX_SLAB_INDEX (allocator));
  allocator->contention_counters = g_new0 (guint, MAX_SLAB_INDEX (allocator));
  allocator->mutex_counter = 0;
  allocator->stamp_counter = MAX_STAMP_COUNTER;         /* force initial update */
  allocator->last_stamp = 0;
  allocator->slab_mutex = NULL;         /* _g_slice_thread_init_nomessage() */
  allocator->slab_stack = g_new0 (SlabInfo*, MAX_SLAB_INDEX (allocator));
  allocator->color_accu = 0;
  magazine_cache_update_stamp();
  /* values cached for performance reasons */
  allocator->max_slab_chunk_size_for_magazine_cache = MAX_SLAB_CHUNK_SIZE (allocator);
  if (allocator->config.always_malloc || allocator->config.bypass_magazines)
    allocator->max_slab_chunk_size_for_magazine_cache = 0;      /* non-optimized cases */
}

static inline guint
allocator_categorize (gsize aligned_chunk_size)
{
  /* speed up the likely path */
  if (G_LIKELY (aligned_chunk_size && aligned_chunk_size <= allocator->max_slab_chunk_size_for_magazine_cache))
    return 1;           /* use magazine cache */

  /* the above will fail (max_slab_chunk_size_for_magazine_cache == 0) if the
   * allocator is still uninitialized, or if we are not configured to use the
   * magazine cache. */
  if (!sys_page_size)
    g_slice_init_nomessage ();
  if (!allocator->config.always_malloc &&
      aligned_chunk_size &&
      aligned_chunk_size <= MAX_SLAB_CHUNK_SIZE (allocator))
    {
      if (allocator->config.bypass_magazines)
        return 2;       /* use slab allocator, see [2] */
      return 1;         /* use magazine cache */
    }
  return 0;             /* use malloc() */
}

void
_g_slice_thread_init_nomessage (void)
{
  /* we may not use g_error() or friends here */
  if (!sys_page_size)
    g_slice_init_nomessage();
  private_thread_memory = g_private_new (private_thread_memory_cleanup);
  allocator->magazine_mutex = g_mutex_new();
  allocator->slab_mutex = g_mutex_new();
}

static void
g_mutex_lock_a (GMutex *mutex,
                guint  *contention_counter)
{
  gboolean contention = FALSE;
  if (!g_mutex_trylock (mutex))
    {
      g_mutex_lock (mutex);
      contention = TRUE;
    }
  if (contention)
    {
      allocator->mutex_counter++;
      if (allocator->mutex_counter >= 1)        /* quickly adapt to contention */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MIN (*contention_counter + 1, MAX_MAGAZINE_SIZE);
        }
    }
  else  /* !contention */
    {
      allocator->mutex_counter--;
      if (allocator->mutex_counter < -11)       /* moderately recover magazine sizes */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MAX (*contention_counter, 1) - 1;
        }
    }
}
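
/* net effect of the counter hysteresis above: contended acquisitions trip
 * the mutex_counter >= 1 check (almost) immediately and bump the contention
 * counter, while it takes twelve consecutive uncontended acquisitions to
 * reach mutex_counter < -11 and recover one step, so magazine sizes grow
 * quickly under contention and shrink back slowly.
 */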

static inline ThreadMemory*
thread_memory_from_self (void)
{
  ThreadMemory *tmem = g_private_get (private_thread_memory);
  if (G_UNLIKELY (!tmem))
    {
      const guint n_magazines = MAX_SLAB_INDEX (allocator);
      tmem = g_malloc0 (sizeof (ThreadMemory) + sizeof (Magazine) * 2 * n_magazines);
      tmem->magazine1 = (Magazine*) (tmem + 1);
      tmem->magazine2 = &tmem->magazine1[n_magazines];
      g_private_set (private_thread_memory, tmem);
    }
  return tmem;
}

static inline ChunkLink*
magazine_chain_pop_head (ChunkLink **magazine_chunks)
{
  /* magazine chains are linked via ChunkLink->next.
   * each ChunkLink->data of the toplevel chain may point to a subchain,
   * linked via ChunkLink->next. ChunkLink->data of the subchains just
   * contains uninitialized junk.
   */
  ChunkLink *chunk = (*magazine_chunks)->data;
  if (G_UNLIKELY (chunk))
    {
      /* allocating from freed list */
      (*magazine_chunks)->data = chunk->next;
    }
  else
    {
      chunk = *magazine_chunks;
      *magazine_chunks = chunk->next;
    }
  return chunk;
}

#if 0 /* useful for debugging */
static guint
magazine_count (ChunkLink *head)
{
  guint count = 0;
  while (head)
    {
      ChunkLink *child = head->data;
      count += 1;
      for (child = head->data; child; child = child->next)
        count += 1;
      head = head->next;
    }
  return count;
}
#endif

static inline gsize
allocator_get_magazine_threshold (Allocator *allocator,
                                  guint      ix)
{
  /* the magazine size calculated here has a lower bound of MIN_MAGAZINE_SIZE,
   * which is required by the implementation. also, for moderately sized chunks
   * (say >= 64 bytes), magazine sizes shouldn't be much smaller than the number
   * of chunks available per page/2 to avoid excessive traffic in the magazine
   * cache for small to medium sized structures.
   * the upper bound of the magazine size is effectively provided by
   * MAX_MAGAZINE_SIZE. for larger chunks, this number is scaled down so that
   * the content of a single magazine doesn't exceed ca. 16KB.
   */
  gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
  guint threshold = MAX (MIN_MAGAZINE_SIZE, allocator->max_page_size / MAX (5 * chunk_size, 5 * 32));
  guint contention_counter = allocator->contention_counters[ix];
  if (G_UNLIKELY (contention_counter))  /* single CPU bias */
    {
      /* adapt contention counter thresholds to chunk sizes */
      contention_counter = contention_counter * 64 / chunk_size;
      threshold = MAX (threshold, contention_counter);
    }
  return threshold;
}
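
/* worked threshold example, assuming max_page_size == 8192 and a zeroed
 * contention counter:
 *   chunk_size   16: MAX (4, 8192 / MAX (80, 160))   -> 51 chunks
 *   chunk_size   64: MAX (4, 8192 / MAX (320, 160))  -> 25 chunks
 *   chunk_size 1024: MAX (4, 8192 / MAX (5120, 160)) -> 4 chunks
 * i.e. magazines shrink with growing chunk size towards MIN_MAGAZINE_SIZE.
 */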

/* --- magazine cache --- */
static inline void
magazine_cache_update_stamp (void)
{
  if (allocator->stamp_counter >= MAX_STAMP_COUNTER)
    {
      GTimeVal tv;
      g_get_current_time (&tv);
      allocator->last_stamp = tv.tv_sec * 1000 + tv.tv_usec / 1000;     /* milliseconds */
      allocator->stamp_counter = 0;
    }
  else
    allocator->stamp_counter++;
}

static inline ChunkLink*
magazine_chain_prepare_fields (ChunkLink *magazine_chunks)
{
  g_assert (MIN_MAGAZINE_SIZE >= 4);
  /* ensure a magazine with at least 4 unused data pointers */
  ChunkLink *chunk1 = magazine_chain_pop_head (&magazine_chunks);
  ChunkLink *chunk2 = magazine_chain_pop_head (&magazine_chunks);
  ChunkLink *chunk3 = magazine_chain_pop_head (&magazine_chunks);
  ChunkLink *chunk4 = magazine_chain_pop_head (&magazine_chunks);
  chunk4->next = magazine_chunks;
  chunk3->next = chunk4;
  chunk2->next = chunk3;
  chunk1->next = chunk2;
  return chunk1;
}

/* access the first 4 fields of a specially prepared magazine chain */
#define magazine_chain_prev(mc)         ((mc)->data)
#define magazine_chain_stamp(mc)        ((mc)->next->data)
#define magazine_chain_next(mc)         ((mc)->next->next->data)
#define magazine_chain_count(mc)        ((mc)->next->next->next->data)
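
/* resulting layout after magazine_chain_prepare_fields(), sketched for
 * clarity; the chain head is what gets linked into allocator->magazines[ix]:
 *
 *   head ->next-> link2 ->next-> link3 ->next-> link4 ->next-> chunks...
 *    |data         |data          |data          |data
 *    prev          stamp          next           count
 *
 * i.e. the first four ChunkLink->data fields, which hold uninitialized junk
 * in ordinary magazine chunks, are repurposed as depot list bookkeeping.
 */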

static void
magazine_cache_trim (Allocator *allocator,
                     guint      ix,
                     guint      stamp)
{
  /* g_mutex_lock (allocator->mutex); done by caller */
  /* trim magazine cache from tail */
  ChunkLink *current = magazine_chain_prev (allocator->magazines[ix]);
  ChunkLink *trash = NULL;
  while (allocator->config.always_free ||
         ABS (stamp - (guint) magazine_chain_stamp (current)) > allocator->config.working_set_msecs)
    {
      /* unlink */
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      /* clear special fields, put on trash stack */
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      magazine_chain_prev (current) = trash;
      trash = current;
      /* fixup list head if required */
      if (current == allocator->magazines[ix])
        {
          allocator->magazines[ix] = NULL;
          break;
        }
      current = prev;
    }
  g_mutex_unlock (allocator->magazine_mutex);
  /* free trash */
  if (trash)
    {
      const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      g_mutex_lock (allocator->slab_mutex);
      while (trash)
        {
          current = trash;
          trash = magazine_chain_prev (current);
          magazine_chain_prev (current) = NULL; /* clear special field */
          while (current)
            {
              ChunkLink *chunk = magazine_chain_pop_head (&current);
              slab_allocator_free_chunk (chunk_size, chunk);
            }
        }
      g_mutex_unlock (allocator->slab_mutex);
    }
}

static void
magazine_cache_push_magazine (guint      ix,
                              ChunkLink *magazine_chunks,
                              gsize      count)         /* must be >= MIN_MAGAZINE_SIZE */
{
  ChunkLink *current = magazine_chain_prepare_fields (magazine_chunks);
  ChunkLink *next, *prev;
  g_mutex_lock (allocator->magazine_mutex);
  /* add magazine at head */
  next = allocator->magazines[ix];
  if (next)
    prev = magazine_chain_prev (next);
  else
    next = prev = current;
  magazine_chain_next (prev) = current;
  magazine_chain_prev (next) = current;
  magazine_chain_prev (current) = prev;
  magazine_chain_next (current) = next;
  magazine_chain_count (current) = (gpointer) count;
  /* stamp magazine */
  magazine_cache_update_stamp();
  magazine_chain_stamp (current) = (gpointer) allocator->last_stamp;
  allocator->magazines[ix] = current;
  /* free old magazines beyond a certain threshold */
  magazine_cache_trim (allocator, ix, allocator->last_stamp);
  /* g_mutex_unlock (allocator->mutex); was done by magazine_cache_trim() */
}

static ChunkLink*
magazine_cache_pop_magazine (guint  ix,
                             gsize *countp)
{
  g_mutex_lock_a (allocator->magazine_mutex, &allocator->contention_counters[ix]);
  if (!allocator->magazines[ix])
    {
      guint magazine_threshold = allocator_get_magazine_threshold (allocator, ix);
      gsize i, chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      ChunkLink *current = NULL;
      g_mutex_unlock (allocator->magazine_mutex);
      g_mutex_lock (allocator->slab_mutex);
      for (i = 0; i < magazine_threshold; i++)
        {
          ChunkLink *chunk = slab_allocator_alloc_chunk (chunk_size);
          chunk->data = NULL;
          chunk->next = current;
          current = chunk;
        }
      g_mutex_unlock (allocator->slab_mutex);
      *countp = i;
      return current;
    }
  else
    {
      ChunkLink *current = allocator->magazines[ix];
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      /* unlink */
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      allocator->magazines[ix] = next == current ? NULL : next;
      g_mutex_unlock (allocator->magazine_mutex);
      /* clear special fields and hand out */
      *countp = (gsize) magazine_chain_count (current);
      magazine_chain_prev (current) = NULL;
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      return current;
    }
}

/* --- thread magazines --- */
static void
private_thread_memory_cleanup (gpointer data)
{
  ThreadMemory *tmem = data;
  const guint n_magazines = MAX_SLAB_INDEX (allocator);
  guint ix;
  for (ix = 0; ix < n_magazines; ix++)
    {
      Magazine *mags[2];
      guint j;
      mags[0] = &tmem->magazine1[ix];
      mags[1] = &tmem->magazine2[ix];
      for (j = 0; j < 2; j++)
        {
          Magazine *mag = mags[j];
          if (mag->count >= MIN_MAGAZINE_SIZE)
            magazine_cache_push_magazine (ix, mag->chunks, mag->count);
          else
            {
              const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
              g_mutex_lock (allocator->slab_mutex);
              while (mag->chunks)
                {
                  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
                  slab_allocator_free_chunk (chunk_size, chunk);
                }
              g_mutex_unlock (allocator->slab_mutex);
            }
        }
    }
  g_free (tmem);
}

static void
thread_memory_magazine1_reload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  g_assert (mag->chunks == NULL);       /* ensure that we may reset mag->count */
  mag->count = 0;
  mag->chunks = magazine_cache_pop_magazine (ix, &mag->count);
}

static void
thread_memory_magazine2_unload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine2[ix];
  magazine_cache_push_magazine (ix, mag->chunks, mag->count);
  mag->chunks = NULL;
  mag->count = 0;
}

static inline void
thread_memory_swap_magazines (ThreadMemory *tmem,
                              guint         ix)
{
  Magazine xmag = tmem->magazine1[ix];
  tmem->magazine1[ix] = tmem->magazine2[ix];
  tmem->magazine2[ix] = xmag;
}

static inline gboolean
thread_memory_magazine1_is_empty (ThreadMemory *tmem,
                                  guint         ix)
{
  return tmem->magazine1[ix].chunks == NULL;
}

static inline gboolean
thread_memory_magazine2_is_full (ThreadMemory *tmem,
                                 guint         ix)
{
  return tmem->magazine2[ix].count >= allocator_get_magazine_threshold (allocator, ix);
}

static inline gpointer
thread_memory_magazine1_alloc (ThreadMemory *tmem,
                               guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
  if (G_LIKELY (mag->count > 0))
    mag->count--;
  return chunk;
}

static inline void
thread_memory_magazine2_free (ThreadMemory *tmem,
                              guint         ix,
                              gpointer      mem)
{
  Magazine *mag = &tmem->magazine2[ix];
  ChunkLink *chunk = mem;
  chunk->data = NULL;
  chunk->next = mag->chunks;
  mag->chunks = chunk;
  mag->count++;
}

/* --- API functions --- */
gpointer
g_slice_alloc (gsize mem_size)
{
  gsize chunk_size;
  gpointer mem;
  guint acat;
  chunk_size = P2ALIGN (mem_size);
  acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1))     /* allocate through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
            thread_memory_magazine1_reload (tmem, ix);
        }
      mem = thread_memory_magazine1_alloc (tmem, ix);
    }
  else if (acat == 2)           /* allocate through slab allocator */
    {
      g_mutex_lock (allocator->slab_mutex);
      mem = slab_allocator_alloc_chunk (chunk_size);
      g_mutex_unlock (allocator->slab_mutex);
    }
  else                          /* delegate to system malloc */
    mem = g_malloc (mem_size);
  return mem;
}

gpointer
g_slice_alloc0 (gsize mem_size)
{
  gpointer mem = g_slice_alloc (mem_size);
  if (mem)
    memset (mem, 0, mem_size);
  return mem;
}
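
/* callers normally reach the functions above through the type-safe
 * convenience macros from gslice.h; an illustrative expansion (the Pair
 * type is a made-up example):
 */
#if 0
typedef struct { gint a, b; } Pair;
static void
example_slice_new0 (void)
{
  Pair *p = g_slice_new0 (Pair);        /* g_slice_alloc0 (sizeof (Pair)) */
  g_slice_free (Pair, p);               /* g_slice_free1 (sizeof (Pair), p) */
}
#endif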

void
g_slice_free1 (gsize    mem_size,
               gpointer mem_block)
{
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_UNLIKELY (!mem_block))
    /* pass */;
  else if (G_LIKELY (acat == 1))        /* free through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            thread_memory_magazine2_unload (tmem, ix);
        }
      thread_memory_magazine2_free (tmem, ix, mem_block);
    }
  else if (acat == 2)                   /* free through slab allocator */
    {
      g_mutex_lock (allocator->slab_mutex);
      slab_allocator_free_chunk (chunk_size, mem_block);
      g_mutex_unlock (allocator->slab_mutex);
    }
  else                                  /* delegate to system malloc */
    g_free (mem_block);
}

void
g_slice_free_chain (gsize    mem_size,
                    gpointer mem_chain,
                    gsize    next_offset)
{
  GSList *slice = mem_chain;
  g_return_if_fail (next_offset == G_STRUCT_OFFSET (GSList, next));
  g_return_if_fail (mem_size >= sizeof (GSList));
  while (slice)
    {
      GSList *current = slice;
      slice = slice->next;
      g_slice_free1 (mem_size, current);
    }
  /* while the thread magazines and the magazine cache are implemented so that
   * they can easily be extended to allow for free lists containing more free
   * lists for the first level nodes, which would allow O(1) freeing in this
   * function, the benefit of such an extension is questionable, because:
   * - the magazine size counts will become mere lower bounds which confuses
   *   the code adapting to lock contention;
   * - freeing a single node to the thread magazines is very fast, so this
   *   O(list_length) operation is multiplied by a fairly small factor;
   * - memory usage histograms on larger applications seem to indicate that
   *   the amount of released multi node lists is negligible in comparison
   *   to single node releases.
   */
}

/* --- single page allocator --- */
static void
allocator_slab_stack_push (Allocator *allocator,
                           guint      ix,
                           SlabInfo  *sinfo)
{
  /* insert slab at slab ring head */
  if (!allocator->slab_stack[ix])
    {
      sinfo->next = sinfo;
      sinfo->prev = sinfo;
    }
  else
    {
      SlabInfo *next = allocator->slab_stack[ix], *prev = next->prev;
      next->prev = sinfo;
      prev->next = sinfo;
      sinfo->next = next;
      sinfo->prev = prev;
    }
  allocator->slab_stack[ix] = sinfo;
}

static void
allocator_add_slab (Allocator *allocator,
                    guint      ix,
                    gsize      chunk_size)
{
  SlabInfo *sinfo;
  gsize padding, n_chunks, color = 0;
  gsize page_size = SLAB_PAGE_SIZE (allocator, chunk_size);
  /* allocate 1 page for the chunks and the slab */
  gpointer aligned_memory = allocator_memalign (page_size, page_size - NATIVE_MALLOC_PADDING);
  guint8 *mem = aligned_memory;
  guint i;
  if (!mem)
    g_error ("%s: failed to allocate %lu bytes: %s", "GSlicedMemory", (gulong) (page_size - NATIVE_MALLOC_PADDING), g_strerror (errno));
  /* mask page address */
  gsize addr = ((gsize) mem / page_size) * page_size;
  /* assert alignment */
  g_assert (aligned_memory == (gpointer) addr);
  /* basic slab info setup */
  sinfo = (SlabInfo*) (mem + page_size - SLAB_INFO_SIZE);
  sinfo->n_allocated = 0;
  sinfo->chunks = NULL;
  /* figure cache colorization */
  n_chunks = ((guint8*) sinfo - mem) / chunk_size;
  padding = ((guint8*) sinfo - mem) - n_chunks * chunk_size;
  if (padding)
    {
      color = (allocator->color_accu * P2ALIGNMENT) % padding;
      allocator->color_accu += 1;       /* alternatively: + 0x7fffffff */
    }
  /* add chunks to free list */
  ChunkLink *chunk = (ChunkLink*) (mem + color);
  sinfo->chunks = chunk;
  for (i = 0; i < n_chunks - 1; i++)
    {
      chunk->next = (ChunkLink*) ((guint8*) chunk + chunk_size);
      chunk = chunk->next;
    }
  chunk->next = NULL;   /* last chunk */
  /* add slab to slab ring */
  allocator_slab_stack_push (allocator, ix, sinfo);
}
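
/* worked colorization example, assuming a 64-bit system with
 * SLAB_INFO_SIZE == 48 and 4096 byte pages: for chunk_size == 80,
 * n_chunks == (4096 - 48) / 80 == 50, leaving padding == 4048 - 4000 == 48
 * bytes, so successive slabs place their first chunk at color offsets
 * 0, 16, 32, 0, ... which spreads chunk addresses across cache lines.
 */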

static gpointer
slab_allocator_alloc_chunk (gsize chunk_size)
{
  guint ix = SLAB_INDEX (allocator, chunk_size);
  /* ensure non-empty slab */
  if (!allocator->slab_stack[ix] || !allocator->slab_stack[ix]->chunks)
    allocator_add_slab (allocator, ix, chunk_size);
  /* allocate chunk */
  ChunkLink *chunk = allocator->slab_stack[ix]->chunks;
  allocator->slab_stack[ix]->chunks = chunk->next;
  allocator->slab_stack[ix]->n_allocated++;
  /* rotate empty slabs */
  if (!allocator->slab_stack[ix]->chunks)
    allocator->slab_stack[ix] = allocator->slab_stack[ix]->next;
  return chunk;
}

static void
slab_allocator_free_chunk (gsize    chunk_size,
                           gpointer mem)
{
  guint ix = SLAB_INDEX (allocator, chunk_size);
  gsize page_size = SLAB_PAGE_SIZE (allocator, chunk_size);
  /* mask page address */
  gsize addr = ((gsize) mem / page_size) * page_size;
  guint8 *page = (guint8*) addr;
  SlabInfo *sinfo = (SlabInfo*) (page + page_size - SLAB_INFO_SIZE);
  /* assert valid chunk count */
  g_assert (sinfo->n_allocated > 0);
  /* add chunk to free list */
  gboolean was_empty = sinfo->chunks == NULL;
  ChunkLink *chunk = (ChunkLink*) mem;
  chunk->next = sinfo->chunks;
  sinfo->chunks = chunk;
  sinfo->n_allocated--;
  /* keep slab ring partially sorted, empty slabs at end */
  if (was_empty)
    {
      /* unlink slab */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      next->prev = prev;
      prev->next = next;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* insert slab at head */
      allocator_slab_stack_push (allocator, ix, sinfo);
    }
  /* eagerly free complete unused slabs */
  if (!sinfo->n_allocated)
    {
      /* unlink slab */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      next->prev = prev;
      prev->next = next;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* free page */
      allocator_memfree (page_size, page);
    }
}

/* --- memalign implementation --- */
#include <malloc.h>             /* memalign() */

/* from config.h:
 * define HAVE_POSIX_MEMALIGN   1  // if free(posix_memalign(3)) works, <stdlib.h>
 * define HAVE_MEMALIGN         1  // if free(memalign(3)) works, <malloc.h>
 * define HAVE_VALLOC           1  // if free(valloc(3)) works, <stdlib.h> or <malloc.h>
 * if none is provided, we implement malloc(3)-based alloc-only page alignment
 */

#if !(HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC)
static GTrashStack *compat_valloc_trash = NULL;
#endif

static gpointer
allocator_memalign (gsize alignment,
                    gsize memsize)
{
  gpointer aligned_memory = NULL;
  gint err = ENOMEM;
#if HAVE_POSIX_MEMALIGN
  err = posix_memalign (&aligned_memory, alignment, memsize);
#elif HAVE_MEMALIGN
  errno = 0;
  aligned_memory = memalign (alignment, memsize);
  err = errno;
#elif HAVE_VALLOC
  errno = 0;
  aligned_memory = valloc (memsize);
  err = errno;
#else
  /* simplistic non-freeing page allocator */
  g_assert (alignment == sys_page_size);
  g_assert (memsize <= sys_page_size);
  if (!compat_valloc_trash)
    {
      const guint n_pages = 16;
      guint8 *mem = malloc (n_pages * sys_page_size);
      err = errno;
      if (mem)
        {
          gint i = n_pages;
          guint8 *amem = (guint8*) ALIGN ((gsize) mem, sys_page_size);
          if (amem != mem)
            i--;        /* mem wasn't page aligned */
          while (--i >= 0)
            g_trash_stack_push (&compat_valloc_trash, amem + i * sys_page_size);
        }
    }
  aligned_memory = g_trash_stack_pop (&compat_valloc_trash);
#endif
  errno = err;
  return aligned_memory;
}
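
/* minimal standalone sketch of the preferred branch above, assuming
 * posix_memalign(3) is available: alignment must be a power of two and a
 * multiple of sizeof (void*), and the result is released with free(3).
 */
#if 0
static gpointer
example_alloc_aligned_page (gsize page_size)
{
  gpointer mem = NULL;
  if (posix_memalign (&mem, page_size, page_size) != 0)
    return NULL;        /* ENOMEM or EINVAL */
  return mem;           /* page aligned; hand back to free() when done */
}
#endif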

static void
allocator_memfree (gsize    memsize,
                   gpointer mem)
{
#if HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC
  free (mem);
#else
  g_assert (memsize <= sys_page_size);
  g_trash_stack_push (&compat_valloc_trash, mem);
#endif
}

#define __G_SLICE_C__
#include "galiasdef.c"