// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
		__check_element(pool, element, ksize(element));
	} else if (pool->free == mempool_free_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
		__poison_element(element, ksize(element));
	} else if (pool->alloc == mempool_alloc_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
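
/*
 * Example of the poison layout checked above (an illustrative sketch; the
 * byte values come from <linux/poison.h>): a free 8-byte slab-backed
 * element is filled with POISON_FREE (0x6b) and terminated by POISON_END
 * (0xa5),
 *
 *	6b 6b 6b 6b 6b 6b 6b a5
 *
 * check_element() verifies this pattern when an element leaves the pool
 * and then repaints it with POISON_INUSE (0x5a); any mismatch is reported
 * via poison_error().
 */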

static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_slab_free_mempool(element);
	else if (pool->alloc == mempool_alloc_pages)
		kasan_poison_pages(element, (unsigned long)pool->pool_data,
				   false);
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_unpoison_range(element, __ksize(element));
	else if (pool->alloc == mempool_alloc_pages)
		kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
				     false);
}

static __always_inline void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element);
	check_element(pool, element);
	return element;
}

/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool:      pointer to the memory pool which was initialized with
 *             mempool_init().
 *
 * Free all reserved elements in @pool.  Unlike mempool_destroy(), this
 * function does not free @pool itself.  This function only sleeps if the
 * free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	mempool_exit(pool);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		      mempool_free_t *free_fn, void *pool_data,
		      gfp_t gfp_mask, int node_id)
{
	spin_lock_init(&pool->lock);
	pool->min_nr	= min_nr;
	pool->pool_data = pool_data;
	pool->alloc	= alloc_fn;
	pool->free	= free_fn;
	init_waitqueue_head(&pool->wait);

	pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
					    gfp_mask, node_id);
	if (!pool->elements)
		return -ENOMEM;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_exit(pool);
			return -ENOMEM;
		}
		add_element(pool, element);
	}

	return 0;
}
EXPORT_SYMBOL(mempool_init_node);

/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. embedded
 * in another structure).
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		 mempool_free_t *free_fn, void *pool_data)
{
	return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
				 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init);
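
/*
 * Usage sketch for mempool_init()/mempool_exit() (illustrative only;
 * "struct my_dev" and "my_cmd_cache" are hypothetical names, not part of
 * this file):
 *
 *	struct my_dev {
 *		mempool_t cmd_pool;
 *	};
 *
 *	// setup: reserve 8 commands from a slab cache
 *	ret = mempool_init(&dev->cmd_pool, 8, mempool_alloc_slab,
 *			   mempool_free_slab, my_cmd_cache);
 *	if (ret)
 *		return ret;
 *
 *	// teardown: frees the reserved elements but not the mempool_t,
 *	// which is embedded in the surrounding structure
 *	mempool_exit(&dev->cmd_pool);
 */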

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both the alloc_fn()
 * and the free_fn() functions might sleep - as long as the mempool_alloc()
 * function is not called from IRQ contexts.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
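
/*
 * Usage sketch for mempool_create() with user-defined callbacks
 * (illustrative only; my_alloc() and my_free() are hypothetical):
 *
 *	static void *my_alloc(gfp_t gfp_mask, void *pool_data)
 *	{
 *		return kmalloc(64, gfp_mask);
 *	}
 *
 *	static void my_free(void *element, void *pool_data)
 *	{
 *		kfree(element);
 *	}
 *
 *	pool = mempool_create(16, my_alloc, my_free, NULL);
 *	if (!pool)
 *		return -ENOMEM;
 */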

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;

	if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
			      gfp_mask, node_id)) {
		kfree(pool);
		return NULL;
	}

	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
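
/*
 * Usage sketch for mempool_create_node() (illustrative only; "dev" and
 * "my_cache" are hypothetical): preallocate the reserve on a device's
 * NUMA node so the guaranteed elements are node-local.
 *
 *	pool = mempool_create_node(16, mempool_alloc_slab, mempool_free_slab,
 *				   my_cache, GFP_KERNEL, dev_to_node(dev));
 */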

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
	       pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
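
/*
 * Usage sketch for mempool_resize() (illustrative only): raising and then
 * lowering the guaranteed minimum of an existing pool.
 *
 *	err = mempool_resize(pool, 64);	// growing may complete lazily via
 *					// later mempool_free() calls
 *	...
 *	err = mempool_resize(pool, 16);	// excess reserved elements are freed
 */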

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 *
 * Return: pointer to the allocated element or %NULL on error.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_entry_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
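
/*
 * Usage sketch for mempool_alloc() (illustrative only; "struct my_io" is
 * hypothetical): on an I/O submission path, GFP_NOIO allows sleeping but
 * forbids recursing into the I/O layer, so the call may block until an
 * element is returned to the pool, but it will not fail.
 *
 *	struct my_io *io = mempool_alloc(pool, GFP_NOIO);
 *	...
 *	mempool_free(io, pool);
 */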

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
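
/*
 * Usage sketch for mempool_free() (illustrative only; "my_done" and
 * "struct my_io" are hypothetical): unlike a sleeping mempool_alloc(),
 * returning an element is safe from IRQ context, e.g. a completion
 * handler; the element refills the reserve first whenever curr_nr is
 * below min_nr.
 *
 *	static void my_done(struct my_io *io)
 *	{
 *		mempool_free(io, io->pool);
 *	}
 */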

/*
 * A commonly used alloc and free fn pair.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
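
/*
 * Usage sketch (illustrative only; "my_cache" is hypothetical): these two
 * helpers are normally used through the mempool_create_slab_pool() wrapper
 * from <linux/mempool.h>, which passes the kmem_cache as pool_data:
 *
 *	pool = mempool_create_slab_pool(16, my_cache);
 */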

/*
 * A commonly used alloc and free fn pair that kmallocs/kfrees the amount
 * of memory specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
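
/*
 * Usage sketch (illustrative only): these helpers are normally used through
 * mempool_create_kmalloc_pool() from <linux/mempool.h>; the element size
 * travels in pool_data as a cast integer, as seen in mempool_kmalloc():
 *
 *	pool = mempool_create_kmalloc_pool(4, 256);
 */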

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
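
/*
 * Usage sketch (illustrative only): these helpers are normally used through
 * mempool_create_page_pool() from <linux/mempool.h>; pool_data carries the
 * page order, here order 1 (two contiguous pages per element):
 *
 *	pool = mempool_create_page_pool(8, 1);
 */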