#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	512

struct io_cache_entry {
	struct io_wq_work_node node;
};
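
/*
 * Stash an entry for later reuse; returns false if the cache is already
 * full and the caller must free the object itself.
 */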
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      struct io_cache_entry *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		cache->nr_cached++;
		wq_stack_add_head(&entry->node, &cache->list);
		/* KASAN poisons object */
		kasan_slab_free_mempool(entry);
		return true;
	}
	return false;
}
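
/*
 * Pop the most recently cached entry, unpoisoning it for KASAN first;
 * returns NULL if the cache is empty.
 */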
static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->list.next) {
		struct io_cache_entry *entry;

		entry = container_of(cache->list.next, struct io_cache_entry, node);
		kasan_unpoison_range(entry, cache->elem_size);
		cache->list.next = cache->list.next->next;
		cache->nr_cached--;
		return entry;
	}

	return NULL;
}
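
/* Initialise an empty cache capped at max_nr entries of size bytes each. */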
static inline void io_alloc_cache_init(struct io_alloc_cache *cache,
				       unsigned max_nr, size_t size)
{
	cache->list.next = NULL;
	cache->nr_cached = 0;
	cache->max_cached = max_nr;
	cache->elem_size = size;
}
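
/* Drain the cache, handing every remaining entry to @free. */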
static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
					void (*free)(struct io_cache_entry *))
{
	while (1) {
		struct io_cache_entry *entry = io_alloc_cache_get(cache);

		if (!entry)
			break;

		free(entry);
	}
	cache->nr_cached = 0;
}
#endif