#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	512

struct io_cache_entry {
	struct io_wq_work_node node;
};
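
/*
 * A cached object embeds a struct io_cache_entry and is recovered from it
 * with container_of(). Entries are kept on a simple LIFO stack; while an
 * object sits in the cache it is poisoned via kasan_slab_free_mempool()
 * and unpoisoned again in io_alloc_cache_get(), so touching a "freed"
 * cached object is still caught by KASAN.
 */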

static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      struct io_cache_entry *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		cache->nr_cached++;
		wq_stack_add_head(&entry->node, &cache->list);
		/* KASAN poisons object */
		kasan_slab_free_mempool(entry);
		return true;
	}
	return false;
}

static inline bool io_alloc_cache_empty(struct io_alloc_cache *cache)
{
	return !cache->list.next;
}

static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->list.next) {
		struct io_cache_entry *entry;

		entry = container_of(cache->list.next, struct io_cache_entry, node);
		kasan_unpoison_range(entry, cache->elem_size);
		cache->list.next = cache->list.next->next;
		cache->nr_cached--;
		return entry;
	}
	return NULL;
}

static inline void io_alloc_cache_init(struct io_alloc_cache *cache,
				       unsigned max_nr, size_t size)
{
	cache->list.next = NULL;
	cache->nr_cached = 0;
	cache->max_cached = max_nr;
	cache->elem_size = size;
}
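
/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 * callers embed the entry in their own type and fall back to the slab
 * allocator when the cache is empty or already full.
 *
 *	struct foo {
 *		struct io_cache_entry cache;
 *		...
 *	};
 *
 *	io_alloc_cache_init(&foo_cache, IO_ALLOC_CACHE_MAX, sizeof(struct foo));
 *
 *	entry = io_alloc_cache_get(&foo_cache);
 *	foo = entry ? container_of(entry, struct foo, cache)
 *		    : kmalloc(sizeof(*foo), GFP_KERNEL);
 *
 *	if (!io_alloc_cache_put(&foo_cache, &foo->cache))
 *		kfree(foo);
 *
 *	io_alloc_cache_free(&foo_cache, foo_cache_free_entry);
 */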

static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
					void (*free)(struct io_cache_entry *))
{
	/* drain and release every cached entry */
	while (1) {
		struct io_cache_entry *entry = io_alloc_cache_get(cache);

		if (!entry)
			break;
		free(entry);
	}
}

#endif