Caches like this tend to grow to their peak size, and then never get any
smaller. Impose a max limit on the size, to prevent it from growing too
big.

A somewhat randomly chosen 512 is the max size we'll allow the cache
to get. If a batch of frees comes in and would bring it over that, we
simply start kfree'ing the surplus.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
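
For illustration, here is a minimal userspace sketch of the pattern described
above, assuming nothing about the io_uring internals: the put path refuses new
entries once the cache holds a fixed maximum, and the caller frees the surplus
itself. All names here (obj_cache, OBJ_CACHE_MAX, obj_release, ...) are made up
for this sketch and are not part of the patch.

#include <stdbool.h>
#include <stdlib.h>

#define OBJ_CACHE_MAX	512	/* illustrative cap, in the spirit of IO_ALLOC_CACHE_MAX */

struct obj {
	struct obj *next;
};

struct obj_cache {
	struct obj *head;
	unsigned int nr_cached;
};

/* Stash an object for reuse; refuse once the cache sits at its cap. */
static bool obj_cache_put(struct obj_cache *cache, struct obj *obj)
{
	if (cache->nr_cached < OBJ_CACHE_MAX) {
		obj->next = cache->head;
		cache->head = obj;
		cache->nr_cached++;
		return true;
	}
	return false;
}

/* Reuse a cached object if one is available, otherwise allocate a fresh one. */
static struct obj *obj_alloc(struct obj_cache *cache)
{
	struct obj *obj = cache->head;

	if (obj) {
		cache->head = obj->next;
		cache->nr_cached--;
		return obj;
	}
	return malloc(sizeof(*obj));
}

/* Free path: prefer the cache, free() the surplus once the cache is full. */
static void obj_release(struct obj_cache *cache, struct obj *obj)
{
	if (!obj_cache_put(cache, obj))
		free(obj);
}

int main(void)
{
	struct obj_cache cache = { .head = NULL, .nr_cached = 0 };
	struct obj *obj = obj_alloc(&cache);

	if (obj)
		obj_release(&cache, obj);	/* goes back on the cache, not to free() */

	/* Teardown: drain whatever the cache still holds, like io_alloc_cache_free(). */
	while ((obj = cache.head) != NULL) {
		cache.head = obj->next;
		free(obj);
	}
	return 0;
}

The part that carries the weight is the bool return from the put: when the
cache is already full, the caller falls back to freeing the object directly,
which is what the io_uring hunk below does with kfree(apoll).
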
struct io_alloc_cache {
	struct hlist_head	list;
+	unsigned int		nr_cached;
};
#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H
+/*
+ * Don't allow the cache to grow beyond this size.
+ */
+#define IO_ALLOC_CACHE_MAX 512
+
struct io_cache_entry {
	struct hlist_node	node;
};
-static inline void io_alloc_cache_put(struct io_alloc_cache *cache,
+static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      struct io_cache_entry *entry)
{
-	hlist_add_head(&entry->node, &cache->list);
+	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
+		cache->nr_cached++;
+		hlist_add_head(&entry->node, &cache->list);
+		return true;
+	}
+	return false;
}
static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
{
	INIT_HLIST_HEAD(&cache->list);
+	cache->nr_cached = 0;
}
static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
					void (*free)(struct io_cache_entry *))
{
	while (!hlist_empty(&cache->list)) {
		struct hlist_node *node = cache->list.first;

		hlist_del(node);
		free(container_of(node, struct io_cache_entry, node));
	}
}

	if (apoll->double_poll)
		kfree(apoll->double_poll);
-	io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache);
+	if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache))
+		kfree(apoll);
	req->flags &= ~REQ_F_POLLED;
}
if (req->flags & IO_REQ_LINK_FLAGS)