--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -160,6 +160,7 @@ struct io_ev_fd {
struct io_alloc_cache {
struct hlist_head list;
+ unsigned int nr_cached;
};
struct io_ring_ctx {
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -1,4 +1,19 @@
+/*
+ * Don't allow the cache to grow beyond this size.
+ */
+#define IO_ALLOC_CACHE_MAX 512
+
+static inline bool io_alloc_cache_store(struct io_alloc_cache *cache)
+{
+ if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
+ cache->nr_cached++;
+ return true;
+ }
+ return false;
+}
+
static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
{
INIT_HLIST_HEAD(&cache->list);
+ cache->nr_cached = 0;
}
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1181,8 +1181,12 @@ void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
if (apoll->double_poll)
kfree(apoll->double_poll);
- hlist_add_head(&apoll->cache_list,
- &ctx->apoll_cache.list);
+ if (io_alloc_cache_store(&ctx->apoll_cache)) {
+ hlist_add_head(&apoll->cache_list,
+ &ctx->apoll_cache.list);
+ } else {
+ kfree(apoll);
+ }
req->flags &= ~REQ_F_POLLED;
}
if (req->flags & IO_REQ_LINK_FLAGS)
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -593,6 +593,7 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
apoll = hlist_entry(ctx->apoll_cache.list.first,
struct async_poll, cache_list);
hlist_del(&apoll->cache_list);
+ ctx->apoll_cache.nr_cached--;
} else {
apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
if (unlikely(!apoll))
@@ -969,4 +970,5 @@ void io_flush_apoll_cache(struct io_ring_ctx *ctx)
hlist_del(&apoll->cache_list);
kfree(apoll);
}
+ ctx->apoll_cache.nr_cached = 0;
}
Caches like this tend to grow to the peak size, and then never get any
smaller. Impose a max limit on the size, to prevent it from growing too
big. A somewhat randomly chosen 512 is the max size we'll allow the
cache to get. If a batch of frees comes in and would bring it over that,
we simply start kfree'ing the surplus.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 include/linux/io_uring_types.h |  1 +
 io_uring/alloc_cache.h         | 15 +++++++++++++++
 io_uring/io_uring.c            |  8 ++++++--
 io_uring/poll.c                |  2 ++
 4 files changed, 24 insertions(+), 2 deletions(-)
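
For readers who want to see the shape of the change outside the kernel tree, below is a
minimal userspace sketch of the same bounded-cache pattern. It is an illustration only,
not kernel code: bounded_cache, obj, cache_get, cache_put, cache_flush and CACHE_MAX are
names invented for this example, a plain singly linked list stands in for the kernel's
hlist, and malloc()/free() stand in for kmalloc()/kfree(). cache_store() mirrors the
io_alloc_cache_store() helper added above: the free path parks objects until the counter
hits the cap and releases the surplus immediately, while the allocation path reuses
parked objects before falling back to the allocator.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative cap, mirroring IO_ALLOC_CACHE_MAX in the patch. */
#define CACHE_MAX	512

struct obj {
	struct obj *next;	/* link used only while parked in the cache */
};

/* Userspace stand-in for struct io_alloc_cache: a free list plus a counter. */
struct bounded_cache {
	struct obj *head;
	unsigned int nr_cached;
};

static void cache_init(struct bounded_cache *cache)
{
	cache->head = NULL;
	cache->nr_cached = 0;
}

/* Mirrors io_alloc_cache_store(): claim a slot if the cache isn't full. */
static int cache_store(struct bounded_cache *cache)
{
	if (cache->nr_cached < CACHE_MAX) {
		cache->nr_cached++;
		return 1;
	}
	return 0;
}

/* Free path: park the object if there is room, otherwise release it now. */
static void cache_put(struct bounded_cache *cache, struct obj *o)
{
	if (!o)
		return;
	if (cache_store(cache)) {
		o->next = cache->head;
		cache->head = o;
	} else {
		free(o);
	}
}

/* Alloc path: reuse a parked object when possible, else fall back to malloc(). */
static struct obj *cache_get(struct bounded_cache *cache)
{
	struct obj *o = cache->head;

	if (o) {
		cache->head = o->next;
		cache->nr_cached--;
		return o;
	}
	return malloc(sizeof(*o));
}

/* Teardown: drop everything and reset the counter, like io_flush_apoll_cache(). */
static void cache_flush(struct bounded_cache *cache)
{
	while (cache->head) {
		struct obj *o = cache->head;

		cache->head = o->next;
		free(o);
	}
	cache->nr_cached = 0;
}

int main(void)
{
	struct bounded_cache cache;
	struct obj *objs[1000];
	int i;

	cache_init(&cache);

	/* A burst of allocations followed by one big batch of frees. */
	for (i = 0; i < 1000; i++)
		objs[i] = cache_get(&cache);
	for (i = 0; i < 1000; i++)
		cache_put(&cache, objs[i]);

	/* Only CACHE_MAX entries stay parked; the surplus was freed right away. */
	printf("cached after batch free: %u\n", cache.nr_cached);

	cache_flush(&cache);
	return 0;
}

Running the sketch prints "cached after batch free: 512": of the 1000 objects returned in
one batch, only the first CACHE_MAX stay parked and the rest are freed on the spot, which
is the behaviour the io_free_batch_list() hunk introduces via io_alloc_cache_store(). The
cap trades a few extra kfree()/kmalloc() calls under very heavy load for a bound on how
much memory an idle ring can keep pinned.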