diff mbox series

[3/4] io_uring: impose max limit on apoll cache

Message ID 20220707232345.54424-4-axboe@kernel.dk (mailing list archive)
State New
Headers show
Series [1/4] io_uring: move apoll cache to poll.c | expand

Commit Message

Jens Axboe July 7, 2022, 11:23 p.m. UTC
Caches like this tend to grow to the peak size, and then never get any
smaller. Impose a max limit on the size, to prevent it from growing too
big.

A somewhat randomly chosen 512 is the max size we'll allow the cache
to get. If a batch of frees comes in and would bring it over that, we
simply start kfree'ing the surplus.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 include/linux/io_uring_types.h |  1 +
 io_uring/alloc_cache.h         | 15 +++++++++++++++
 io_uring/io_uring.c            |  8 ++++++--
 io_uring/poll.c                |  2 ++
 4 files changed, 24 insertions(+), 2 deletions(-)
diff mbox series

Patch

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index b548da03b563..bf8f95332eda 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -160,6 +160,7 @@  struct io_ev_fd {
 
 struct io_alloc_cache {
 	struct hlist_head	list;
+	unsigned int		nr_cached;
 };
 
 struct io_ring_ctx {
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index 49ac6ae237ef..0e64030f1ae0 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -1,4 +1,19 @@ 
+/*
+ * Don't allow the cache to grow beyond this size.
+ */
+#define IO_ALLOC_CACHE_MAX	512
+
+static inline bool io_alloc_cache_store(struct io_alloc_cache *cache)
+{
+	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
+		cache->nr_cached++;
+		return true;
+	}
+	return false;
+}
+
 static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
 {
 	INIT_HLIST_HEAD(&cache->list);
+	cache->nr_cached = 0;
 }
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 3b9033c401bf..b5098773d924 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1181,8 +1181,12 @@  void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
 
 				if (apoll->double_poll)
 					kfree(apoll->double_poll);
-				hlist_add_head(&apoll->cache_list,
-						&ctx->apoll_cache.list);
+				if (io_alloc_cache_store(&ctx->apoll_cache)) {
+					hlist_add_head(&apoll->cache_list,
+							&ctx->apoll_cache.list);
+				} else {
+					kfree(apoll);
+				}
 				req->flags &= ~REQ_F_POLLED;
 			}
 			if (req->flags & IO_REQ_LINK_FLAGS)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index f3aae3cc6501..cc49160975cb 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -593,6 +593,7 @@  static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 		apoll = hlist_entry(ctx->apoll_cache.list.first,
 						struct async_poll, cache_list);
 		hlist_del(&apoll->cache_list);
+		ctx->apoll_cache.nr_cached--;
 	} else {
 		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
 		if (unlikely(!apoll))
@@ -969,4 +970,5 @@  void io_flush_apoll_cache(struct io_ring_ctx *ctx)
 		hlist_del(&apoll->cache_list);
 		kfree(apoll);
 	}
+	ctx->apoll_cache.nr_cached = 0;
 }