diff mbox series

[3/3] fuse: Use hash table to link processing request

Message ID 153666073461.19117.1958730317836145457.stgit@localhost.localdomain (mailing list archive)
State New, archived
Headers show
Series fuse: Solve request_find() bottleneck | expand

Commit Message

Kirill Tkhai Sept. 11, 2018, 10:12 a.m. UTC
We noticed a performance bottleneck in FUSE running our
Virtuozzo storage over rdma. On some types of workload
we observe 20% of time spent in request_find() in the profiler.
This function iterates over a long list of requests, and it
scales badly.

The patch introduces a hash table to reduce the number
of iterations we do in this function. The hash generating
algorithm is taken from the hash_add() function, while
a 512-line table is used to store pending requests.
This fixes the problem and improves the performance.

Reported-by: Alexey Kuznetsov <kuznet@virtuozzo.com>
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 fs/fuse/dev.c    |   29 +++++++++++++++++++++++++----
 fs/fuse/fuse_i.h |    8 +++++---
 fs/fuse/inode.c  |    5 ++++-
 3 files changed, 34 insertions(+), 8 deletions(-)

Comments

Miklos Szeredi Sept. 25, 2018, 9:08 a.m. UTC | #1
On Tue, Sep 11, 2018 at 12:12 PM, Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
> We noticed the performance bottle neck in FUSE running our
> Virtuozzo storage over rdma. On some types of workload
> we observe 20% of times pent in request_find() in profiler.
> This function is iterating over long requests list, and it
> scales bad.
>
> The patch introduces hash table to reduce the number
> of iterations, we do in this function. Hash generating
> algorithm is taken from hash_add() function, while
> 512 lines table is used to store pending requests.
> This fixes problem and improves the performance.

Pushed to fuse.git#for-next with a number of small changes.   E.g. I
reduced the number of cachelines to 256 to make the hashtable size just
4k.   Was there a scientific reason for choosing 512 as the optimal
number of cache lines?

Thanks,
Miklos
Kirill Tkhai Sept. 25, 2018, 9:35 a.m. UTC | #2
On 25.09.2018 12:08, Miklos Szeredi wrote:
> On Tue, Sep 11, 2018 at 12:12 PM, Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
>> We noticed the performance bottle neck in FUSE running our
>> Virtuozzo storage over rdma. On some types of workload
>> we observe 20% of times pent in request_find() in profiler.
>> This function is iterating over long requests list, and it
>> scales bad.
>>
>> The patch introduces hash table to reduce the number
>> of iterations, we do in this function. Hash generating
>> algorithm is taken from hash_add() function, while
>> 512 lines table is used to store pending requests.
>> This fixes problem and improves the performance.
> 
> Pushed to fuse.git#for-next with a number of small changes.   E.g. I

Thanks!

> reduced the number of cachlines to 256 to make the hashtable size just
> 4k.   Was there a scientific reason for choosing 512 as the optimal
> number of cache lines?

I just tried to choose a size, which is not small for all of potential
users. But, it looks like 256 should be also enough. 
So, there was no hidden mathematics...

Kirill
diff mbox series

Patch

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index dda177b57ea2..867825cd04fa 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -327,6 +327,20 @@  static u64 fuse_get_unique(struct fuse_iqueue *fiq)
 	return fiq->reqctr;
 }
 
+static unsigned int __fuse_req_hash(u64 unique)
+{
+	unique &= ~FUSE_INT_REQ_BIT;
+
+	/* Borrowed from hash_add() */
+	return hash_min(unique,
+			HASH_BITS(((struct fuse_pqueue *)0)->processing));
+}
+
+static unsigned int fuse_req_hash(struct fuse_req *req)
+{
+	return __fuse_req_hash(req->in.h.unique);
+}
+
 static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
 	req->in.h.len = sizeof(struct fuse_in_header) +
@@ -1314,7 +1328,7 @@  static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
 		err = reqsize;
 		goto out_end;
 	}
-	list_move_tail(&req->list, &fpq->processing);
+	list_move_tail(&req->list, &fpq->processing[fuse_req_hash(req)]);
 	spin_unlock(&fpq->lock);
 	set_bit(FR_SENT, &req->flags);
 	/* matches barrier in request_wait_answer() */
@@ -1797,10 +1811,12 @@  static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
 static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
 {
 	struct fuse_req *req;
+	unsigned int hash;
 
 	unique &= ~FUSE_INT_REQ_BIT;
+	hash = __fuse_req_hash(unique);
 
-	list_for_each_entry(req, &fpq->processing, list) {
+	list_for_each_entry(req, &fpq->processing[hash], list) {
 		if (req->in.h.unique == unique)
 			return req;
 	}
@@ -2108,6 +2124,7 @@  void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
 		struct fuse_dev *fud;
 		struct fuse_req *req, *next;
 		LIST_HEAD(to_end);
+		int i;
 
 		fc->connected = 0;
 		fc->blocked = 0;
@@ -2129,7 +2146,9 @@  void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
 				}
 				spin_unlock(&req->waitq.lock);
 			}
-			list_splice_tail_init(&fpq->processing, &to_end);
+			for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+				list_splice_tail_init(&fpq->processing[i],
+						      &to_end);
 			spin_unlock(&fpq->lock);
 		}
 		fc->max_background = UINT_MAX;
@@ -2169,10 +2188,12 @@  int fuse_dev_release(struct inode *inode, struct file *file)
 		struct fuse_conn *fc = fud->fc;
 		struct fuse_pqueue *fpq = &fud->pq;
 		LIST_HEAD(to_end);
+		int i;
 
 		spin_lock(&fpq->lock);
 		WARN_ON(!list_empty(&fpq->io));
-		list_splice_init(&fpq->processing, &to_end);
+		for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+			list_splice_init(&fpq->processing[i], &to_end);
 		spin_unlock(&fpq->lock);
 
 		end_requests(fc, &to_end);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index f72e4974b3bb..ed69e1530216 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -408,6 +408,8 @@  struct fuse_iqueue {
 	struct fasync_struct *fasync;
 };
 
+#define FUSE_PQ_HASH_SIZE 512
+
 struct fuse_pqueue {
 	/** Connection established */
 	unsigned connected;
@@ -415,11 +417,11 @@  struct fuse_pqueue {
 	/** Lock protecting accessess to  members of this structure */
 	spinlock_t lock;
 
-	/** The list of requests being processed */
-	struct list_head processing;
-
 	/** The list of requests under I/O */
 	struct list_head io;
+
+	/** The lists of requests being processed */
+	struct list_head processing[FUSE_PQ_HASH_SIZE];
 };
 
 /**
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index db9e60b7eb69..b28412e75c18 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -594,9 +594,12 @@  static void fuse_iqueue_init(struct fuse_iqueue *fiq)
 
 static void fuse_pqueue_init(struct fuse_pqueue *fpq)
 {
+	int i;
+
 	memset(fpq, 0, sizeof(struct fuse_pqueue));
 	spin_lock_init(&fpq->lock);
-	INIT_LIST_HEAD(&fpq->processing);
+	for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+		INIT_LIST_HEAD(&fpq->processing[i]);
 	INIT_LIST_HEAD(&fpq->io);
 	fpq->connected = 1;
 }