RDMA/srpt: Make slab cache names unique

Message ID: 20241004173730.1932859-1-bvanassche@acm.org (mailing list archive)
State: Superseded
Series: RDMA/srpt: Make slab cache names unique

Commit Message

Bart Van Assche Oct. 4, 2024, 5:37 p.m. UTC
Since commit 4c39529663b9 ("slab: Warn on duplicate cache names when
DEBUG_VM=y"), slab complains about duplicate cache names. Hence this
patch that makes cache names unique.

Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Closes: https://lore.kernel.org/linux-block/xpe6bea7rakpyoyfvspvin2dsozjmjtjktpph7rep3h25tv7fb@ooz4cu5z6bq6/
Fixes: 5dabcd0456d7 ("RDMA/srpt: Add support for immediate data")
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 drivers/infiniband/ulp/srpt/ib_srpt.c | 32 ++++++++++++++++++++++-----
 drivers/infiniband/ulp/srpt/ib_srpt.h |  6 +++++
 2 files changed, 32 insertions(+), 6 deletions(-)

Comments

Shinichiro Kawasaki Oct. 7, 2024, 2:44 a.m. UTC | #1
On Oct 04, 2024 / 10:37, Bart Van Assche wrote:
> Since commit 4c39529663b9 ("slab: Warn on duplicate cache names when
> DEBUG_VM=y"), slab complains about duplicate cache names. Hence this
> patch that makes cache names unique.
> 
> Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
> Closes: https://lore.kernel.org/linux-block/xpe6bea7rakpyoyfvspvin2dsozjmjtjktpph7rep3h25tv7fb@ooz4cu5z6bq6/
> Fixes: 5dabcd0456d7 ("RDMA/srpt: Add support for immediate data")
> Signed-off-by: Bart Van Assche <bvanassche@acm.org>

Thank you Bart. I confirmed that this patch avoids the failures that I reported
at the link in the Closes tag. I also ran the whole blktests suite and observed
no regressions. Looks good from a testing point of view :)

Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Zhu Yanjun Oct. 7, 2024, 9:51 a.m. UTC | #2
On 2024/10/5 1:37, Bart Van Assche wrote:
> Since commit 4c39529663b9 ("slab: Warn on duplicate cache names when
> DEBUG_VM=y"), slab complains about duplicate cache names. Hence this
> patch that makes cache names unique.
> 
> Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
> Closes: https://lore.kernel.org/linux-block/xpe6bea7rakpyoyfvspvin2dsozjmjtjktpph7rep3h25tv7fb@ooz4cu5z6bq6/
> Fixes: 5dabcd0456d7 ("RDMA/srpt: Add support for immediate data")
> Signed-off-by: Bart Van Assche <bvanassche@acm.org>
> ---
>   drivers/infiniband/ulp/srpt/ib_srpt.c | 32 ++++++++++++++++++++++-----
>   drivers/infiniband/ulp/srpt/ib_srpt.h |  6 +++++
>   2 files changed, 32 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
> index 9632afbd727b..4cb462074f00 100644
> --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
> +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
> @@ -41,6 +41,7 @@
>   #include <linux/string.h>
>   #include <linux/delay.h>
>   #include <linux/atomic.h>
> +#include <linux/idr.h>
>   #include <linux/inet.h>
>   #include <rdma/ib_cache.h>
>   #include <scsi/scsi_proto.h>
> @@ -68,6 +69,7 @@ MODULE_LICENSE("Dual BSD/GPL");
>   static u64 srpt_service_guid;
>   static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
>   static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */
> +static DEFINE_IDA(cache_ida);
>   
>   static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
>   module_param(srp_max_req_size, int, 0444);
> @@ -2120,12 +2122,14 @@ static void srpt_release_channel_work(struct work_struct *w)
>   			     ch->rsp_buf_cache, DMA_TO_DEVICE);
>   
>   	kmem_cache_destroy(ch->rsp_buf_cache);
> +	ida_free(&cache_ida, ch->rsp_buf_cache_idx);
>   
>   	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
>   			     sdev, ch->rq_size,
>   			     ch->req_buf_cache, DMA_FROM_DEVICE);
>   
>   	kmem_cache_destroy(ch->req_buf_cache);
> +	ida_free(&cache_ida, ch->req_buf_cache_idx);
>   
>   	kref_put(&ch->kref, srpt_free_ch);
>   }
> @@ -2164,6 +2168,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
>   	u32 it_iu_len;
>   	int i, tag_num, tag_size, ret;
>   	struct srpt_tpg *stpg;
> +	char cache_name[32];

The local variable cache_name is not zeroed.

>   
>   	WARN_ON_ONCE(irqs_disabled());
>   
> @@ -2245,8 +2250,11 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
>   	INIT_LIST_HEAD(&ch->cmd_wait_list);
>   	ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
>   
> -	ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
> -					      512, 0, NULL);
> +	ch->rsp_buf_cache_idx = ida_alloc(&cache_ida, GFP_KERNEL);
> +	snprintf(cache_name, sizeof(cache_name), "srpt-rsp-buf-%u",
> +		 ch->rsp_buf_cache_idx);

IIRC, snprintf() appends a '\0' to "cache_name", so "cache_name" will be used
correctly even though it is not zeroed before it is used.

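(For illustration only, a tiny userspace example of that behavior; nothing
below is part of the patch:)

#include <stdio.h>
#include <string.h>

int main(void)
{
	char cache_name[32];

	/* Fill the buffer with garbage to mimic an uninitialized stack variable. */
	memset(cache_name, 'X', sizeof(cache_name));

	/*
	 * snprintf() writes at most sizeof(cache_name) bytes including the
	 * terminating '\0', so the result is a valid C string no matter what
	 * the buffer contained before.
	 */
	snprintf(cache_name, sizeof(cache_name), "srpt-rsp-buf-%u", 7);
	printf("%s\n", cache_name);	/* prints "srpt-rsp-buf-7" */
	return 0;
}
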
> +	ch->rsp_buf_cache =
> +		kmem_cache_create(cache_name, ch->max_rsp_size, 512, 0, NULL);
>   	if (!ch->rsp_buf_cache)
>   		goto free_ch;
>   
> @@ -2280,8 +2288,11 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
>   		alignment_offset = round_up(imm_data_offset, 512) -
>   			imm_data_offset;
>   		req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
> -		ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
> -						      512, 0, NULL);
> +		ch->req_buf_cache_idx = ida_alloc(&cache_ida, GFP_KERNEL);
> +		snprintf(cache_name, sizeof(cache_name), "srpt-req-buf-%u",
> +			 ch->req_buf_cache_idx);

Ditto

> +		ch->req_buf_cache =
> +			kmem_cache_create(cache_name, req_sz, 512, 0, NULL);
>   		if (!ch->req_buf_cache)
>   			goto free_rsp_ring;
>   
> @@ -2479,6 +2490,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
>   
>   free_recv_cache:
>   	kmem_cache_destroy(ch->req_buf_cache);
> +	ida_free(&cache_ida, ch->req_buf_cache_idx);
>   
>   free_rsp_ring:
>   	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
> @@ -2487,6 +2499,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
>   
>   free_rsp_cache:
>   	kmem_cache_destroy(ch->rsp_buf_cache);
> +	ida_free(&cache_ida, ch->rsp_buf_cache_idx);
>   
>   free_ch:
>   	if (rdma_cm_id)
> @@ -3056,6 +3069,7 @@ static void srpt_free_srq(struct srpt_device *sdev)
>   			     sdev->srq_size, sdev->req_buf_cache,
>   			     DMA_FROM_DEVICE);
>   	kmem_cache_destroy(sdev->req_buf_cache);
> +	ida_free(&cache_ida, sdev->req_buf_cache_idx);
>   	sdev->srq = NULL;
>   }
>   
> @@ -3070,6 +3084,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
>   	};
>   	struct ib_device *device = sdev->device;
>   	struct ib_srq *srq;
> +	char cache_name[32];

Ditto

>   	int i;
>   
>   	WARN_ON_ONCE(sdev->srq);
> @@ -3082,8 +3097,11 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
>   	pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
>   		 sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
>   
> -	sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
> -						srp_max_req_size, 0, 0, NULL);
> +	sdev->req_buf_cache_idx = ida_alloc(&cache_ida, GFP_KERNEL);
> +	snprintf(cache_name, sizeof(cache_name), "srpt-srq-req-buf-%u",
> +		 sdev->req_buf_cache_idx);

Ditto

> +	sdev->req_buf_cache =
> +		kmem_cache_create(cache_name, srp_max_req_size, 0, 0, NULL);
>   	if (!sdev->req_buf_cache)
>   		goto free_srq;
>   
> @@ -3106,6 +3124,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
>   
>   free_cache:
>   	kmem_cache_destroy(sdev->req_buf_cache);
> +	ida_free(&cache_ida, sdev->req_buf_cache_idx);
>   
>   free_srq:
>   	ib_destroy_srq(srq);
> @@ -3926,6 +3945,7 @@ static void __exit srpt_cleanup_module(void)
>   		rdma_destroy_id(rdma_cm_id);
>   	ib_unregister_client(&srpt_client);
>   	target_unregister_template(&srpt_template);
> +	ida_destroy(&cache_ida);
>   }
>   
>   module_init(srpt_init_module);
> diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
> index 4c46b301eea1..6d10cd7c9f21 100644
> --- a/drivers/infiniband/ulp/srpt/ib_srpt.h
> +++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
> @@ -276,6 +276,8 @@ enum rdma_ch_state {
>    * @state:         channel state. See also enum rdma_ch_state.
>    * @using_rdma_cm: Whether the RDMA/CM or IB/CM is used for this channel.
>    * @processing_wait_list: Whether or not cmd_wait_list is being processed.
> + * @rsp_buf_cache_idx: @rsp_buf_cache index for slab.
> + * @req_buf_cache_idx: @req_buf_cache index for slab.
>    * @rsp_buf_cache: kmem_cache for @ioctx_ring.
>    * @ioctx_ring:    Send ring.
>    * @req_buf_cache: kmem_cache for @ioctx_recv_ring.
> @@ -316,6 +318,8 @@ struct srpt_rdma_ch {
>   	u16			imm_data_offset;
>   	spinlock_t		spinlock;
>   	enum rdma_ch_state	state;
> +	int			rsp_buf_cache_idx;
> +	int			req_buf_cache_idx;

Thanks.
Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev>

Zhu Yanjun
>   	struct kmem_cache	*rsp_buf_cache;
>   	struct srpt_send_ioctx	**ioctx_ring;
>   	struct kmem_cache	*req_buf_cache;
> @@ -443,6 +447,7 @@ struct srpt_port {
>    * @srq_size:      SRQ size.
>    * @sdev_mutex:	   Serializes use_srq changes.
>    * @use_srq:       Whether or not to use SRQ.
> + * @req_buf_cache_idx: @req_buf_cache index for slab.
>    * @req_buf_cache: kmem_cache for @ioctx_ring buffers.
>    * @ioctx_ring:    Per-HCA SRQ.
>    * @event_handler: Per-HCA asynchronous IB event handler.
> @@ -459,6 +464,7 @@ struct srpt_device {
>   	int			srq_size;
>   	struct mutex		sdev_mutex;
>   	bool			use_srq;
> +	int			req_buf_cache_idx;
>   	struct kmem_cache	*req_buf_cache;
>   	struct srpt_recv_ioctx	**ioctx_ring;
>   	struct ib_event_handler	event_handler;
Jens Axboe Oct. 7, 2024, 2:06 p.m. UTC | #3
Still seems way over engineered, just use an atomic_long_t for a
continually increasing index number.
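(For concreteness, a rough sketch of that alternative; the helper is
hypothetical and this is not the posted patch. Like the posted patch, it relies
on kmem_cache_create() duplicating the name string passed to it:)

static atomic_long_t srpt_cache_seq = ATOMIC_LONG_INIT(0);

/*
 * Hypothetical helper: a monotonically increasing counter makes each cache
 * name unique without any free/reuse bookkeeping.
 */
static struct kmem_cache *srpt_create_cache(const char *prefix,
					    unsigned int size,
					    unsigned int align)
{
	char name[40];

	snprintf(name, sizeof(name), "%s-%ld", prefix,
		 atomic_long_inc_return(&srpt_cache_seq));
	return kmem_cache_create(name, size, align, 0, NULL);
}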
Bart Van Assche Oct. 7, 2024, 4:11 p.m. UTC | #4
On 10/7/24 2:51 AM, Zhu Yanjun wrote:
>> +    char cache_name[32];
> 
> The local variable cache_name is not zeroed.

I think that there are millions of local variables in the Linux kernel
that are not zero-initialized.

Bart.
Bart Van Assche Oct. 7, 2024, 4:14 p.m. UTC | #5
On 10/7/24 7:06 AM, Jens Axboe wrote:
> Still seems way over engineered, just use an atomic_long_t for a
> continually increasing index number.

Even an atomic_long_t can wrap around and hence can result in duplicate
slab cache names. With my patch it is guaranteed that slab cache names
are unique. I'm not claiming that this patch is the best possible
solution but it's a working solution and a solution that doesn't require
too many changes to the ib_srpt driver.

Bart.
Jens Axboe Oct. 7, 2024, 4:28 p.m. UTC | #6
On 10/7/24 10:14 AM, Bart Van Assche wrote:
> On 10/7/24 7:06 AM, Jens Axboe wrote:
>> Still seems way over engineered, just use an atomic_long_t for a
>> continually increasing index number.
> 
> Even an atomic_long_t can wrap around and hence can result in duplicate
> slab cache names. With my patch it is guaranteed that slab cache names
> are unique. I'm not claiming that this patch is the best possible
> solution but it's a working solution and a solution that doesn't require
> too many changes to the ib_srpt driver.

Come on... The current patch doesn't even check if ida_alloc() got an ID.
Without that, using some mechanism to alloc+free an index is surely less
than useful.
Bart Van Assche Oct. 7, 2024, 4:52 p.m. UTC | #7
On 10/7/24 9:28 AM, Jens Axboe wrote:
> On 10/7/24 10:14 AM, Bart Van Assche wrote:
>> On 10/7/24 7:06 AM, Jens Axboe wrote:
>>> Still seems way over engineered, just use an atomic_long_t for a
>>> continually increasing index number.
>>
>> Even an atomic_long_t can wrap around and hence can result in duplicate
>> slab cache names. With my patch it is guaranteed that slab cache names
>> are unique. I'm not claiming that this patch is the best possible
>> solution but it's a working solution and a solution that doesn't require
>> too many changes to the ib_srpt driver.
> 
> Come on... The current patch doesn't even check if ida_alloc() got an ID.
> Without that, using some mechanism to alloc+free an index is surely less
> than useful.

Is it necessary in this case to check the ida_alloc() result? ida_free()
ignores negative values. So if ida_alloc() fails, the worst that can
happen is that a slab name with a negative number is passed to 
kmem_cache_create(). Additionally, if my interpretation of the ida code
is correct, it allocates memory in 128 byte chunks. So if allocation of
an ida fails, it means that less than 128 bytes of memory are left. More
than 128 bytes are required by kmem_cache_create(). Hence, if ida
allocation fails, the kmem cache creation will also fail and the slab
name with the negative number will not become visible in procfs.

Do you agree that in this case it is safe not to check whether
ida_alloc() succeeds?

Thanks,

Bart.
Jason Gunthorpe Oct. 7, 2024, 4:55 p.m. UTC | #8
On Mon, Oct 07, 2024 at 09:52:05AM -0700, Bart Van Assche wrote:

> Do you agree that in this case it is safe not to check whether
> ida_alloc() succeeds?

It seems like a way to attract static checker bug fix patches :\

Jason
Bart Van Assche Oct. 7, 2024, 5:16 p.m. UTC | #9
On 10/7/24 9:55 AM, Jason Gunthorpe wrote:
> On Mon, Oct 07, 2024 at 09:52:05AM -0700, Bart Van Assche wrote:
>> Do you agree that in this case it is safe not to check whether
>> ida_alloc() succeeds?
> 
> It seems like a way to attract static checker bug fix patches :\

Got it. I will add error handling for ida_alloc() failures.

Thanks,

Bart.
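(For illustration, a minimal sketch of what that error handling could look
like for the response buffer cache in srpt_cm_req_recv(); the extra unwind
label is hypothetical and this is not the posted patch:)

	ch->rsp_buf_cache_idx = ida_alloc(&cache_ida, GFP_KERNEL);
	if (ch->rsp_buf_cache_idx < 0)
		goto free_ch;
	snprintf(cache_name, sizeof(cache_name), "srpt-rsp-buf-%d",
		 ch->rsp_buf_cache_idx);
	ch->rsp_buf_cache =
		kmem_cache_create(cache_name, ch->max_rsp_size, 512, 0, NULL);
	if (!ch->rsp_buf_cache)
		goto free_rsp_cache_idx;	/* hypothetical label that calls ida_free() */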
Jens Axboe Oct. 7, 2024, 5:20 p.m. UTC | #10
On 10/7/24 10:52 AM, Bart Van Assche wrote:
> On 10/7/24 9:28 AM, Jens Axboe wrote:
>> On 10/7/24 10:14 AM, Bart Van Assche wrote:
>>> On 10/7/24 7:06 AM, Jens Axboe wrote:
>>>> Still seems way over engineered, just use an atomic_long_t for a
>>>> continually increasing index number.
>>>
>>> Even an atomic_long_t can wrap around and hence can result in duplicate
>>> slab cache names. With my patch it is guaranteed that slab cache names
>>> are unique. I'm not claiming that this patch is the best possible
>>> solution but it's a working solution and a solution that doesn't require
>>> too many changes to the ib_srpt driver.
>>
>> Come on... The current patch doesn't even check if ida_alloc() got an ID.
>> Without that, using some mechanism to alloc+free an index is surely less
>> than useful.
> 
> Is it necessary in this case to check the ida_alloc() result? ida_free()
> ignores negative values. So if ida_alloc() fails, the worst that can
> happen is that a slab name with a negative number is passed to kmem_cache_create(). Additionally, if my interpretation of the ida code
> is correct, it allocates memory in 128 byte chunks. So if allocation of
> an ida fails, it means that less than 128 bytes of memory are left. More
> than 128 bytes are required by kmem_cache_create(). Hence, if ida
> allocation fails, the kmem cache creation will also fail and the slab
> name with the negative number will not become visible in procfs.
> 
> Do you agree that in this case it is safe not to check whether
> ida_alloc() succeeds?

I'm not worried about OOM; what if you run out of space? And yes, the
free part deals with it fine, but you're right back to having duplicate
slab names in that case. I'm done arguing about this silly thing; I
stand by my comment that using ida for this is overengineering. And yes,
there are 3 slab caches, but having 3 per instance is silly and you
should share those 3 across instances. And guess what, if that was done,
then you would not need to worry about creating silly indexes to avoid
conflicts in slab names. Not only would it be more efficient in terms of
overhead, it'd also fix this problem at the same time rather than paper
over it.
Jason Gunthorpe Oct. 7, 2024, 5:22 p.m. UTC | #11
On Mon, Oct 07, 2024 at 11:20:56AM -0600, Jens Axboe wrote:

> stand by my comment that using ida for this is overengineering. And that
> yes there are 3 slab caches, but having 3 per whatever instance is silly
> and you should share those 3 across instances. 

Store the slab cache in an xarray indexed by the variable size and
name them according to their size?

Jason
Jens Axboe Oct. 7, 2024, 5:25 p.m. UTC | #12
On 10/7/24 11:22 AM, Jason Gunthorpe wrote:
> On Mon, Oct 07, 2024 at 11:20:56AM -0600, Jens Axboe wrote:
> 
>> stand by my comment that using ida for this is overengineering. And that
>> yes there are 3 slab caches, but having 3 per whatever instance is silly
>> and you should share those 3 across instances. 
> 
> Store the slab cache in an xarray indexed by the variable size and
> name them according to their size?

That's a way better idea.
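(For concreteness, a rough sketch of the xarray idea; the helper and names are
made up, it needs <linux/xarray.h>, and this is not a posted patch. Caches are
shared per buffer size, so the name is unique by construction and the
per-channel index fields go away:)

static DEFINE_XARRAY(srpt_cache_xa);	/* kmem_cache pointers indexed by object size */

static struct kmem_cache *srpt_get_cache(unsigned int size)
{
	struct kmem_cache *cache, *old;
	char name[32];

	cache = xa_load(&srpt_cache_xa, size);
	if (cache)
		return cache;

	snprintf(name, sizeof(name), "srpt-buf-%u", size);
	cache = kmem_cache_create(name, size, 512, 0, NULL);
	if (!cache)
		return NULL;

	/* Another connection may have created the same-size cache concurrently. */
	old = xa_cmpxchg(&srpt_cache_xa, size, NULL, cache, GFP_KERNEL);
	if (old) {
		kmem_cache_destroy(cache);
		return xa_is_err(old) ? NULL : old;
	}
	return cache;
}

The caches would then be destroyed once at module unload by walking the
xarray, instead of per channel.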

Patch

diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 9632afbd727b..4cb462074f00 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -41,6 +41,7 @@ 
 #include <linux/string.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
+#include <linux/idr.h>
 #include <linux/inet.h>
 #include <rdma/ib_cache.h>
 #include <scsi/scsi_proto.h>
@@ -68,6 +69,7 @@  MODULE_LICENSE("Dual BSD/GPL");
 static u64 srpt_service_guid;
 static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
 static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */
+static DEFINE_IDA(cache_ida);
 
 static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
 module_param(srp_max_req_size, int, 0444);
@@ -2120,12 +2122,14 @@  static void srpt_release_channel_work(struct work_struct *w)
 			     ch->rsp_buf_cache, DMA_TO_DEVICE);
 
 	kmem_cache_destroy(ch->rsp_buf_cache);
+	ida_free(&cache_ida, ch->rsp_buf_cache_idx);
 
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
 			     sdev, ch->rq_size,
 			     ch->req_buf_cache, DMA_FROM_DEVICE);
 
 	kmem_cache_destroy(ch->req_buf_cache);
+	ida_free(&cache_ida, ch->req_buf_cache_idx);
 
 	kref_put(&ch->kref, srpt_free_ch);
 }
@@ -2164,6 +2168,7 @@  static int srpt_cm_req_recv(struct srpt_device *const sdev,
 	u32 it_iu_len;
 	int i, tag_num, tag_size, ret;
 	struct srpt_tpg *stpg;
+	char cache_name[32];
 
 	WARN_ON_ONCE(irqs_disabled());
 
@@ -2245,8 +2250,11 @@  static int srpt_cm_req_recv(struct srpt_device *const sdev,
 	INIT_LIST_HEAD(&ch->cmd_wait_list);
 	ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
 
-	ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
-					      512, 0, NULL);
+	ch->rsp_buf_cache_idx = ida_alloc(&cache_ida, GFP_KERNEL);
+	snprintf(cache_name, sizeof(cache_name), "srpt-rsp-buf-%u",
+		 ch->rsp_buf_cache_idx);
+	ch->rsp_buf_cache =
+		kmem_cache_create(cache_name, ch->max_rsp_size, 512, 0, NULL);
 	if (!ch->rsp_buf_cache)
 		goto free_ch;
 
@@ -2280,8 +2288,11 @@  static int srpt_cm_req_recv(struct srpt_device *const sdev,
 		alignment_offset = round_up(imm_data_offset, 512) -
 			imm_data_offset;
 		req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
-		ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
-						      512, 0, NULL);
+		ch->req_buf_cache_idx = ida_alloc(&cache_ida, GFP_KERNEL);
+		snprintf(cache_name, sizeof(cache_name), "srpt-req-buf-%u",
+			 ch->req_buf_cache_idx);
+		ch->req_buf_cache =
+			kmem_cache_create(cache_name, req_sz, 512, 0, NULL);
 		if (!ch->req_buf_cache)
 			goto free_rsp_ring;
 
@@ -2479,6 +2490,7 @@  static int srpt_cm_req_recv(struct srpt_device *const sdev,
 
 free_recv_cache:
 	kmem_cache_destroy(ch->req_buf_cache);
+	ida_free(&cache_ida, ch->req_buf_cache_idx);
 
 free_rsp_ring:
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2487,6 +2499,7 @@  static int srpt_cm_req_recv(struct srpt_device *const sdev,
 
 free_rsp_cache:
 	kmem_cache_destroy(ch->rsp_buf_cache);
+	ida_free(&cache_ida, ch->rsp_buf_cache_idx);
 
 free_ch:
 	if (rdma_cm_id)
@@ -3056,6 +3069,7 @@  static void srpt_free_srq(struct srpt_device *sdev)
 			     sdev->srq_size, sdev->req_buf_cache,
 			     DMA_FROM_DEVICE);
 	kmem_cache_destroy(sdev->req_buf_cache);
+	ida_free(&cache_ida, sdev->req_buf_cache_idx);
 	sdev->srq = NULL;
 }
 
@@ -3070,6 +3084,7 @@  static int srpt_alloc_srq(struct srpt_device *sdev)
 	};
 	struct ib_device *device = sdev->device;
 	struct ib_srq *srq;
+	char cache_name[32];
 	int i;
 
 	WARN_ON_ONCE(sdev->srq);
@@ -3082,8 +3097,11 @@  static int srpt_alloc_srq(struct srpt_device *sdev)
 	pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
 		 sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
 
-	sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
-						srp_max_req_size, 0, 0, NULL);
+	sdev->req_buf_cache_idx = ida_alloc(&cache_ida, GFP_KERNEL);
+	snprintf(cache_name, sizeof(cache_name), "srpt-srq-req-buf-%u",
+		 sdev->req_buf_cache_idx);
+	sdev->req_buf_cache =
+		kmem_cache_create(cache_name, srp_max_req_size, 0, 0, NULL);
 	if (!sdev->req_buf_cache)
 		goto free_srq;
 
@@ -3106,6 +3124,7 @@  static int srpt_alloc_srq(struct srpt_device *sdev)
 
 free_cache:
 	kmem_cache_destroy(sdev->req_buf_cache);
+	ida_free(&cache_ida, sdev->req_buf_cache_idx);
 
 free_srq:
 	ib_destroy_srq(srq);
@@ -3926,6 +3945,7 @@  static void __exit srpt_cleanup_module(void)
 		rdma_destroy_id(rdma_cm_id);
 	ib_unregister_client(&srpt_client);
 	target_unregister_template(&srpt_template);
+	ida_destroy(&cache_ida);
 }
 
 module_init(srpt_init_module);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 4c46b301eea1..6d10cd7c9f21 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -276,6 +276,8 @@  enum rdma_ch_state {
  * @state:         channel state. See also enum rdma_ch_state.
  * @using_rdma_cm: Whether the RDMA/CM or IB/CM is used for this channel.
  * @processing_wait_list: Whether or not cmd_wait_list is being processed.
+ * @rsp_buf_cache_idx: @rsp_buf_cache index for slab.
+ * @req_buf_cache_idx: @req_buf_cache index for slab.
  * @rsp_buf_cache: kmem_cache for @ioctx_ring.
  * @ioctx_ring:    Send ring.
  * @req_buf_cache: kmem_cache for @ioctx_recv_ring.
@@ -316,6 +318,8 @@  struct srpt_rdma_ch {
 	u16			imm_data_offset;
 	spinlock_t		spinlock;
 	enum rdma_ch_state	state;
+	int			rsp_buf_cache_idx;
+	int			req_buf_cache_idx;
 	struct kmem_cache	*rsp_buf_cache;
 	struct srpt_send_ioctx	**ioctx_ring;
 	struct kmem_cache	*req_buf_cache;
@@ -443,6 +447,7 @@  struct srpt_port {
  * @srq_size:      SRQ size.
  * @sdev_mutex:	   Serializes use_srq changes.
  * @use_srq:       Whether or not to use SRQ.
+ * @req_buf_cache_idx: @req_buf_cache index for slab.
  * @req_buf_cache: kmem_cache for @ioctx_ring buffers.
  * @ioctx_ring:    Per-HCA SRQ.
  * @event_handler: Per-HCA asynchronous IB event handler.
@@ -459,6 +464,7 @@  struct srpt_device {
 	int			srq_size;
 	struct mutex		sdev_mutex;
 	bool			use_srq;
+	int			req_buf_cache_idx;
 	struct kmem_cache	*req_buf_cache;
 	struct srpt_recv_ioctx	**ioctx_ring;
 	struct ib_event_handler	event_handler;