
[v2,for-next,1/2] RDMA/hns: Add the workqueue framework for flush cqe handler

Message ID 1573563124-12579-2-git-send-email-liuyixian@huawei.com (mailing list archive)
State Superseded
Series Fix crash due to sleepy mutex while holding lock in post_{send|recv|poll}

Commit Message

Yixian Liu Nov. 12, 2019, 12:52 p.m. UTC
HiP08 RoCE hardware lacks the ability (a known hardware problem) to flush
outstanding WQEs when a QP enters the error state. To overcome this
hardware problem, as a workaround, when the QP is detected to be in the
error state during various legs such as post send and post receive [1],
the flush needs to be performed by the driver.

The earlier patch [1] sent to solve the hardware limitation explained
in the cover letter had a bug in the software flushing leg: it
acquired a mutex while modifying the QP state to error and conveying
it to the hardware via the mailbox. This caused the leg to sleep while
holding a spin lock, which led to a crash.

Suggested solution:
Defer the flushing of the QP in the error state to a workqueue to get
around this hardware limitation.

This patch adds the workqueue framework and the flush handler
function.

[1] https://patchwork.kernel.org/patch/10534271/

Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Reviewed-by: Salil Mehta <salil.mehta@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_device.h |  3 +++
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  |  4 ++--
 drivers/infiniband/hw/hns/hns_roce_qp.c     | 33 +++++++++++++++++++++++++++++
 3 files changed, 38 insertions(+), 2 deletions(-)

Comments

Jason Gunthorpe Nov. 15, 2019, 9:06 p.m. UTC | #1
On Tue, Nov 12, 2019 at 08:52:03PM +0800, Yixian Liu wrote:
> HiP08 RoCE hardware lacks ability(a known hardware problem) to flush
> outstanding WQEs if QP state gets into errored mode for some reason.
> To overcome this hardware problem and as a workaround, when QP is
> detected to be in errored state during various legs like post send,
> post receive etc [1], flush needs to be performed from the driver.
> 
> The earlier patch[1] sent to solve the hardware limitation explained
> in the cover-letter had a bug in the software flushing leg. It
> acquired mutex while modifying QP state to errored state and while
> conveying it to the hardware using the mailbox. This caused leg to
> sleep while holding spin-lock and caused crash.
> 
> Suggested Solution:
> we have proposed to defer the flushing of the QP in the Errored state
> using the workqueue to get around with the limitation of our hardware.
> 
> This patch adds the framework of the workqueue and the flush handler
> function.
> 
> [1] https://patchwork.kernel.org/patch/10534271/
> 
> Signed-off-by: Yixian Liu <liuyixian@huawei.com>
> Reviewed-by: Salil Mehta <salil.mehta@huawei.com>
>  drivers/infiniband/hw/hns/hns_roce_device.h |  3 +++
>  drivers/infiniband/hw/hns/hns_roce_hw_v2.c  |  4 ++--
>  drivers/infiniband/hw/hns/hns_roce_qp.c     | 33 +++++++++++++++++++++++++++++
>  3 files changed, 38 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
> index a1b712e..42d8a5a 100644
> +++ b/drivers/infiniband/hw/hns/hns_roce_device.h
> @@ -906,6 +906,7 @@ struct hns_roce_caps {
>  struct hns_roce_work {
>  	struct hns_roce_dev *hr_dev;
>  	struct work_struct work;
> +	struct hns_roce_qp *hr_qp;
>  	u32 qpn;
>  	u32 cqn;
>  	int event_type;
> @@ -1034,6 +1035,7 @@ struct hns_roce_dev {
>  	const struct hns_roce_hw *hw;
>  	void			*priv;
>  	struct workqueue_struct *irq_workq;
> +	struct hns_roce_work flush_work;
>  	const struct hns_roce_dfx_hw *dfx;
>  };
>  
> @@ -1226,6 +1228,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
>  				 struct ib_udata *udata);
>  int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
>  		       int attr_mask, struct ib_udata *udata);
> +void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
>  void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
>  void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
>  void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> index 907c951..ec48e7e 100644
> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> @@ -5967,8 +5967,8 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
>  		goto err_request_irq_fail;
>  	}
>  
> -	hr_dev->irq_workq =
> -		create_singlethread_workqueue("hns_roce_irq_workqueue");
> +	hr_dev->irq_workq = alloc_workqueue("hns_roce_irq_workqueue",
> +					    WQ_MEM_RECLAIM, 0);
>  	if (!hr_dev->irq_workq) {
>  		dev_err(dev, "Create irq workqueue failed!\n");
>  		ret = -ENOMEM;
> diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
> index 9442f01..0111f2e 100644
> +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
> @@ -43,6 +43,39 @@
>  
>  #define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
>  
> +static void flush_work_handle(struct work_struct *work)
> +{
> +	struct hns_roce_work *flush_work = container_of(work,
> +					struct hns_roce_work, work);
> +	struct hns_roce_qp *hr_qp = flush_work->hr_qp;
> +	struct device *dev = flush_work->hr_dev->dev;
> +	struct ib_qp_attr attr;
> +	int attr_mask;
> +	int ret;
> +
> +	attr_mask = IB_QP_STATE;
> +	attr.qp_state = IB_QPS_ERR;
> +
> +	ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
> +	if (ret)
> +		dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
> +			ret);
> +
> +	if (atomic_dec_and_test(&hr_qp->refcount))
> +		complete(&hr_qp->free);
> +}
> +
> +void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
> +{
> +	struct hns_roce_work *flush_work = &hr_dev->flush_work;
> +
> +	flush_work->hr_dev = hr_dev;
> +	flush_work->hr_qp = hr_qp;
> +	INIT_WORK(&flush_work->work, flush_work_handle);
> +	atomic_inc(&hr_qp->refcount);
> +	queue_work(hr_dev->irq_workq, &flush_work->work);

It kind of looks like this can be called multiple times? It won't work
right unless it is called exactly once

Jason
Yixian Liu Nov. 18, 2019, 1:50 p.m. UTC | #2
On 2019/11/16 5:06, Jason Gunthorpe wrote:
> On Tue, Nov 12, 2019 at 08:52:03PM +0800, Yixian Liu wrote:
>> HiP08 RoCE hardware lacks ability(a known hardware problem) to flush
>> outstanding WQEs if QP state gets into errored mode for some reason.
>> To overcome this hardware problem and as a workaround, when QP is
>> detected to be in errored state during various legs like post send,
>> post receive etc [1], flush needs to be performed from the driver.
>>
>> The earlier patch[1] sent to solve the hardware limitation explained
>> in the cover-letter had a bug in the software flushing leg. It
>> acquired mutex while modifying QP state to errored state and while
>> conveying it to the hardware using the mailbox. This caused leg to
>> sleep while holding spin-lock and caused crash.
>>
>> Suggested Solution:
>> we have proposed to defer the flushing of the QP in the Errored state
>> using the workqueue to get around with the limitation of our hardware.
>>
>> This patch adds the framework of the workqueue and the flush handler
>> function.
>>
>> [1] https://patchwork.kernel.org/patch/10534271/
>>
>> Signed-off-by: Yixian Liu <liuyixian@huawei.com>
>> Reviewed-by: Salil Mehta <salil.mehta@huawei.com>
>>  drivers/infiniband/hw/hns/hns_roce_device.h |  3 +++
>>  drivers/infiniband/hw/hns/hns_roce_hw_v2.c  |  4 ++--
>>  drivers/infiniband/hw/hns/hns_roce_qp.c     | 33 +++++++++++++++++++++++++++++
>>  3 files changed, 38 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
>> index a1b712e..42d8a5a 100644
>> +++ b/drivers/infiniband/hw/hns/hns_roce_device.h
>> @@ -906,6 +906,7 @@ struct hns_roce_caps {
>>  struct hns_roce_work {
>>  	struct hns_roce_dev *hr_dev;
>>  	struct work_struct work;
>> +	struct hns_roce_qp *hr_qp;
>>  	u32 qpn;
>>  	u32 cqn;
>>  	int event_type;
>> @@ -1034,6 +1035,7 @@ struct hns_roce_dev {
>>  	const struct hns_roce_hw *hw;
>>  	void			*priv;
>>  	struct workqueue_struct *irq_workq;
>> +	struct hns_roce_work flush_work;
>>  	const struct hns_roce_dfx_hw *dfx;
>>  };
>>  
>> @@ -1226,6 +1228,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
>>  				 struct ib_udata *udata);
>>  int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
>>  		       int attr_mask, struct ib_udata *udata);
>> +void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
>>  void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
>>  void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
>>  void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> index 907c951..ec48e7e 100644
>> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> @@ -5967,8 +5967,8 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
>>  		goto err_request_irq_fail;
>>  	}
>>  
>> -	hr_dev->irq_workq =
>> -		create_singlethread_workqueue("hns_roce_irq_workqueue");
>> +	hr_dev->irq_workq = alloc_workqueue("hns_roce_irq_workqueue",
>> +					    WQ_MEM_RECLAIM, 0);
>>  	if (!hr_dev->irq_workq) {
>>  		dev_err(dev, "Create irq workqueue failed!\n");
>>  		ret = -ENOMEM;
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
>> index 9442f01..0111f2e 100644
>> +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
>> @@ -43,6 +43,39 @@
>>  
>>  #define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
>>  
>> +static void flush_work_handle(struct work_struct *work)
>> +{
>> +	struct hns_roce_work *flush_work = container_of(work,
>> +					struct hns_roce_work, work);
>> +	struct hns_roce_qp *hr_qp = flush_work->hr_qp;
>> +	struct device *dev = flush_work->hr_dev->dev;
>> +	struct ib_qp_attr attr;
>> +	int attr_mask;
>> +	int ret;
>> +
>> +	attr_mask = IB_QP_STATE;
>> +	attr.qp_state = IB_QPS_ERR;
>> +
>> +	ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
>> +	if (ret)
>> +		dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
>> +			ret);
>> +
>> +	if (atomic_dec_and_test(&hr_qp->refcount))
>> +		complete(&hr_qp->free);
>> +}
>> +
>> +void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
>> +{
>> +	struct hns_roce_work *flush_work = &hr_dev->flush_work;
>> +
>> +	flush_work->hr_dev = hr_dev;
>> +	flush_work->hr_qp = hr_qp;
>> +	INIT_WORK(&flush_work->work, flush_work_handle);
>> +	atomic_inc(&hr_qp->refcount);
>> +	queue_work(hr_dev->irq_workq, &flush_work->work);
> 
> It kind of looks like this can be called multiple times? It won't work
> right unless it is called exactly once
> 
> Jason

Yes, you are right.

So I think the reasonable solution is to allocate it dynamically. The chance
that the allocation fails is very small, and if it does happen, the
application needs to terminate anyway.

So I will fall back to v1 for this part in the next version:

	flush_work = kzalloc(sizeof(struct hns_roce_flush_work), GFP_ATOMIC);
	if (!flush_work)
		return;

Or could you give me some advice on this?

Thanks.

> 
> .
>
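For concreteness, here is a minimal sketch of the dynamically allocated
variant discussed above, reusing struct hns_roce_work from the posted patch
rather than the hns_roce_flush_work type mentioned in the reply. It is only
an illustration of the idea, not code from this series; the freeing point
and the GFP_ATOMIC choice are assumptions.

	/* Sketch only: one work item allocated per call, freed by the handler. */
	static void flush_work_handle(struct work_struct *work)
	{
		struct hns_roce_work *flush_work = container_of(work,
						struct hns_roce_work, work);
		struct hns_roce_qp *hr_qp = flush_work->hr_qp;
		struct ib_qp_attr attr;
		int ret;

		attr.qp_state = IB_QPS_ERR;
		ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, IB_QP_STATE, NULL);
		if (ret)
			dev_err(flush_work->hr_dev->dev,
				"Modify QP to error state failed(%d) during CQE flush\n",
				ret);

		/* Drop the reference taken when this work was queued. */
		if (atomic_dec_and_test(&hr_qp->refcount))
			complete(&hr_qp->free);

		kfree(flush_work);
	}

	void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
	{
		struct hns_roce_work *flush_work;

		/* GFP_ATOMIC is assumed because callers may hold the SQ/RQ spinlock. */
		flush_work = kzalloc(sizeof(*flush_work), GFP_ATOMIC);
		if (!flush_work)
			return;

		flush_work->hr_dev = hr_dev;
		flush_work->hr_qp = hr_qp;
		INIT_WORK(&flush_work->work, flush_work_handle);
		atomic_inc(&hr_qp->refcount);
		queue_work(hr_dev->irq_workq, &flush_work->work);
	}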
Jason Gunthorpe Nov. 18, 2019, 5:02 p.m. UTC | #3
On Mon, Nov 18, 2019 at 09:50:24PM +0800, Liuyixian (Eason) wrote:
> > It kind of looks like this can be called multiple times? It won't work
> > right unless it is called exactly once
> > 
> > Jason
> 
> Yes, you are right.
> 
> So I think the reasonable solution is to allocate it dynamically, and I think
> it is a very very little chance that the allocation will be failed. If this happened,
> I think the application also needs to be over.

Why do you need more than one work in parallel for this? Once you
start to move the HW to error that only has to happen once, surely?

Jason
Yixian Liu Nov. 19, 2019, 8 a.m. UTC | #4
On 2019/11/19 1:02, Jason Gunthorpe wrote:
> On Mon, Nov 18, 2019 at 09:50:24PM +0800, Liuyixian (Eason) wrote:
>>> It kind of looks like this can be called multiple times? It won't work
>>> right unless it is called exactly once
>>>
>>> Jason
>>
>> Yes, you are right.
>>
>> So I think the reasonable solution is to allocate it dynamically, and I think
>> it is a very very little chance that the allocation will be failed. If this happened,
>> I think the application also needs to be over.
> 
> Why do you need more than one work in parallel for this? Once you
> start to move the HW to error that only has to happen once, surely?
> 
> Jason
> 
The flush operation moves the QP, not the HW, to the error state.

For a given QP, process A may be posting a send while another
process B is modifying the QP to error; both of these operations
need to initialize a flush work. That's why it can be called
multiple times.

Furthermore, according to IB specification 9.9.2.4.2, posting WRs
to the QP may not stop even after it has transitioned to the error
state. That's another reason it needs to be callable multiple times.

Once the work has completed, we will free it, so it is not a big
problem to allocate it dynamically again and again.

Thanks.
Zengtao (B) Nov. 19, 2019, 9:43 a.m. UTC | #5
> -----Original Message-----
> From: linux-rdma-owner@vger.kernel.org
> [mailto:linux-rdma-owner@vger.kernel.org] On Behalf Of Liuyixian (Eason)
> Sent: Tuesday, November 19, 2019 4:00 PM
> To: Jason Gunthorpe
> Cc: dledford@redhat.com; leon@kernel.org; linux-rdma@vger.kernel.org;
> Linuxarm
> Subject: Re: [PATCH v2 for-next 1/2] RDMA/hns: Add the workqueue
> framework for flush cqe handler
> 
> 
> 
> On 2019/11/19 1:02, Jason Gunthorpe wrote:
> > On Mon, Nov 18, 2019 at 09:50:24PM +0800, Liuyixian (Eason) wrote:
> >>> It kind of looks like this can be called multiple times? It won't work
> >>> right unless it is called exactly once
> >>>
> >>> Jason
> >>
> >> Yes, you are right.
> >>
> >> So I think the reasonable solution is to allocate it dynamically, and I think
> >> it is a very very little chance that the allocation will be failed. If this
> happened,
> >> I think the application also needs to be over.
> >
> > Why do you need more than one work in parallel for this? Once you
> > start to move the HW to error that only has to happen once, surely?
> >
> > Jason
> >
> The flush operation moves QP, not the HW to error.
> 
> For the QP, maybe the process A is posting send while the other
> process B is modifying qp to error, both of these two operation
> needs to initialize one flush work. That's why it could be called
> multiple times.
> 
> Furthermore, according to IB protocol 9.9.2.4.2, it may can't stop
> posting wr into the qp even it already transitions to error state.
> That's why it also needs to be called multiple times.
> 
> Once the work is implemented successfully, we will free the work,
> it is not a big problem to allocate it dynamically again and again.
> 

So can I understand that this function is designed to be reentrant?
If so, I suggest introducing a per-dev/QP lock to protect it.

> Thanks.
> 
>
Yixian Liu Nov. 19, 2019, 1:09 p.m. UTC | #6
On 2019/11/19 17:43, Zengtao (B) wrote:
>> -----Original Message-----
>> From: linux-rdma-owner@vger.kernel.org
>> [mailto:linux-rdma-owner@vger.kernel.org] On Behalf Of Liuyixian (Eason)
>> Sent: Tuesday, November 19, 2019 4:00 PM
>> To: Jason Gunthorpe
>> Cc: dledford@redhat.com; leon@kernel.org; linux-rdma@vger.kernel.org;
>> Linuxarm
>> Subject: Re: [PATCH v2 for-next 1/2] RDMA/hns: Add the workqueue
>> framework for flush cqe handler
>>
>>
>>
>> On 2019/11/19 1:02, Jason Gunthorpe wrote:
>>> On Mon, Nov 18, 2019 at 09:50:24PM +0800, Liuyixian (Eason) wrote:
>>>>> It kind of looks like this can be called multiple times? It won't work
>>>>> right unless it is called exactly once
>>>>>
>>>>> Jason
>>>>
>>>> Yes, you are right.
>>>>
>>>> So I think the reasonable solution is to allocate it dynamically, and I think
>>>> it is a very very little chance that the allocation will be failed. If this
>> happened,
>>>> I think the application also needs to be over.
>>>
>>> Why do you need more than one work in parallel for this? Once you
>>> start to move the HW to error that only has to happen once, surely?
>>>
>>> Jason
>>>
>> The flush operation moves QP, not the HW to error.
>>
>> For the QP, maybe the process A is posting send while the other
>> process B is modifying qp to error, both of these two operation
>> needs to initialize one flush work. That's why it could be called
>> multiple times.
>>
>> Furthermore, according to IB protocol 9.9.2.4.2, it may can't stop
>> posting wr into the qp even it already transitions to error state.
>> That's why it also needs to be called multiple times.
>>
>> Once the work is implemented successfully, we will free the work,
>> it is not a big problem to allocate it dynamically again and again.
>>
> 
> So can i understand that this function is designed to be reentrant?
> If so, I suggest to introduce a per dev/qp lock to protect.

Yes, currently we use exactly the following per-QP spinlock to protect the
verbs, which can be reentrant:

	spin_lock_irqsave(&qp->sq.lock, flags);

> 
>> Thanks.
>>
>>
>
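As a hedged illustration of the per-QP protection idea raised here (not code
from this series): a single work item embedded in the QP together with an
atomic flag would let concurrent post verbs and modify-QP paths collapse into
one queued flush. The flush_flag field, the HNS_ROCE_FLUSH_FLAG bit, and the
embedded flush_work member are all hypothetical additions to struct
hns_roce_qp.

	/* Hypothetical bit guarding the embedded per-QP flush work. */
	#define HNS_ROCE_FLUSH_FLAG 0

	void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
	{
		/*
		 * Collapse concurrent callers into a single queued work.
		 * hr_qp->flush_work is assumed to have been INIT_WORK()ed
		 * once at QP creation time.
		 */
		if (test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag))
			return;

		atomic_inc(&hr_qp->refcount);
		queue_work(hr_dev->irq_workq, &hr_qp->flush_work.work);
	}

In such a scheme the handler would clear the bit with test_and_clear_bit()
before issuing the modify QP, so that a later producer-index update can queue
the work again; that caveat ties into the re-queuing point discussed below.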
Jason Gunthorpe Nov. 19, 2019, 6:46 p.m. UTC | #7
On Tue, Nov 19, 2019 at 04:00:00PM +0800, Liuyixian (Eason) wrote:
> 
> 
> On 2019/11/19 1:02, Jason Gunthorpe wrote:
> > On Mon, Nov 18, 2019 at 09:50:24PM +0800, Liuyixian (Eason) wrote:
> >>> It kind of looks like this can be called multiple times? It won't work
> >>> right unless it is called exactly once
> >>>
> >>> Jason
> >>
> >> Yes, you are right.
> >>
> >> So I think the reasonable solution is to allocate it dynamically, and I think
> >> it is a very very little chance that the allocation will be failed. If this happened,
> >> I think the application also needs to be over.
> > 
> > Why do you need more than one work in parallel for this? Once you
> > start to move the HW to error that only has to happen once, surely?
> > 
> > Jason
>
> The flush operation moves QP, not the HW to error.
> 
> For the QP, maybe the process A is posting send while the other
> process B is modifying qp to error, both of these two operation
> needs to initialize one flush work. That's why it could be called
> multiple times.

The work function does something that looks like it only has to happen
once per QP.

Or do you need to keep re-queuing this thing every time the user posts
a WR?

Jason
Yixian Liu Nov. 20, 2019, 11 a.m. UTC | #8
On 2019/11/20 2:46, Jason Gunthorpe wrote:
> On Tue, Nov 19, 2019 at 04:00:00PM +0800, Liuyixian (Eason) wrote:
>>
>>
>> On 2019/11/19 1:02, Jason Gunthorpe wrote:
>>> On Mon, Nov 18, 2019 at 09:50:24PM +0800, Liuyixian (Eason) wrote:
>>>>> It kind of looks like this can be called multiple times? It won't work
>>>>> right unless it is called exactly once
>>>>>
>>>>> Jason
>>>>
>>>> Yes, you are right.
>>>>
>>>> So I think the reasonable solution is to allocate it dynamically, and I think
>>>> it is a very very little chance that the allocation will be failed. If this happened,
>>>> I think the application also needs to be over.
>>>
>>> Why do you need more than one work in parallel for this? Once you
>>> start to move the HW to error that only has to happen once, surely?
>>>
>>> Jason
>>
>> The flush operation moves QP, not the HW to error.
>>
>> For the QP, maybe the process A is posting send while the other
>> process B is modifying qp to error, both of these two operation
>> needs to initialize one flush work. That's why it could be called
>> multiple times.
> 
> The work function does something that looks like it only has to happen
> once per QP.
No, the work should be re-queued every time the producer index of the QP
needs to be updated.

> 
> One do you need to keep re-queing this thing every time the user posts
> a WR?

Once a WR is posted, the producer index (PI) of the QP changes, so the
updated PI needs to be delivered to the HW as part of the flush operation
to help the HW generate the corresponding CQEs. That's why modify QP is
called inside the flush work: it not only moves the QP to error, but also
transfers the PI to the HW.

In short, the flush operation consists of two parts:
1. change the state of the QP to error
2. deliver the latest PI of the QP to the HW

Thanks.

> 
> Jason
> 
>
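A short sketch of the post-verb leg this reply describes, illustrative only:
the state field and the sq.lock locking follow the driver's existing style,
but the helper itself is hypothetical and not part of this series.

	/*
	 * Illustrative only: after the SQ producer index has been advanced
	 * (caller holds hr_qp->sq.lock, so nothing here may sleep), a QP
	 * already in the error state gets another flush work queued so the
	 * new PI reaches the hardware.
	 */
	static void sketch_queue_flush_on_post(struct hns_roce_dev *hr_dev,
					       struct hns_roce_qp *hr_qp)
	{
		if (hr_qp->state == IB_QPS_ERR)
			init_flush_work(hr_dev, hr_qp);
	}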

Patch

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index a1b712e..42d8a5a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -906,6 +906,7 @@  struct hns_roce_caps {
 struct hns_roce_work {
 	struct hns_roce_dev *hr_dev;
 	struct work_struct work;
+	struct hns_roce_qp *hr_qp;
 	u32 qpn;
 	u32 cqn;
 	int event_type;
@@ -1034,6 +1035,7 @@  struct hns_roce_dev {
 	const struct hns_roce_hw *hw;
 	void			*priv;
 	struct workqueue_struct *irq_workq;
+	struct hns_roce_work flush_work;
 	const struct hns_roce_dfx_hw *dfx;
 };
 
@@ -1226,6 +1228,7 @@  struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
 				 struct ib_udata *udata);
 int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       int attr_mask, struct ib_udata *udata);
+void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
 void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
 void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
 void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 907c951..ec48e7e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5967,8 +5967,8 @@  static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
 		goto err_request_irq_fail;
 	}
 
-	hr_dev->irq_workq =
-		create_singlethread_workqueue("hns_roce_irq_workqueue");
+	hr_dev->irq_workq = alloc_workqueue("hns_roce_irq_workqueue",
+					    WQ_MEM_RECLAIM, 0);
 	if (!hr_dev->irq_workq) {
 		dev_err(dev, "Create irq workqueue failed!\n");
 		ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 9442f01..0111f2e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -43,6 +43,39 @@ 
 
 #define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
 
+static void flush_work_handle(struct work_struct *work)
+{
+	struct hns_roce_work *flush_work = container_of(work,
+					struct hns_roce_work, work);
+	struct hns_roce_qp *hr_qp = flush_work->hr_qp;
+	struct device *dev = flush_work->hr_dev->dev;
+	struct ib_qp_attr attr;
+	int attr_mask;
+	int ret;
+
+	attr_mask = IB_QP_STATE;
+	attr.qp_state = IB_QPS_ERR;
+
+	ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
+	if (ret)
+		dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
+			ret);
+
+	if (atomic_dec_and_test(&hr_qp->refcount))
+		complete(&hr_qp->free);
+}
+
+void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+	struct hns_roce_work *flush_work = &hr_dev->flush_work;
+
+	flush_work->hr_dev = hr_dev;
+	flush_work->hr_qp = hr_qp;
+	INIT_WORK(&flush_work->work, flush_work_handle);
+	atomic_inc(&hr_qp->refcount);
+	queue_work(hr_dev->irq_workq, &flush_work->work);
+}
+
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 {
 	struct device *dev = hr_dev->dev;