| Message ID | 1572255945-20297-2-git-send-email-liuyixian@huawei.com (mailing list archive) |
|---|---|
| State | Superseded |
| Series | Fix crash due to sleepy mutex while holding lock in post_{send|recv|poll} |
On Mon, Oct 28, 2019 at 05:45:44PM +0800, Yixian Liu wrote:
> @@ -1998,6 +2000,17 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
> 			}
> 		}
>
> +	snprintf(workq_name, HNS_ROCE_WORKQ_NAME_LEN - 1,
> +		 "hns_roce_%d_flush_wq", device_id);
> +	device_id++;
> +
> +	hr_dev->flush_workq = alloc_workqueue(workq_name, WQ_HIGHPRI, 0);
> +	if (!hr_dev->flush_workq) {

Why is this so time critical?

> diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
> index bec48f2..2c8f726 100644
> +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
> @@ -43,6 +43,49 @@
>
>  #define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
>
> +static void flush_work_handle(struct work_struct *work)
> +{
> +	struct hns_roce_flush_work *flush_work = container_of(work,
> +					struct hns_roce_flush_work, work);
> +	struct hns_roce_qp *hr_qp = flush_work->hr_qp;
> +	struct device *dev = flush_work->hr_dev->dev;
> +	struct ib_qp_attr attr;
> +	int attr_mask;
> +	int ret;
> +
> +	attr_mask = IB_QP_STATE;
> +	attr.qp_state = IB_QPS_ERR;
> +
> +	ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
> +	if (ret)
> +		dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
> +			ret);

There is something wrong with your description as all this seems to do
is tell the HW to go to the ERR state.

Why don't you do this from hns_roce_irq_work_handle() ?

> +	kfree(flush_work);
> +
> +	/*
> +	 * make sure we signal QP destroy leg that flush QP was completed
> +	 * so that it can safely proceed ahead now and destroy QP
> +	 */
> +	if (atomic_dec_and_test(&hr_qp->refcount))
> +		complete(&hr_qp->free);
> +}
> +
> +void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
> +{
> +	struct hns_roce_flush_work *flush_work;
> +
> +	flush_work = kzalloc(sizeof(struct hns_roce_flush_work), GFP_ATOMIC);
> +	if (!flush_work)
> +		return;

Don't do things that can fail here

> +
> +	flush_work->hr_dev = hr_dev;
> +	flush_work->hr_qp = hr_qp;
> +	INIT_WORK(&flush_work->work, flush_work_handle);
> +	atomic_inc(&hr_qp->refcount);
> +	queue_work(hr_dev->flush_workq, &flush_work->work);
> +}
> +
>  void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
>  {
>  	struct device *dev = hr_dev->dev;
On 2019/11/7 4:40, Jason Gunthorpe wrote:
> On Mon, Oct 28, 2019 at 05:45:44PM +0800, Yixian Liu wrote:
>> @@ -1998,6 +2000,17 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
>> 			}
>> 		}
>>
>> +	snprintf(workq_name, HNS_ROCE_WORKQ_NAME_LEN - 1,
>> +		 "hns_roce_%d_flush_wq", device_id);
>> +	device_id++;
>> +
>> +	hr_dev->flush_workq = alloc_workqueue(workq_name, WQ_HIGHPRI, 0);
>> +	if (!hr_dev->flush_workq) {
>
> Why is this so time critical?

Hi Jason,

I am not quite sure whether you are concerned with the flag "WQ_HIGHPRI" or
with why the WQ is created in hns_roce_v2_init.

If it is WQ_HIGHPRI, yes, it is much better to implement the flush operation
ASAP to help generate the flushed CQE, as the ULP may poll for the CQE
urgently. If you are concerned with the allocation stage, as the flush
operation is supported only on hip08, there is no place more proper than
here, I think.

>
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
>> index bec48f2..2c8f726 100644
>> +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
>> @@ -43,6 +43,49 @@
>>
>>  #define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
>>
>> +static void flush_work_handle(struct work_struct *work)
>> +{
>> +	struct hns_roce_flush_work *flush_work = container_of(work,
>> +					struct hns_roce_flush_work, work);
>> +	struct hns_roce_qp *hr_qp = flush_work->hr_qp;
>> +	struct device *dev = flush_work->hr_dev->dev;
>> +	struct ib_qp_attr attr;
>> +	int attr_mask;
>> +	int ret;
>> +
>> +	attr_mask = IB_QP_STATE;
>> +	attr.qp_state = IB_QPS_ERR;
>> +
>> +	ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
>> +	if (ret)
>> +		dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
>> +			ret);
>
> There is something wrong with your description as all this seems to do
> is tell the HW to go to the ERR state.

For the flush operation, in addition to modifying the QP to the ERR state,
the head pointers of the SQ and RQ are also told to the HW through this
interface, as shown below. This part of the code is already there.

	if (new_state == IB_QPS_ERR) {
		roce_set_field(context->byte_160_sq_ci_pi,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
			       hr_qp->sq.head);
		roce_set_field(qpc_mask->byte_160_sq_ci_pi,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);

		if (!ibqp->srq) {
			roce_set_field(context->byte_84_rq_ci_pi,
				       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
				       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
				       hr_qp->rq.head);
			roce_set_field(qpc_mask->byte_84_rq_ci_pi,
				       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
				       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
		}
	}

>
> Why don't you do this from hns_roce_irq_work_handle() ?

As described in the cover letter, here we use CMWQ (the concurrency-managed
workqueue) to make sure that the flush operation can be carried out ASAP.
The current irq workqueue is a single-threaded workqueue, which may delay
the flush too long when the system load is heavy.

Do you mean we can change the irq workqueue to CMWQ and put the flush work
into it?

>
>> +	kfree(flush_work);
>> +
>> +	/*
>> +	 * make sure we signal QP destroy leg that flush QP was completed
>> +	 * so that it can safely proceed ahead now and destroy QP
>> +	 */
>> +	if (atomic_dec_and_test(&hr_qp->refcount))
>> +		complete(&hr_qp->free);
>> +}
>> +
>> +void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
>> +{
>> +	struct hns_roce_flush_work *flush_work;
>> +
>> +	flush_work = kzalloc(sizeof(struct hns_roce_flush_work), GFP_ATOMIC);
>> +	if (!flush_work)
>> +		return;
>
> Don't do things that can fail here

Do you mean that as "GFP_ATOMIC" is used, the if branch can be deleted?

>> +
>> +	flush_work->hr_dev = hr_dev;
>> +	flush_work->hr_qp = hr_qp;
>> +	INIT_WORK(&flush_work->work, flush_work_handle);
>> +	atomic_inc(&hr_qp->refcount);
>> +	queue_work(hr_dev->flush_workq, &flush_work->work);
>> +}
>> +
>>  void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
>>  {
>>  	struct device *dev = hr_dev->dev;
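For reference, the trade-off being discussed is between the single-threaded,
ordered workqueue the driver already uses for its IRQ work and a
concurrency-managed (CMWQ) queue. A minimal sketch, with illustrative names
not taken from the patch, of how the two kinds of queue are created:

/* Illustrative only: contrasts the two workqueue flavours mentioned above. */
static int example_create_workqueues(struct hns_roce_dev *hr_dev)
{
	/*
	 * Single-threaded and ordered: one work item runs at a time, so a
	 * long-running item delays everything queued behind it. This is the
	 * behaviour of the existing irq workqueue described above.
	 */
	hr_dev->irq_workq = create_singlethread_workqueue("hns_roce_irq_wq");
	if (!hr_dev->irq_workq)
		return -ENOMEM;

	/*
	 * Concurrency-managed (CMWQ): items may run on multiple worker
	 * threads, and WQ_HIGHPRI places them on the per-CPU high-priority
	 * worker pool, which is what the patch relies on for prompt flushes.
	 */
	hr_dev->flush_workq = alloc_workqueue("hns_roce_flush_wq", WQ_HIGHPRI, 0);
	if (!hr_dev->flush_workq) {
		destroy_workqueue(hr_dev->irq_workq);
		return -ENOMEM;
	}

	return 0;
}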
On Thu, Nov 07, 2019 at 08:48:25PM +0800, Liuyixian (Eason) wrote:
> On 2019/11/7 4:40, Jason Gunthorpe wrote:
> > On Mon, Oct 28, 2019 at 05:45:44PM +0800, Yixian Liu wrote:
> >> @@ -1998,6 +2000,17 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
> >> 			}
> >> 		}
> >>
> >> +	snprintf(workq_name, HNS_ROCE_WORKQ_NAME_LEN - 1,
> >> +		 "hns_roce_%d_flush_wq", device_id);
> >> +	device_id++;
> >> +
> >> +	hr_dev->flush_workq = alloc_workqueue(workq_name, WQ_HIGHPRI, 0);
> >> +	if (!hr_dev->flush_workq) {
> >
> > Why is this so time critical?
>
> Hi Jason,
>
> I am not quite sure whether you are concerned with the flag "WQ_HIGHPRI" or
> with why the WQ is created in hns_roce_v2_init.

Yes, why do you need a dedicated HIGHPRI work queue?

> If it is WQ_HIGHPRI, yes, it is much better to implement the flush operation
> ASAP to help generate the flushed CQE, as the ULP may poll for the CQE
> urgently. If you are concerned with the allocation stage, as the flush
> operation is supported only on hip08, there is no place more proper than
> here, I think.

Why? This is only something that happens in error cases.

> > Why don't you do this from hns_roce_irq_work_handle() ?
>
> As described in the cover letter, here we use CMWQ (the concurrency-managed
> workqueue) to make sure that the flush operation can be carried out ASAP.
> The current irq workqueue is a single-threaded workqueue, which may delay
> the flush too long when the system load is heavy.
>
> Do you mean we can change the irq workqueue to CMWQ and put the flush work
> into it?

As far as I could tell, the only thing that triggered the work to run was
some variable which was only set in another work queue,
hns_roce_irq_work_handle().

> >> +}
> >> +
> >> +void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
> >> +{
> >> +	struct hns_roce_flush_work *flush_work;
> >> +
> >> +	flush_work = kzalloc(sizeof(struct hns_roce_flush_work), GFP_ATOMIC);
> >> +	if (!flush_work)
> >> +		return;
> >
> > Don't do things that can fail here
>
> Do you mean that as "GFP_ATOMIC" is used, the if branch can be deleted?

No, don't do allocations at all if you can't allow them to fail.

Jason
On 2019/11/8 2:28, Jason Gunthorpe wrote:
> On Thu, Nov 07, 2019 at 08:48:25PM +0800, Liuyixian (Eason) wrote:
>> On 2019/11/7 4:40, Jason Gunthorpe wrote:
>>> On Mon, Oct 28, 2019 at 05:45:44PM +0800, Yixian Liu wrote:
>>>> @@ -1998,6 +2000,17 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
>>>> 			}
>>>> 		}
>>>>
>>>> +	snprintf(workq_name, HNS_ROCE_WORKQ_NAME_LEN - 1,
>>>> +		 "hns_roce_%d_flush_wq", device_id);
>>>> +	device_id++;
>>>> +
>>>> +	hr_dev->flush_workq = alloc_workqueue(workq_name, WQ_HIGHPRI, 0);
>>>> +	if (!hr_dev->flush_workq) {
>>>
>>> Why is this so time critical?
>>
>> Hi Jason,
>>
>> I am not quite sure whether you are concerned with the flag "WQ_HIGHPRI" or
>> with why the WQ is created in hns_roce_v2_init.
>
> Yes, why do you need a dedicated HIGHPRI work queue?

As the hip08 hardware needs the driver's help to do the flush operation, I am
not sure the application can perceive that the QP state is error without
receiving a flush CQE if the work in the WQ is not scheduled in time.

>
>> If it is WQ_HIGHPRI, yes, it is much better to implement the flush operation
>> ASAP to help generate the flushed CQE, as the ULP may poll for the CQE
>> urgently. If you are concerned with the allocation stage, as the flush
>> operation is supported only on hip08, there is no place more proper than
>> here, I think.
>
> Why? This is only something that happens in error cases.

Yes, maybe we can safely remove the WQ_HIGHPRI flag. Will fix it in v2.

>
>>> Why don't you do this from hns_roce_irq_work_handle() ?
>>
>> As described in the cover letter, here we use CMWQ (the concurrency-managed
>> workqueue) to make sure that the flush operation can be carried out ASAP.
>> The current irq workqueue is a single-threaded workqueue, which may delay
>> the flush too long when the system load is heavy.
>>
>> Do you mean we can change the irq workqueue to CMWQ and put the flush work
>> into it?
>
> As far as I could tell, the only thing that triggered the work to run was
> some variable which was only set in another work queue,
> hns_roce_irq_work_handle().

OK, thanks. I will consider your suggestion and reuse the irq workqueue.

>
>>>> +}
>>>> +
>>>> +void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
>>>> +{
>>>> +	struct hns_roce_flush_work *flush_work;
>>>> +
>>>> +	flush_work = kzalloc(sizeof(struct hns_roce_flush_work), GFP_ATOMIC);
>>>> +	if (!flush_work)
>>>> +		return;
>>>
>>> Don't do things that can fail here
>>
>> Do you mean that as "GFP_ATOMIC" is used, the if branch can be deleted?
>
> No, don't do allocations at all if you can't allow them to fail.

OK, thanks! I will initialize this structure at compile time.

> Jason
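One way to follow both suggestions above, avoiding the runtime allocation and
reusing the existing irq workqueue, is to embed the work item in the QP
itself. The following is only a sketch of that direction, not the posted
patch: the embedded field name and the use of hr_dev->irq_workq are
assumptions for illustration.

/*
 * Sketch: embed the flush work in the QP so triggering a flush never
 * needs to allocate memory (and so can never fail).
 */
struct hns_roce_qp {
	/* ... existing members ... */
	struct hns_roce_flush_work flush_work;	/* embedded, no kzalloc() */
};

void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_flush_work *flush_work = &hr_qp->flush_work;

	flush_work->hr_dev = hr_dev;
	flush_work->hr_qp = hr_qp;
	INIT_WORK(&flush_work->work, flush_work_handle);

	/* hold a reference so the QP cannot be freed under the work item */
	atomic_inc(&hr_qp->refcount);
	if (!queue_work(hr_dev->irq_workq, &flush_work->work))
		/* already pending: drop the extra reference we just took */
		atomic_dec(&hr_qp->refcount);
}

With this layout the handler would also drop its kfree(flush_work) call,
since the memory now belongs to the QP and is released with it.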
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index cbd75e4..0d979e8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -45,6 +45,8 @@
 
 #define HNS_ROCE_MAX_MSG_LEN			0x80000000
 
+#define HNS_ROCE_WORKQ_NAME_LEN			32
+
 #define HNS_ROCE_ALIGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
 
 #define HNS_ROCE_IB_MIN_SQ_STRIDE		6
@@ -921,6 +923,12 @@ struct hns_roce_work {
 	int sub_type;
 };
 
+struct hns_roce_flush_work {
+	struct hns_roce_dev *hr_dev;
+	struct work_struct work;
+	struct hns_roce_qp *hr_qp;
+};
+
 struct hns_roce_dfx_hw {
 	int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
 			      int *buffer);
@@ -1043,6 +1051,7 @@ struct hns_roce_dev {
 	const struct hns_roce_hw *hw;
 	void			*priv;
 	struct workqueue_struct *irq_workq;
+	struct workqueue_struct *flush_workq;
 	const struct hns_roce_dfx_hw *dfx;
 };
 
@@ -1240,6 +1249,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
 				 struct ib_udata *udata);
 int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       int attr_mask, struct ib_udata *udata);
+void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
 void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
 void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
 void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 14e24b4..396c896 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1960,6 +1960,8 @@ static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_v2_priv *priv = hr_dev->priv;
+	char workq_name[HNS_ROCE_WORKQ_NAME_LEN];
+	static int device_id;
 	int qpc_count, cqc_count;
 	int ret, i;
 
@@ -1998,6 +2000,17 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
 		}
 	}
 
+	snprintf(workq_name, HNS_ROCE_WORKQ_NAME_LEN - 1,
+		 "hns_roce_%d_flush_wq", device_id);
+	device_id++;
+
+	hr_dev->flush_workq = alloc_workqueue(workq_name, WQ_HIGHPRI, 0);
+	if (!hr_dev->flush_workq) {
+		dev_err(hr_dev->dev, "Failed to create flush workqueue!\n");
+		ret = -ENOMEM;
+		goto err_cqc_timer_failed;
+	}
+
 	return 0;
 
 err_cqc_timer_failed:
@@ -2020,6 +2033,9 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_v2_priv *priv = hr_dev->priv;
 
+	flush_workqueue(hr_dev->flush_workq);
+	destroy_workqueue(hr_dev->flush_workq);
+
 	if (hr_dev->pci_dev->revision == 0x21)
 		hns_roce_function_clear(hr_dev);
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index bec48f2..2c8f726 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -43,6 +43,49 @@
 
 #define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
 
+static void flush_work_handle(struct work_struct *work)
+{
+	struct hns_roce_flush_work *flush_work = container_of(work,
+					struct hns_roce_flush_work, work);
+	struct hns_roce_qp *hr_qp = flush_work->hr_qp;
+	struct device *dev = flush_work->hr_dev->dev;
+	struct ib_qp_attr attr;
+	int attr_mask;
+	int ret;
+
+	attr_mask = IB_QP_STATE;
+	attr.qp_state = IB_QPS_ERR;
+
+	ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
+	if (ret)
+		dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
+			ret);
+
+	kfree(flush_work);
+
+	/*
+	 * make sure we signal QP destroy leg that flush QP was completed
+	 * so that it can safely proceed ahead now and destroy QP
+	 */
+	if (atomic_dec_and_test(&hr_qp->refcount))
+		complete(&hr_qp->free);
+}
+
+void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+	struct hns_roce_flush_work *flush_work;
+
+	flush_work = kzalloc(sizeof(struct hns_roce_flush_work), GFP_ATOMIC);
+	if (!flush_work)
+		return;
+
+	flush_work->hr_dev = hr_dev;
+	flush_work->hr_qp = hr_qp;
+	INIT_WORK(&flush_work->work, flush_work_handle);
+	atomic_inc(&hr_qp->refcount);
+	queue_work(hr_dev->flush_workq, &flush_work->work);
+}
+
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 {
 	struct device *dev = hr_dev->dev;
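The refcount/completion pair in flush_work_handle() only makes sense together
with a destroy path that waits for any outstanding flush work. A sketch of
that pairing, assuming the conventional refcount-and-completion pattern; the
function name below is illustrative and not part of this patch:

/* Sketch of the "QP destroy leg" the comment above refers to. */
static void hns_roce_qp_wait_flush_done(struct hns_roce_qp *hr_qp)
{
	/* drop the QP's own reference ... */
	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);

	/*
	 * ... then block until every queued flush work item has dropped its
	 * reference in flush_work_handle() and signalled the completion,
	 * after which the QP resources can be freed safely.
	 */
	wait_for_completion(&hr_qp->free);
}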