| Message ID | 20240326174325.300849-9-rpearsonhpe@gmail.com (mailing list archive) |
|---|---|
| State | Superseded |
| Series | RDMA/rxe: Various fixes and cleanups |
On Tue, Mar 26, 2024 at 12:43:22PM -0500, Bob Pearson wrote:
> Instead of rescheduling rxe_requester from rxe_completer(), just extend
> the duration of rxe_sender() by one pass. Setting run_requester_again
> forces rxe_completer() to return 0, which will cause rxe_sender() to be
> called at least one more time.
>
> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> ---
>  drivers/infiniband/sw/rxe/rxe_comp.c | 19 ++++++++++++-------
>  1 file changed, 12 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
> index ea64a25fe876..c41743fbd5f1 100644
> --- a/drivers/infiniband/sw/rxe/rxe_comp.c
> +++ b/drivers/infiniband/sw/rxe/rxe_comp.c
> @@ -82,6 +82,8 @@ static unsigned long rnrnak_usec[32] = {
>  	[IB_RNR_TIMER_491_52]		= 491520,
>  };
>
> +static int run_requester_again;

Is it safe to write, read and rely on the value of this global variable
without any locking?

Thanks

> +
>  static inline unsigned long rnrnak_jiffies(u8 timeout)
>  {
>  	return max_t(unsigned long,
> @@ -325,7 +327,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
>  			qp->comp.psn = pkt->psn;
>  			if (qp->req.wait_psn) {
>  				qp->req.wait_psn = 0;
> -				rxe_sched_task(&qp->send_task);
> +				run_requester_again = 1;
>  			}
>  		}
>  		return COMPST_ERROR_RETRY;
> @@ -476,7 +478,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
>  	 */
>  	if (qp->req.wait_fence) {
>  		qp->req.wait_fence = 0;
> -		rxe_sched_task(&qp->send_task);
> +		run_requester_again = 1;
>  	}
>  }
>
> @@ -515,7 +517,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
>  		if (qp->req.need_rd_atomic) {
>  			qp->comp.timeout_retry = 0;
>  			qp->req.need_rd_atomic = 0;
> -			rxe_sched_task(&qp->send_task);
> +			run_requester_again = 1;
>  		}
>  	}
>
> @@ -541,7 +543,7 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
>
>  		if (qp->req.wait_psn) {
>  			qp->req.wait_psn = 0;
> -			rxe_sched_task(&qp->send_task);
> +			run_requester_again = 1;
>  		}
>  	}
>
> @@ -654,6 +656,8 @@ int rxe_completer(struct rxe_qp *qp)
>  	int ret;
>  	unsigned long flags;
>
> +	run_requester_again = 0;
> +
>  	spin_lock_irqsave(&qp->state_lock, flags);
>  	if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
>  	    qp_state(qp) == IB_QPS_RESET) {
> @@ -737,7 +741,7 @@ int rxe_completer(struct rxe_qp *qp)
>
>  			if (qp->req.wait_psn) {
>  				qp->req.wait_psn = 0;
> -				rxe_sched_task(&qp->send_task);
> +				run_requester_again = 1;
>  			}
>
>  			state = COMPST_DONE;
> @@ -792,7 +796,7 @@ int rxe_completer(struct rxe_qp *qp)
>  					RXE_CNT_COMP_RETRY);
>  			qp->req.need_retry = 1;
>  			qp->comp.started_retry = 1;
> -			rxe_sched_task(&qp->send_task);
> +			run_requester_again = 1;
>  		}
>  		goto done;
>
> @@ -843,8 +847,9 @@ int rxe_completer(struct rxe_qp *qp)
>  	ret = 0;
>  	goto out;
> exit:
> -	ret = -EAGAIN;
> +	ret = (run_requester_again) ? 0 : -EAGAIN;
> out:
> +	run_requester_again = 0;
>  	if (pkt)
>  		free_pkt(pkt);
>  	return ret;
> -- 
> 2.43.0
>
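For context, the commit message's claim that returning 0 buys the requester another pass depends on how rxe_sender() combines the two task return values. Below is a minimal sketch of that shape, simplified from the earlier patch in this series that merges the request and complete tasks; treat it as an illustration of the mechanism, not the verbatim upstream body.

```c
/*
 * Sketch of the rxe_sender() shape this patch relies on (simplified,
 * not the verbatim body from the series).  The task machinery keeps
 * re-invoking rxe_sender() until it returns nonzero, so a 0 from
 * rxe_completer() guarantees rxe_requester() at least one more pass.
 */
int rxe_sender(struct rxe_qp *qp)
{
	int req_ret, comp_ret;

	/* process the send queue */
	req_ret = rxe_requester(qp);

	/* process acks and completions */
	comp_ret = rxe_completer(qp);

	/* exit the task loop only when both halves are idle */
	return (req_ret && comp_ret) ? -EAGAIN : 0;
}
```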
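The locking question above stems from run_requester_again being file-scoped: every QP's completer, on any CPU, shares the single flag, so one completer's reset at entry can erase a value another completer just set on a different QP. One hypothetical alternative, shown here as a self-contained toy in plain C (the toy_* names are illustrative, not rxe code, and this is not the fix the series eventually took), keeps the flag on the completer's stack and threads it through by pointer:

```c
#include <errno.h>

/* Toy model of the same pattern with a per-call flag instead of a
 * shared global.  Each invocation owns its own run_again, so two
 * completers running concurrently on different QPs cannot clobber
 * each other's value.
 */
struct toy_qp {
	int wait_psn;			/* stand-in for qp->req.wait_psn */
};

static void toy_check_ack(struct toy_qp *qp, int *run_again)
{
	if (qp->wait_psn) {
		qp->wait_psn = 0;
		*run_again = 1;		/* per-call, not global */
	}
}

static int toy_completer(struct toy_qp *qp)
{
	int run_again = 0;		/* one flag per invocation */

	toy_check_ack(qp, &run_again);

	/* 0 keeps the sender loop going for one more pass */
	return run_again ? 0 : -EAGAIN;
}
```

With this shape the flag's lifetime matches the rxe_completer() call itself, so no cross-CPU sharing arises and no locking is needed.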