@@ -554,12 +554,36 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);
+static struct resp_res *rxe_prepare_atomic_res(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct resp_res *res;
+
+ res = &qp->resp.resources[qp->resp.res_head];
+ rxe_advance_resp_resource(qp);
+ free_rd_atomic_resource(qp, res);
+
+ res->type = RXE_ATOMIC_MASK;
+ res->first_psn = pkt->psn;
+ res->last_psn = pkt->psn;
+ res->cur_psn = pkt->psn;
+ res->replay = 0;
+
+ return res;
+}
+
static enum resp_states rxe_atomic_reply(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
u64 *vaddr;
enum resp_states ret;
struct rxe_mr *mr = qp->resp.mr;
+ struct resp_res *res = qp->resp.res;
+
+ if (!res) {
+ res = rxe_prepare_atomic_res(qp, pkt);
+ qp->resp.res = res;
+ }
if (mr->state != RXE_MR_STATE_VALID) {
ret = RESPST_ERR_RKEY_VIOLATION;
@@ -1028,30 +1052,13 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
return err;
}
-static struct resp_res *rxe_prepare_atomic_res(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
-{
- struct resp_res *res;
-
- res = &qp->resp.resources[qp->resp.res_head];
- rxe_advance_resp_resource(qp);
- free_rd_atomic_resource(qp, res);
-
- res->type = RXE_ATOMIC_MASK;
- res->first_psn = pkt->psn;
- res->last_psn = pkt->psn;
- res->cur_psn = pkt->psn;
-
- return res;
-}
-
static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
u8 syndrome)
{
int err = 0;
struct rxe_pkt_info ack_pkt;
struct sk_buff *skb;
- struct resp_res *res;
+ struct resp_res *res = qp->resp.res;
skb = prepare_ack_packet(qp, pkt, &ack_pkt,
IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
@@ -1063,7 +1070,6 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
skb_get(skb);
- res = rxe_prepare_atomic_res(qp, pkt);
res->atomic.skb = skb;
err = rxe_xmit_packet(qp, &ack_pkt, skb);
@@ -1071,6 +1077,11 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
pr_err_ratelimited("Failed sending ack\n");
rxe_put(qp);
}
+
+ /* have to clear this since it is used to trigger
+ * long read replies
+ */
+ qp->resp.res = NULL;
out:
return err;
}
Move the allocation of the atomic responder resource up into
rxe_atomic_reply() from send_atomic_ack(), in preparation for
merging the normal and retry atomic responder flows.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_resp.c | 49 +++++++++++++++++-----------
 1 file changed, 30 insertions(+), 19 deletions(-)