@@ -660,10 +660,8 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
struct rxe_pkt_info *ack,
- int opcode,
- int payload,
- u32 psn,
- u8 syndrome)
+ int opcode, int payload, u32 psn,
+ u8 syndrome, bool *fragp)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct sk_buff *skb;
@@ -682,7 +680,7 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
ack->psn = psn;
ack->port_num = 1;
- skb = rxe_init_packet(qp, &qp->pri_av, ack, NULL);
+ skb = rxe_init_packet(qp, &qp->pri_av, ack, fragp);
if (!skb)
return NULL;
@@ -698,12 +696,14 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
atmack_set_orig(ack, qp->resp.res->atomic.orig_val);
err = rxe_prepare(&qp->pri_av, ack, skb);
- if (err) {
- kfree_skb(skb);
- return NULL;
- }
+ if (err)
+ goto err_free_skb;
return skb;
+
+err_free_skb:
+ kfree_skb(skb);
+ return NULL;
}
/**
@@ -775,6 +775,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
struct resp_res *res = qp->resp.res;
struct rxe_mr *mr;
int skb_offset = 0;
+ bool frag;
+ enum rxe_mr_copy_op op;
if (!res) {
res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK);
@@ -787,8 +789,10 @@ static enum resp_states read_reply(struct rxe_qp *qp,
qp->resp.mr = NULL;
} else {
mr = rxe_recheck_mr(qp, res->read.rkey);
- if (!mr)
- return RESPST_ERR_RKEY_VIOLATION;
+ if (!mr) {
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err_out;
+ }
}
if (res->read.resid <= mtu)
@@ -797,8 +801,10 @@ static enum resp_states read_reply(struct rxe_qp *qp,
opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
} else {
mr = rxe_recheck_mr(qp, res->read.rkey);
- if (!mr)
- return RESPST_ERR_RKEY_VIOLATION;
+ if (!mr) {
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err_out;
+ }
if (res->read.resid > mtu)
opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
@@ -806,35 +812,35 @@ static enum resp_states read_reply(struct rxe_qp *qp,
opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
}
- res->state = rdatm_res_state_next;
-
payload = min_t(int, res->read.resid, mtu);
skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
- res->cur_psn, AETH_ACK_UNLIMITED);
- if (!skb)
- return RESPST_ERR_RNR;
+ res->cur_psn, AETH_ACK_UNLIMITED, &frag);
+ if (!skb) {
+ state = RESPST_ERR_RNR;
+ goto err_put_mr;
+ }
+ op = frag ? RXE_FRAG_FROM_MR : RXE_COPY_FROM_MR;
err = rxe_copy_mr_data(skb, mr, res->read.va, payload_addr(&ack_pkt),
- skb_offset, payload, RXE_COPY_FROM_MR);
+ skb_offset, payload, op);
if (err) {
- kfree_skb(skb);
- rxe_put(mr);
- return RESPST_ERR_RKEY_VIOLATION;
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err_free_skb;
}
- if (mr)
- rxe_put(mr);
-
- if (bth_pad(&ack_pkt)) {
- u8 *pad = payload_addr(&ack_pkt) + payload;
-
- memset(pad, 0, bth_pad(&ack_pkt));
+ err = rxe_prepare_pad_icrc(&ack_pkt, skb, payload, frag);
+ if (err) {
+ state = RESPST_ERR_RNR;
+ goto err_free_skb;
}
err = rxe_xmit_packet(qp, &ack_pkt, skb);
- if (err)
- return RESPST_ERR_RNR;
+ if (err) {
+ /* rxe_xmit_packet will consume the packet */
+ state = RESPST_ERR_RNR;
+ goto err_put_mr;
+ }
res->read.va += payload;
res->read.resid -= payload;
@@ -851,6 +857,16 @@ static enum resp_states read_reply(struct rxe_qp *qp,
state = RESPST_CLEANUP;
}
+ /* keep these after all error exits */
+ res->state = rdatm_res_state_next;
+ rxe_put(mr);
+ return state;
+
+err_free_skb:
+ kfree_skb(skb);
+err_put_mr:
+ rxe_put(mr);
+err_out:
return state;
}
@@ -1041,14 +1057,19 @@ static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn,
int opcode, const char *msg)
{
int err;
- struct rxe_pkt_info ack_pkt;
+ struct rxe_pkt_info ack;
struct sk_buff *skb;
+ int payload = 0;
- skb = prepare_ack_packet(qp, &ack_pkt, opcode, 0, psn, syndrome);
+ skb = prepare_ack_packet(qp, &ack, opcode, payload,
+ psn, syndrome, NULL);
if (!skb)
return -ENOMEM;
- err = rxe_xmit_packet(qp, &ack_pkt, skb);
+ /* doesn't fail if frag == false */
+ (void)rxe_prepare_pad_icrc(&ack, skb, payload, false);
+
+ err = rxe_xmit_packet(qp, &ack, skb);
if (err)
pr_err_ratelimited("Failed sending %s\n", msg);
Extend prepare_ack_packet(), read_reply() and send_common_ack() in
rxe_resp.c to support fragmented skbs. Adjust calls to these routines
for the changed API. This is in preparation for using fragmented skbs.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_resp.c | 89 +++++++++++++++++----------
 1 file changed, 55 insertions(+), 34 deletions(-)
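
For reference, below is a condensed, illustrative sketch of the call pattern the
changed API implies for a responder sending a read response. It mirrors the
read_reply() changes in this patch but is not the exact kernel code: the helper
name send_read_response() is made up for the example, MR reference counting and
the resid/PSN bookkeeping are omitted, and the success state is simplified to
RESPST_CLEANUP.

/*
 * Illustrative sketch only: condensed from the read_reply() changes in
 * this patch. send_read_response() is a made-up name; MR refcounting
 * and resid/PSN bookkeeping are omitted.
 */
static enum resp_states send_read_response(struct rxe_qp *qp,
					   struct resp_res *res,
					   struct rxe_mr *mr,
					   int opcode, int payload)
{
	struct rxe_pkt_info ack;
	struct sk_buff *skb;
	bool frag;			/* filled in via the new fragp argument */
	enum rxe_mr_copy_op op;
	int err;

	/* prepare_ack_packet() now reports whether the skb is fragmented */
	skb = prepare_ack_packet(qp, &ack, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED, &frag);
	if (!skb)
		return RESPST_ERR_RNR;

	/* pick the MR copy strategy that matches the skb layout */
	op = frag ? RXE_FRAG_FROM_MR : RXE_COPY_FROM_MR;
	err = rxe_copy_mr_data(skb, mr, res->read.va, payload_addr(&ack),
			       0, payload, op);
	if (err) {
		kfree_skb(skb);
		return RESPST_ERR_RKEY_VIOLATION;
	}

	/* padding and ICRC handling also depend on the frag flag */
	err = rxe_prepare_pad_icrc(&ack, skb, payload, frag);
	if (err) {
		kfree_skb(skb);
		return RESPST_ERR_RNR;
	}

	/* rxe_xmit_packet() consumes the skb even on failure */
	err = rxe_xmit_packet(qp, &ack, skb);
	return err ? RESPST_ERR_RNR : RESPST_CLEANUP;
}

An ack that carries no payload can pass NULL for fragp, as send_common_ack()
does above, since the flag only matters when payload bytes have to be copied
into the skb; in that case rxe_prepare_pad_icrc() is called with frag == false,
where it cannot fail.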