@@ -179,8 +179,15 @@ void rxe_srq_cleanup(struct rxe_pool_elem *elem);
 
 void rxe_dealloc(struct ib_device *ib_dev);
 
-int rxe_completer(void *arg);
+/* rxe_req.c */
+int rxe_prepare_pad_icrc(struct rxe_pkt_info *pkt, struct sk_buff *skb,
+ int payload, bool frag);
int rxe_requester(void *arg);
+
+/* rxe_comp.c */
+int rxe_completer(void *arg);
+
+/* rxe_resp.c */
 int rxe_responder(void *arg);
 
 /* rxe_icrc.c */
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -438,27 +438,79 @@ static void rxe_init_roce_hdrs(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 }
 
 static int rxe_init_payload(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
- struct rxe_pkt_info *pkt, u32 payload,
- struct sk_buff *skb)
+ struct rxe_pkt_info *pkt, int pad, u32 payload,
+ struct sk_buff *skb, bool frag)
{
+ int len = skb_tailroom(skb);
+ int tot_len = payload + pad + RXE_ICRC_SIZE;
+ int access = 0;
int skb_offset = 0;
+ int op;
+ void *addr;
void *data;
 int err = 0;
 
 if (wqe->wr.send_flags & IB_SEND_INLINE) {
+ if (WARN_ON(frag))
+ return -EINVAL;
+ if (len < tot_len)
+ return -EINVAL;
data = &wqe->dma.inline_data[wqe->dma.sge_offset];
memcpy(payload_addr(pkt), data, payload);
wqe->dma.resid -= payload;
wqe->dma.sge_offset += payload;
} else {
- err = rxe_copy_dma_data(skb, qp->pd, 0, &wqe->dma,
- payload_addr(pkt), skb_offset,
- payload, RXE_COPY_FROM_MR);
+ op = frag ? RXE_FRAG_FROM_MR : RXE_COPY_FROM_MR;
+ addr = frag ? NULL : payload_addr(pkt);
+ err = rxe_copy_dma_data(skb, qp->pd, access, &wqe->dma,
+ addr, skb_offset, payload, op);
 }
 
 return err;
 }
 
+/**
+ * rxe_prepare_pad_icrc() - Allocate space if fragmented and zero the pad and ICRC
+ * @pkt: packet info
+ * @skb: packet buffer
+ * @payload: RoCE payload length in bytes
+ * @frag: true if skb is fragmented
+ *
+ * Returns: 0 on success, else an error
+ */
+int rxe_prepare_pad_icrc(struct rxe_pkt_info *pkt, struct sk_buff *skb,
+ int payload, bool frag)
+{
+ struct rxe_phys_buf dmabuf;
+ size_t offset;
+ u64 iova;
+ u8 *addr;
+ int err = 0;
+ int pad = (-payload) & 0x3;
+
+ if (frag) {
+ /* allocate bytes at the end of the skb linear buffer
+ * and build a frag pointing at it
+ */
+ WARN_ON((skb->end - skb->tail) < 8);
+ addr = skb_end_pointer(skb) - RXE_ICRC_SIZE - pad;
+ iova = (uintptr_t)addr;
+ dmabuf.addr = iova & PAGE_MASK;
+ offset = iova & ~PAGE_MASK;
+ err = rxe_add_frag(skb, &dmabuf, pad + RXE_ICRC_SIZE, offset);
+ if (err)
+ goto err;
+ } else {
+ addr = payload_addr(pkt) + payload;
+ }
+
+ /* init pad and icrc to zero */
+ memset(addr, 0, pad + RXE_ICRC_SIZE);
+
+err:
+ return err;
+}
+
static struct sk_buff *rxe_init_req_packet(struct rxe_qp *qp,
struct rxe_send_wqe *wqe,
int opcode, u32 payload,
@@ -468,9 +520,9 @@ static struct sk_buff *rxe_init_req_packet(struct rxe_qp *qp,
struct sk_buff *skb = NULL;
struct rxe_av *av;
struct rxe_ah *ah = NULL;
- void *padp;
int pad;
int err = -EINVAL;
+ bool frag = false;
 
 pkt->rxe = rxe;
pkt->opcode = opcode;
@@ -498,15 +550,15 @@ static struct sk_buff *rxe_init_req_packet(struct rxe_qp *qp,
 rxe_init_roce_hdrs(qp, wqe, pkt, pad);
 
 if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
- err = rxe_init_payload(qp, wqe, pkt, payload, skb);
+ err = rxe_init_payload(qp, wqe, pkt, pad, payload, skb, frag);
if (err)
goto err_out;
 }
 
- if (pad) {
- padp = payload_addr(pkt) + payload;
- memset(padp, 0, pad);
- }
+ /* handle pad and icrc */
+ err = rxe_prepare_pad_icrc(pkt, skb, payload, frag);
+ if (err)
+ goto err_out;
 
 /* IP and UDP network headers */
err = rxe_prepare(av, pkt, skb);

Add code to rxe_init_req_packet() to allocate space for the pad and
icrc if the skb is fragmented. This is in preparation for supporting
fragmented skbs.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_loc.h |  9 +++-
 drivers/infiniband/sw/rxe/rxe_req.c | 74 ++++++++++++++++++++++++-----
 2 files changed, 71 insertions(+), 12 deletions(-)
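
For reference, the trailer sizing used by rxe_prepare_pad_icrc() can be
checked in isolation. Below is a minimal standalone sketch (ordinary
userspace C, not kernel code; it assumes only that RXE_ICRC_SIZE is 4,
as defined in the rxe driver headers) showing how pad = (-payload) & 0x3
rounds each payload up to the 4-byte boundary that RoCE requires before
the 4-byte invariant CRC:

#include <stdio.h>

/* Mirrors the rxe driver's ICRC size; the RoCE spec places a 4-byte
 * invariant CRC after the (padded) payload.
 */
#define RXE_ICRC_SIZE 4

int main(void)
{
	int payload;

	for (payload = 0; payload <= 7; payload++) {
		/* same pad formula as rxe_prepare_pad_icrc() above */
		int pad = (-payload) & 0x3;

		printf("payload=%d pad=%d trailer=%d bytes\n",
		       payload, pad, pad + RXE_ICRC_SIZE);
	}
	return 0;
}

Since pad never exceeds 3, the pad plus ICRC trailer is at most 7 bytes,
which is why the fragmented path only needs a few bytes at the end of
the skb's linear buffer and why the WARN_ON in rxe_prepare_pad_icrc()
checks that at least 8 bytes remain between skb->tail and skb->end.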