@@ -1376,6 +1376,91 @@ static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
 	return -EINVAL;
 }
 
+static int next_opcode_xrc(struct rxe_qp *qp, u32 wr_opcode, int fits)
+{
+	switch (wr_opcode) {
+	case IB_WR_RDMA_WRITE:
+		if (qp->req.opcode == IB_OPCODE_XRC_RDMA_WRITE_FIRST ||
+		    qp->req.opcode == IB_OPCODE_XRC_RDMA_WRITE_MIDDLE)
+			return fits ?
+				IB_OPCODE_XRC_RDMA_WRITE_LAST :
+				IB_OPCODE_XRC_RDMA_WRITE_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_XRC_RDMA_WRITE_ONLY :
+				IB_OPCODE_XRC_RDMA_WRITE_FIRST;
+
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		if (qp->req.opcode == IB_OPCODE_XRC_RDMA_WRITE_FIRST ||
+		    qp->req.opcode == IB_OPCODE_XRC_RDMA_WRITE_MIDDLE)
+			return fits ?
+				IB_OPCODE_XRC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
+				IB_OPCODE_XRC_RDMA_WRITE_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_XRC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
+				IB_OPCODE_XRC_RDMA_WRITE_FIRST;
+
+	case IB_WR_SEND:
+		if (qp->req.opcode == IB_OPCODE_XRC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_XRC_SEND_MIDDLE)
+			return fits ?
+				IB_OPCODE_XRC_SEND_LAST :
+				IB_OPCODE_XRC_SEND_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_XRC_SEND_ONLY :
+				IB_OPCODE_XRC_SEND_FIRST;
+
+	case IB_WR_SEND_WITH_IMM:
+		if (qp->req.opcode == IB_OPCODE_XRC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_XRC_SEND_MIDDLE)
+			return fits ?
+				IB_OPCODE_XRC_SEND_LAST_WITH_IMMEDIATE :
+				IB_OPCODE_XRC_SEND_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_XRC_SEND_ONLY_WITH_IMMEDIATE :
+				IB_OPCODE_XRC_SEND_FIRST;
+
+	case IB_WR_RDMA_READ:
+		return IB_OPCODE_XRC_RDMA_READ_REQUEST;
+
+	case IB_WR_RDMA_READ_WITH_INV:
+		return IB_OPCODE_XRC_RDMA_READ_REQUEST;
+
+	case IB_WR_ATOMIC_CMP_AND_SWP:
+		return IB_OPCODE_XRC_COMPARE_SWAP;
+
+	case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+		return -EOPNOTSUPP;
+
+	case IB_WR_ATOMIC_FETCH_AND_ADD:
+		return IB_OPCODE_XRC_FETCH_ADD;
+
+	case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+		return -EOPNOTSUPP;
+
+	case IB_WR_SEND_WITH_INV:
+		if (qp->req.opcode == IB_OPCODE_XRC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_XRC_SEND_MIDDLE)
+			return fits ?
+				IB_OPCODE_XRC_SEND_LAST_WITH_INVALIDATE :
+				IB_OPCODE_XRC_SEND_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_XRC_SEND_ONLY_WITH_INVALIDATE :
+				IB_OPCODE_XRC_SEND_FIRST;
+
+	case IB_WR_LOCAL_INV:
+	case IB_WR_REG_MR:
+	case IB_WR_BIND_MW:
+		return wr_opcode;
+	}
+
+	return -EINVAL;
+}
+
 int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, u32 opcode)
 {
 	int fits = (wqe->dma.resid <= qp->mtu);
@@ -1387,6 +1472,9 @@ int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, u32 opcode)
 	case IB_QPT_UC:
 		return next_opcode_uc(qp, opcode, fits);
 
+	case IB_QPT_XRC_INI:
+		return next_opcode_xrc(qp, opcode, fits);
+
 	case IB_QPT_UD:
 	case IB_QPT_GSI:
 		switch (opcode) {
Extend next_opcode() to support xrc operations.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_opcode.c | 88 ++++++++++++++++++++++++++
 1 file changed, 88 insertions(+)
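
Note for reviewers: next_opcode_xrc() follows the same segmentation rule as the
existing next_opcode_rc()/next_opcode_uc() helpers. If the previous packet sent
for this WQE was the matching *_FIRST or *_MIDDLE opcode, a multi-packet message
is already in progress, so the next packet is MIDDLE (more payload remains) or
LAST (the remainder fits in one MTU); otherwise a new message starts with FIRST
or ONLY. The standalone sketch below restates that rule; pick_segment() and the
seg_opcode enum are hypothetical illustration only, not part of this patch or
the rxe driver:

/* Illustrative sketch only (hypothetical names, not in the patch).
 * "in_progress" stands for qp->req.opcode being the matching *_FIRST
 * or *_MIDDLE opcode; "fits" stands for wqe->dma.resid <= qp->mtu as
 * computed in next_opcode().
 */
#include <stdbool.h>

enum seg_opcode { SEG_FIRST, SEG_MIDDLE, SEG_LAST, SEG_ONLY };

enum seg_opcode pick_segment(bool in_progress, bool fits)
{
	if (in_progress)
		return fits ? SEG_LAST : SEG_MIDDLE;

	return fits ? SEG_ONLY : SEG_FIRST;
}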