@@ -11,61 +11,85 @@
int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
{
struct ib_srq_attr *attr = &init->attr;
+ int err = -EINVAL;
- if (attr->max_wr > rxe->attr.max_srq_wr) {
- pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
- attr->max_wr, rxe->attr.max_srq_wr);
- goto err1;
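+
+ /* tag matching (TM) srqs are not supported by rxe */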
+ if (init->srq_type == IB_SRQT_TM) {
+ err = -EOPNOTSUPP;
+ goto err_out;
}
- if (attr->max_wr <= 0) {
- pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
- goto err1;
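+
+ /* an XRC srq must come with both a cq and an xrcd */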
+ if (init->srq_type == IB_SRQT_XRC) {
+ if (!init->ext.cq || !init->ext.xrc.xrcd)
+ goto err_out;
}
+ if (attr->max_wr > rxe->attr.max_srq_wr)
+ goto err_out;
+
+ if (attr->max_wr <= 0)
+ goto err_out;
+
if (attr->max_wr < RXE_MIN_SRQ_WR)
attr->max_wr = RXE_MIN_SRQ_WR;
- if (attr->max_sge > rxe->attr.max_srq_sge) {
- pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
- attr->max_sge, rxe->attr.max_srq_sge);
- goto err1;
- }
+ if (attr->max_sge > rxe->attr.max_srq_sge)
+ goto err_out;
if (attr->max_sge < RXE_MIN_SRQ_SGE)
attr->max_sge = RXE_MIN_SRQ_SGE;
return 0;
-err1:
- return -EINVAL;
+err_out:
+ pr_debug("%s: failed err = %d\n", __func__, err);
+ return err;
}
int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_init_attr *init, struct ib_udata *udata,
struct rxe_create_srq_resp __user *uresp)
{
- int err;
- int srq_wqe_size;
+ struct rxe_pd *pd = to_rpd(srq->ibsrq.pd);
+ struct rxe_cq *cq;
+ struct rxe_xrcd *xrcd;
struct rxe_queue *q;
- enum queue_type type;
+ int srq_wqe_size;
+ int err;
+
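+ /* the pd reference is taken here now instead of in rxe_create_srq() */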
+ rxe_get(pd);
+ srq->pd = pd;
srq->ibsrq.event_handler = init->event_handler;
srq->ibsrq.srq_context = init->srq_context;
srq->limit = init->attr.srq_limit;
- srq->srq_num = srq->elem.index;
srq->rq.max_wr = init->attr.max_wr;
srq->rq.max_sge = init->attr.max_sge;
- srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
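+
+ /* for an XRC srq take references on the cq and xrcd;
+ * rxe_srq_cleanup() drops them
+ */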
+ if (init->srq_type == IB_SRQT_XRC) {
+ cq = to_rcq(init->ext.cq);
+ if (!cq)
+ return -EINVAL;
+ rxe_get(cq);
+ srq->cq = cq;
+
+ xrcd = to_rxrcd(init->ext.xrc.xrcd);
+ if (!xrcd)
+ return -EINVAL;
+ rxe_get(xrcd);
+ srq->xrcd = xrcd;
+
+ srq->ibsrq.ext.xrc.srq_num = srq->elem.index;
+ }
spin_lock_init(&srq->rq.producer_lock);
spin_lock_init(&srq->rq.consumer_lock);
- type = QUEUE_TYPE_FROM_CLIENT;
- q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size, type);
+ srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
+ q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size,
+ QUEUE_TYPE_FROM_CLIENT);
if (!q) {
- pr_warn("unable to allocate queue for srq\n");
+ pr_debug("%s: srq#%d: unable to allocate queue\n",
+ __func__, srq->elem.index);
return -ENOMEM;
}
@@ -79,66 +103,45 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
return err;
}
- if (uresp) {
- if (copy_to_user(&uresp->srq_num, &srq->srq_num,
- sizeof(uresp->srq_num))) {
- rxe_queue_cleanup(q);
- return -EFAULT;
- }
- }
-
return 0;
}
int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
{
- if (srq->error) {
- pr_warn("srq in error state\n");
- goto err1;
- }
+ int err = -EINVAL;
+
+ if (srq->error)
+ goto err_out;
if (mask & IB_SRQ_MAX_WR) {
- if (attr->max_wr > rxe->attr.max_srq_wr) {
- pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
- attr->max_wr, rxe->attr.max_srq_wr);
- goto err1;
- }
+ if (attr->max_wr > rxe->attr.max_srq_wr)
+ goto err_out;
- if (attr->max_wr <= 0) {
- pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
- goto err1;
- }
+ if (attr->max_wr <= 0)
+ goto err_out;
- if (srq->limit && (attr->max_wr < srq->limit)) {
- pr_warn("max_wr (%d) < srq->limit (%d)\n",
- attr->max_wr, srq->limit);
- goto err1;
- }
+ if (srq->limit && (attr->max_wr < srq->limit))
+ goto err_out;
if (attr->max_wr < RXE_MIN_SRQ_WR)
attr->max_wr = RXE_MIN_SRQ_WR;
}
if (mask & IB_SRQ_LIMIT) {
- if (attr->srq_limit > rxe->attr.max_srq_wr) {
- pr_warn("srq_limit(%d) > max_srq_wr(%d)\n",
- attr->srq_limit, rxe->attr.max_srq_wr);
- goto err1;
- }
+ if (attr->srq_limit > rxe->attr.max_srq_wr)
+ goto err_out;
- if (attr->srq_limit > srq->rq.queue->buf->index_mask) {
- pr_warn("srq_limit (%d) > cur limit(%d)\n",
- attr->srq_limit,
- srq->rq.queue->buf->index_mask);
- goto err1;
- }
+ if (attr->srq_limit > srq->rq.queue->buf->index_mask)
+ goto err_out;
}
return 0;
-err1:
- return -EINVAL;
+err_out:
+ pr_debug("%s: srq#%d: failed err = %d\n", __func__,
+ srq->elem.index, err);
+ return err;
}
int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
@@ -182,6 +185,12 @@ void rxe_srq_cleanup(struct rxe_pool_elem *elem)
if (srq->pd)
rxe_put(srq->pd);
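+
+ /* drop the cq and xrcd references taken for XRC srqs */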
+ if (srq->cq)
+ rxe_put(srq->cq);
+
+ if (srq->xrcd)
+ rxe_put(srq->xrcd);
+
if (srq->rq.queue)
rxe_queue_cleanup(srq->rq.queue);
}
@@ -306,7 +306,6 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
{
int err;
struct rxe_dev *rxe = to_rdev(ibsrq->device);
- struct rxe_pd *pd = to_rpd(ibsrq->pd);
struct rxe_srq *srq = to_rsrq(ibsrq);
struct rxe_create_srq_resp __user *uresp = NULL;
@@ -316,9 +315,6 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
uresp = udata->outbuf;
}
- if (init->srq_type != IB_SRQT_BASIC)
- return -EOPNOTSUPP;
-
err = rxe_srq_chk_init(rxe, init);
if (err)
return err;
@@ -327,13 +323,11 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (err)
return err;
- rxe_get(pd);
- srq->pd = pd;
-
err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
if (err)
goto err_cleanup;
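+
+ /* complete srq init so its index can be looked up */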
+ rxe_finalize(srq);
return 0;
err_cleanup:
@@ -367,6 +361,7 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
if (err)
return err;
+
return 0;
}
@@ -380,6 +375,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
attr->max_wr = srq->rq.queue->buf->index_mask;
attr->max_sge = srq->rq.max_sge;
attr->srq_limit = srq->limit;
+
return 0;
}
@@ -546,7 +542,6 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
const struct ib_send_wr *ibwr)
{
wr->wr_id = ibwr->wr_id;
- wr->num_sge = ibwr->num_sge;
wr->opcode = ibwr->opcode;
wr->send_flags = ibwr->send_flags;
@@ -628,6 +623,8 @@ static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
return;
}
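+ /* num_sge is carried in the wqe dma info now that rxe_send_wr dropped it */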
+ wqe->dma.num_sge = ibwr->num_sge;
+
if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
copy_inline_data_to_wqe(wqe, ibwr);
else
@@ -102,13 +102,19 @@ struct rxe_srq {
struct ib_srq ibsrq;
struct rxe_pool_elem elem;
struct rxe_pd *pd;
+ struct rxe_xrcd *xrcd; /* xrc only */
+ struct rxe_cq *cq; /* xrc only */
struct rxe_rq rq;
- u32 srq_num;
int limit;
int error;
};
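+
+/* the uverbs core returns ibsrq.ext.xrc.srq_num to userspace for XRC srqs */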
+static inline u32 srq_num(struct rxe_srq *srq)
+{
+ return srq->ibsrq.ext.xrc.srq_num;
+}
+
enum rxe_qp_state {
QP_STATE_RESET,
QP_STATE_INIT,
@@ -74,7 +74,7 @@ struct rxe_av {
struct rxe_send_wr {
__aligned_u64 wr_id;
- __u32 num_sge;
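+ /* num_sge was unused here; the wqe dma info carries it */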
+ __u32 srq_num; /* xrc only */
__u32 opcode;
__u32 send_flags;
union {
@@ -191,8 +191,6 @@ struct rxe_create_qp_resp {
struct rxe_create_srq_resp {
struct mminfo mi;
- __u32 srq_num;
- __u32 reserved;
};
struct rxe_modify_srq_cmd {
Extend srq to support xrcd in create verb

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_srq.c   | 131 ++++++++++++++------------
 drivers/infiniband/sw/rxe/rxe_verbs.c |  13 +--
 drivers/infiniband/sw/rxe/rxe_verbs.h |   8 +-
 include/uapi/rdma/rdma_user_rxe.h     |   4 +-
 4 files changed, 83 insertions(+), 73 deletions(-)
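
For reviewers, a minimal userspace sketch of the path this patch enables.
This is illustrative only: it assumes a libibverbs build with XRC support,
takes the first device in the list, uses arbitrary queue sizes, and omits
most error handling. Link with -libverbs.

#include <stdio.h>
#include <fcntl.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **dev_list = ibv_get_device_list(NULL);
	if (!dev_list || !dev_list[0])
		return 1;

	struct ibv_context *ctx = ibv_open_device(dev_list[0]);
	struct ibv_pd *pd = ibv_alloc_pd(ctx);
	struct ibv_cq *cq = ibv_create_cq(ctx, 16, NULL, NULL, 0);

	/* anonymous xrcd: no backing file descriptor */
	struct ibv_xrcd_init_attr xrcd_attr = {
		.comp_mask = IBV_XRCD_INIT_ATTR_FD | IBV_XRCD_INIT_ATTR_OFLAGS,
		.fd = -1,
		.oflags = O_CREAT,
	};
	struct ibv_xrcd *xrcd = ibv_open_xrcd(ctx, &xrcd_attr);

	/* this request reaches rxe_srq_chk_init()/rxe_srq_from_init() with
	 * srq_type == IB_SRQT_XRC and ext.cq/ext.xrc.xrcd set
	 */
	struct ibv_srq_init_attr_ex srq_attr = {
		.attr = { .max_wr = 64, .max_sge = 1 },
		.comp_mask = IBV_SRQ_INIT_ATTR_TYPE | IBV_SRQ_INIT_ATTR_PD |
			     IBV_SRQ_INIT_ATTR_XRCD | IBV_SRQ_INIT_ATTR_CQ,
		.srq_type = IBV_SRQT_XRC,
		.pd = pd,
		.xrcd = xrcd,
		.cq = cq,
	};
	struct ibv_srq *srq = ibv_create_srq_ex(ctx, &srq_attr);

	/* the number printed here is the value the driver stored in
	 * ibsrq.ext.xrc.srq_num
	 */
	uint32_t srq_num = 0;
	if (srq && !ibv_get_srq_num(srq, &srq_num))
		printf("created XRC srq, srq_num = %u\n", srq_num);

	if (srq)
		ibv_destroy_srq(srq);
	if (xrcd)
		ibv_close_xrcd(xrcd);
	ibv_destroy_cq(cq);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(dev_list);
	return 0;
}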