@@ -387,6 +387,38 @@ static struct ib_mr *loopback_get_dma_mr(struct ib_pd *ibpd, int access)
return &mr->ibmr;
}
+/*
+ * Allocate a fast-registration MR (FRMR) for the loopback device.
+ *
+ * Only IB_MR_TYPE_MEM_REG is supported.  The page table holds up to
+ * max_sges page addresses, filled in later by loopback_map_mr_sg().
+ * Returns the embedded ib_mr on success or an ERR_PTR() on failure.
+ */
+static struct ib_mr *
+loopback_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type, u32 max_sges)
+{
+	struct rdma_loopdev *ld = ib_to_loopdev(ibpd->device);
+	struct loopback_mr *mr;
+	int ret;
+
+	if (type != IB_MR_TYPE_MEM_REG)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
+	mr->type = LOOPBACK_MR_TYPE_FRMR;
+
+	/* Entries hold u64 page addresses (see loopback_set_page());
+	 * sizeof(u64 *) would under-allocate on 32-bit hosts.
+	 */
+	mr->pg_tbl = kcalloc(max_sges, sizeof(*mr->pg_tbl), GFP_KERNEL);
+	if (!mr->pg_tbl) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	ret = attach_table_id_free_state(&ld->mr_tbl, &mr->res);
+	if (ret)
+		goto err;
+	mr->ibmr.lkey = mr_id_to_mkey(mr->res.id);
+	mr->ibmr.rkey = mr->ibmr.lkey;
+	return &mr->ibmr;
+
+err:
+	/* kfree(NULL) is a no-op, so this also covers the pre-table
+	 * failure path; previously pg_tbl leaked when attach failed.
+	 */
+	kfree(mr->pg_tbl);
+	kfree(mr);
+	return ERR_PTR(ret);
+}
+
static size_t mr_pages_store_size(struct ib_umem *umem)
{
return ib_umem_page_count(umem) * sizeof(struct page *);
@@ -1182,6 +1214,56 @@ static int invalidate_rkey(struct rdma_loopdev *ld, u32 inv_key)
wq_cqe->wc.status = IB_WC_SUCCESS;
}
+/* ib_sg_to_pages() callback: append one page address to the MR's page
+ * table, advancing the fill cursor.  Always succeeds.
+ */
+static int loopback_set_page(struct ib_mr *ibmr, u64 addr)
+{
+	struct loopback_mr *mr = ib_to_loop_mr(ibmr);
+
+	mr->pg_tbl[mr->u.frmr.pg_iter++] = addr;
+	return 0;
+}
+
+/*
+ * Map a scatterlist into the MR's page table via ib_sg_to_pages(),
+ * which invokes loopback_set_page() once per page.
+ *
+ * Returns the number of SG entries mapped (== sg_nents on success) or
+ * a negative errno.  A short map (fewer entries than requested) is
+ * treated as a failure, matching other soft-RDMA drivers.
+ */
+static int loopback_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sgl,
+			      int sg_nents, unsigned int *sg_offset)
+{
+	struct loopback_mr *mr = ib_to_loop_mr(ibmr);
+	int ret;
+
+	/* Restart the page-table fill cursor for this registration. */
+	mr->u.frmr.pg_iter = 0;
+
+	ret = ib_sg_to_pages(ibmr, sgl, sg_nents, sg_offset,
+			     loopback_set_page);
+	if (ret < 0 || ret != sg_nents) {
+		pr_err("%s cannot map mr=0x%x sges=%d sgo=%d\n", __func__,
+		       mr->ibmr.lkey, sg_nents, sg_offset ? *sg_offset : 0);
+		/* Propagate the real errno instead of flattening every
+		 * failure to -EINVAL; short maps stay -EINVAL.
+		 */
+		return ret < 0 ? ret : -EINVAL;
+	}
+
+	/* iova may start mid-page; remember the first-byte offset. */
+	mr->page_shift = ilog2(ibmr->page_size);
+	mr->fbo = ibmr->iova & (ibmr->page_size - 1);
+	return ret;
+}
+
+/*
+ * Execute an IB_WR_REG_MR work request: record the requested access
+ * flags and mark the MR's resource id valid so subsequent RDMA ops
+ * accept its keys.  Completes the WQE with IB_WC_SUCCESS.
+ */
+static void
+process_one_rc_reg_mr(struct rdma_loopdev *ld, struct loopback_qp *qp,
+		      const struct ib_send_wr *wr,
+		      struct loopback_cqe *wq_cqe)
+{
+	const struct ib_reg_wr *regwr = reg_wr(wr);
+	struct loopback_mr *mr = ib_to_loop_mr(regwr->mr);
+	unsigned long flags;
+
+	/* TODO: update key, mask key, take lock, check refcount,
+	 * add check assert for state
+	 */
+	/* MR ids live in mr_tbl (attached there by loopback_alloc_mr());
+	 * marking qp_tbl would tag the wrong xarray.
+	 */
+	xa_lock_irqsave(&ld->mr_tbl.ids, flags);
+	mr->access = regwr->access;
+	__xa_set_mark(&ld->mr_tbl.ids, mr->res.id,
+		      LOOPBACK_RESOURCE_STATE_VALID);
+	xa_unlock_irqrestore(&ld->mr_tbl.ids, flags);
+	wq_cqe->wc.status = IB_WC_SUCCESS;
+}
+
static void
process_one_rc_send_wqe(struct rdma_loopdev *ld, struct loopback_qp *qp,
struct loopback_qp *dqp,
@@ -1274,6 +1356,9 @@ static int invalidate_rkey(struct rdma_loopdev *ld, u32 inv_key)
case IB_WR_LOCAL_INV:
process_one_rc_linv(ld, sqp, wr, wq_cqe);
break;
+ case IB_WR_REG_MR:
+ process_one_rc_reg_mr(ld, sqp, wr, wq_cqe);
+ break;
default:
wq_cqe->wc.status = IB_WC_GENERAL_ERR;
break;
@@ -1462,6 +1547,7 @@ static int loopback_destroy_ah(struct ib_ah *ibah, u32 flags)
static const struct ib_device_ops rdma_loopdev_ops = {
.alloc_pd = loopback_alloc_pd,
+ .alloc_mr = loopback_alloc_mr,
.alloc_ucontext = loopback_alloc_ucontext,
.create_ah = loopback_create_ah,
.create_cq = loopback_create_cq,
@@ -1476,6 +1562,7 @@ static int loopback_destroy_ah(struct ib_ah *ibah, u32 flags)
.get_link_layer = loopback_get_link_layer,
.get_netdev = loopback_get_netdev,
.get_port_immutable = loopback_port_immutable,
+ .map_mr_sg = loopback_map_mr_sg,
.modify_qp = loopback_modify_qp,
.poll_cq = loopback_poll_cq,
.post_recv = loopback_post_recv,
Support Fast memory registration for storage ULPs. Signed-off-by: Parav Pandit <parav@mellanox.com> --- drivers/infiniband/sw/loopback/loopback.c | 87 +++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+)