--- a/drivers/infiniband/core/mr_pool.c
+++ b/drivers/infiniband/core/mr_pool.c
@@ -42,14 +42,18 @@ void ib_mr_pool_put(struct ib_qp *qp, struct list_head *list, struct ib_mr *mr)
 EXPORT_SYMBOL(ib_mr_pool_put);
 
 int ib_mr_pool_init(struct ib_qp *qp, struct list_head *list, int nr,
-		enum ib_mr_type type, u32 max_num_sg)
+		enum ib_mr_type type, u32 max_num_sg, u32 max_num_meta_sg)
 {
 	struct ib_mr *mr;
 	unsigned long flags;
 	int ret, i;
 
 	for (i = 0; i < nr; i++) {
-		mr = ib_alloc_mr(qp->pd, type, max_num_sg);
+		if (type == IB_MR_TYPE_INTEGRITY)
+			mr = ib_alloc_mr_integrity(qp->pd, max_num_sg,
+						   max_num_meta_sg);
+		else
+			mr = ib_alloc_mr(qp->pd, type, max_num_sg);
 		if (IS_ERR(mr)) {
 			ret = PTR_ERR(mr);
 			goto out;
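
For context only (not part of the patch): the new branch dispatches between the two core verbs allocators, whose declarations in include/rdma/ib_verbs.h look roughly like this:

	struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				  u32 max_num_sg);
	struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
					    u32 max_num_data_sg,
					    u32 max_num_meta_sg);

ib_alloc_mr_integrity() registers an MR that maps a separate scatterlist for protection metadata alongside the data scatterlist, which is why integrity pools must now size both max_num_sg and max_num_meta_sg.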
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -719,7 +719,7 @@ int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
 	if (nr_mrs) {
 		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
 				IB_MR_TYPE_MEM_REG,
-				rdma_rw_fr_page_list_len(dev));
+				rdma_rw_fr_page_list_len(dev), 0);
 		if (ret) {
 			pr_err("%s: failed to allocated %d MRs\n",
 				__func__, nr_mrs);
@@ -729,7 +729,7 @@ int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
 
 	if (nr_sig_mrs) {
 		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
-				IB_MR_TYPE_SIGNATURE, 2);
+				IB_MR_TYPE_SIGNATURE, 2, 0);
 		if (ret) {
 			pr_err("%s: failed to allocated %d SIG MRs\n",
 				__func__, nr_sig_mrs);
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -486,7 +486,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 	ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
 			      queue->queue_size,
 			      IB_MR_TYPE_MEM_REG,
-			      nvme_rdma_get_max_fr_pages(ibdev));
+			      nvme_rdma_get_max_fr_pages(ibdev), 0);
 	if (ret) {
 		dev_err(queue->ctrl->ctrl.device,
 			"failed to initialize MR pool sized %d for QID %d\n",
--- a/include/rdma/mr_pool.h
+++ b/include/rdma/mr_pool.h
@@ -19,7 +19,7 @@ struct ib_mr *ib_mr_pool_get(struct ib_qp *qp, struct list_head *list);
 void ib_mr_pool_put(struct ib_qp *qp, struct list_head *list, struct ib_mr *mr);
 
 int ib_mr_pool_init(struct ib_qp *qp, struct list_head *list, int nr,
-		enum ib_mr_type type, u32 max_num_sg);
+		enum ib_mr_type type, u32 max_num_sg, u32 max_num_meta_sg);
 void ib_mr_pool_destroy(struct ib_qp *qp, struct list_head *list);
 
 #endif /* _RDMA_MR_POOL_H */
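
To show the extended API end to end, here is a minimal, hypothetical caller sketch. The function name, pool size, and SGE limits are illustrative, and it assumes a QP created with IB_QP_CREATE_INTEGRITY_EN on a device that supports integrity MRs; nothing below is part of the patch.

	#include <rdma/ib_verbs.h>
	#include <rdma/mr_pool.h>

	/* Hypothetical example of the new ib_mr_pool_init() signature. */
	static int example_integrity_pool(struct ib_qp *qp, int nr_mrs)
	{
		struct ib_mr *mr;
		int ret;

		/*
		 * With IB_MR_TYPE_INTEGRITY the new max_num_meta_sg argument
		 * is honored: each MR below covers up to 4 data SGEs plus
		 * 1 metadata SGE. qp->sig_mrs is the list rw.c uses for its
		 * signature pool; any caller-owned list_head works too.
		 */
		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_mrs,
				      IB_MR_TYPE_INTEGRITY, 4, 1);
		if (ret)
			return ret;

		mr = ib_mr_pool_get(qp, &qp->sig_mrs); /* NULL if pool empty */
		if (mr)
			ib_mr_pool_put(qp, &qp->sig_mrs, mr);

		ib_mr_pool_destroy(qp, &qp->sig_mrs);
		return 0;
	}

For every other MR type the extra argument is ignored by the else branch in mr_pool.c, which is why the existing IB_MR_TYPE_MEM_REG and IB_MR_TYPE_SIGNATURE callers above simply pass 0.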