@@ -1737,6 +1737,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, alloc_fmr);
SET_DEVICE_OP(dev_ops, alloc_hw_stats);
SET_DEVICE_OP(dev_ops, alloc_mr);
+ SET_DEVICE_OP(dev_ops, alloc_mr_integrity);
SET_DEVICE_OP(dev_ops, alloc_mw);
SET_DEVICE_OP(dev_ops, alloc_pd);
SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
@@ -1992,6 +1992,9 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
if (!pd->device->ops.alloc_mr)
return ERR_PTR(-EOPNOTSUPP);
+ if (WARN_ON_ONCE(mr_type == IB_MR_TYPE_INTEGRITY))
+ return ERR_PTR(-EINVAL);
+
mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
if (!IS_ERR(mr)) {
mr->device = pd->device;
@@ -2009,6 +2012,49 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
}
EXPORT_SYMBOL(ib_alloc_mr);
+/**
+ * ib_alloc_mr_integrity() - Allocates an integrity memory region
+ * @pd: protection domain associated with the region
+ * @max_num_data_sg: maximum data sg entries available for registration
+ * @max_num_meta_sg: maximum metadata sg entries available for
+ * registration
+ *
+ * Notes:
+ * Memory registration page/sg lists must not exceed max_num_data_sg,
+ * and the integrity page/sg lists must not exceed max_num_meta_sg.
+ *
+ */
+struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
+ u32 max_num_data_sg,
+ u32 max_num_meta_sg)
+{
+ struct ib_mr *mr;
+
+ if (!pd->device->ops.alloc_mr_integrity)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (!max_num_meta_sg)
+ return ERR_PTR(-EINVAL);
+
+ mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
+ max_num_meta_sg);
+ if (IS_ERR(mr))
+ return mr;
+
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->dm = NULL;
+ mr->uobject = NULL;
+ atomic_inc(&pd->usecnt);
+ mr->need_inval = false;
+ mr->res.type = RDMA_RESTRACK_MR;
+ rdma_restrack_kadd(&mr->res);
+ mr->type = IB_MR_TYPE_INTEGRITY;
+
+ return mr;
+}
+EXPORT_SYMBOL(ib_alloc_mr_integrity);
+
/* "Fast" memory regions */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
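For context, a minimal usage sketch of the new verb from a kernel ULP's point of view (not part of the diff; the helper name and the sg-list sizes are illustrative assumptions):

#include <rdma/ib_verbs.h>

/* Hypothetical helper: allocate a PI-capable MR on an existing PD. The
 * 256/1 sg-list limits are arbitrary example values, not taken from the
 * patch. The MR is released later with ib_dereg_mr() as usual.
 */
static struct ib_mr *example_alloc_pi_mr(struct ib_pd *pd)
{
	struct ib_mr *mr;

	mr = ib_alloc_mr_integrity(pd, 256 /* data sg */, 1 /* metadata sg */);
	if (IS_ERR(mr))
		return NULL;	/* e.g. -EOPNOTSUPP if the provider lacks the op */

	return mr;
}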
@@ -765,6 +765,8 @@ __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
* application
* @IB_MR_TYPE_DMA: memory region that is used for DMA operations
* without address translations (VA=PA)
+ * @IB_MR_TYPE_INTEGRITY: memory region that is used for
+ * data integrity operations
*/
enum ib_mr_type {
IB_MR_TYPE_MEM_REG,
@@ -773,6 +775,7 @@ enum ib_mr_type {
IB_MR_TYPE_DM,
IB_MR_TYPE_USER,
IB_MR_TYPE_DMA,
+ IB_MR_TYPE_INTEGRITY,
};
enum ib_mr_status_check {
@@ -2336,6 +2339,9 @@ struct ib_device_ops {
int (*dereg_mr)(struct ib_mr *mr);
struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
+ struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
+ u32 max_num_data_sg,
+ u32 max_num_meta_sg);
int (*advise_mr)(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice, u32 flags,
struct ib_sge *sg_list, u32 num_sge,
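On the provider side, a driver that supports integrity MRs would implement the new callback and expose it through its ib_device_ops table; ib_set_device_ops() then copies it via the SET_DEVICE_OP(dev_ops, alloc_mr_integrity) line added above. A hedged sketch with hypothetical example_* names:

#include <rdma/ib_verbs.h>

/* Hypothetical provider callback: a real driver would allocate and set up
 * its hardware-specific integrity MR here; this stub only shows the shape
 * of the hook.
 */
static struct ib_mr *example_alloc_mr_integrity(struct ib_pd *pd,
						u32 max_num_data_sg,
						u32 max_num_meta_sg)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static const struct ib_device_ops example_dev_ops = {
	.alloc_mr_integrity = example_alloc_mr_integrity,
	/* ... other callbacks ... */
};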
@@ -3760,6 +3766,10 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
+struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
+ u32 max_num_data_sg,
+ u32 max_num_meta_sg);
+
/**
* ib_update_fast_reg_key - updates the key portion of the fast_reg MR
* R_Key and L_Key.
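Finally, since ib_alloc_mr_integrity() returns -EOPNOTSUPP when the provider does not implement the new op, a consumer that can operate without integrity offload might fall back to a plain fast-reg MR. This is a sketch under that assumption; the example_* name is hypothetical:

static struct ib_mr *example_alloc_mr_with_fallback(struct ib_pd *pd,
						    u32 data_sg, u32 meta_sg)
{
	struct ib_mr *mr;

	mr = ib_alloc_mr_integrity(pd, data_sg, meta_sg);
	if (IS_ERR(mr) && PTR_ERR(mr) == -EOPNOTSUPP)
		/* No integrity support: fall back to a regular fast-reg MR. */
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, data_sg);

	return mr;
}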