@@ -161,6 +161,28 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
mr->type = IB_MR_TYPE_DMA;
}
+static bool iova_in_pmem(struct rxe_mr *mr, u64 iova, int length)
+{
+ char *vaddr;
+ int is_pmem;
+
+	/* XXX: Should we allow length == 0? */
+ if (length == 0) {
+ return false;
+ }
+	/* check only the first byte to avoid crossing a page boundary */
+ vaddr = iova_to_vaddr(mr, iova, 1);
+ if (!vaddr) {
+ pr_warn("not a valid iova 0x%llx\n", iova);
+ return false;
+ }
+
+ is_pmem = region_intersects(virt_to_phys(vaddr), 1, IORESOURCE_MEM,
+ IORES_DESC_PERSISTENT_MEMORY);
+
+ return is_pmem == REGION_INTERSECTS;
+}
+
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr)
{
@@ -235,6 +257,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
set->va = start;
set->offset = ib_umem_offset(umem);
+	/* iova_in_pmem() must be called after set has been updated */
+ mr->ibmr.is_pmem = iova_in_pmem(mr, iova, length);
+
return 0;
err_release_umem:
@@ -1807,6 +1807,7 @@ struct ib_mr {
unsigned int page_size;
enum ib_mr_type type;
bool need_inval;
+ bool is_pmem;
union {
struct ib_uobject *uobject; /* user */
struct list_head qp_entry; /* FR */
We can use it to indicate whether the MR being registered is associated
with a pmem/NVDIMM device or not. Currently we only update it in the rxe
driver; other devices/drivers should implement it if needed.

CC: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
---
V2: check only the 1st byte to avoid crossing a page boundary
    new scheme to check is_pmem # Dan
---
 drivers/infiniband/sw/rxe/rxe_mr.c | 25 +++++++++++++++++++++++++
 include/rdma/ib_verbs.h            |  1 +
 2 files changed, 26 insertions(+)
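
For reference, another device/driver that wants to set the new flag could
reuse the same region_intersects() check that rxe uses above. Below is a
minimal sketch, assuming the driver already holds a kernel virtual address
for the first byte of the registered region; the helper name
example_mark_mr_pmem() is made up for illustration and is not part of this
patch.

#include <linux/ioport.h>	/* region_intersects(), IORES_DESC_PERSISTENT_MEMORY */
#include <linux/io.h>		/* virt_to_phys() */
#include <rdma/ib_verbs.h>	/* struct ib_mr */

/* Hypothetical helper: mark an MR as pmem-backed, mirroring the rxe check. */
static void example_mark_mr_pmem(struct ib_mr *ibmr, void *vaddr)
{
	/* Check only the first byte so the lookup never crosses a page. */
	int ret = region_intersects(virt_to_phys(vaddr), 1, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);

	/* Anything other than REGION_INTERSECTS (e.g. REGION_MIXED) stays false. */
	ibmr->is_pmem = (ret == REGION_INTERSECTS);
}

As in the rxe version, checking a single byte keeps the physical-address
lookup from spanning a page boundary, at the cost of not detecting regions
that are only partially backed by pmem.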