@@ -68,6 +68,8 @@ void rxe_mr_init_dma(int access, struct rxe_mr *mr);
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr);
int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
+void *rxe_map_to_vaddr(struct rxe_mr *mr, int map_index, int addr_index, size_t offset);
+void rxe_unmap_vaddr(struct rxe_mr *mr, void *vaddr);
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir);
int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
@@ -118,9 +118,7 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
struct ib_umem *umem;
struct sg_page_iter sg_iter;
int num_buf;
- void *vaddr;
int err;
- int i;
umem = ib_umem_get(&rxe->ib_dev, start, length, access);
if (IS_ERR(umem)) {
@@ -154,15 +152,7 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
num_buf = 0;
}
- vaddr = page_address(sg_page_iter_page(&sg_iter));
- if (!vaddr) {
- pr_warn("%s: Unable to get virtual address\n",
- __func__);
- err = -ENOMEM;
- goto err_cleanup_map;
- }
-
- map[0]->addrs[num_buf] = (uintptr_t)vaddr;
+ map[0]->addrs[num_buf] = (uintptr_t)sg_page_iter_page(&sg_iter);
num_buf++;
}
@@ -176,10 +166,6 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
return 0;
-err_cleanup_map:
- for (i = 0; i < mr->num_map; i++)
- kfree(mr->map[i]);
- kfree(mr->map);
err_release_umem:
ib_umem_release(umem);
err_out:
@@ -240,6 +226,28 @@ static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
}
}
+void *rxe_map_to_vaddr(struct rxe_mr *mr, int map_index, int addr_index, size_t offset)
+{
+ void *vaddr = NULL;
+
+ if (mr->ibmr.type == IB_MR_TYPE_USER) {
+ vaddr = kmap_local_page((struct page *)(uintptr_t)mr->map[map_index]->addrs[addr_index]);
+ if (vaddr == NULL) {
+ pr_warn("%s: Failed to map page\n", __func__);
+ return NULL;
+ }
+ } else
+ vaddr = (void *)(uintptr_t)mr->map[map_index]->addrs[addr_index];
+
+ return vaddr + offset;
+}
+
+void rxe_unmap_vaddr(struct rxe_mr *mr, void *vaddr)
+{
+ if (mr->ibmr.type == IB_MR_TYPE_USER)
+ kunmap_local(vaddr);
+}
+
void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
{
size_t offset;
@@ -271,7 +279,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
goto out;
}
- addr = (void *)(uintptr_t)mr->map[m]->addrs[n] + offset;
+ addr = rxe_map_to_vaddr(mr, m, n, offset);
out:
return addr;
@@ -318,7 +326,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
while (length > 0) {
u8 *src, *dest;
- va = (u8 *)(uintptr_t)mr->map[m]->addrs[i] + offset;
+ va = (u8 *)rxe_map_to_vaddr(mr, m, i, offset);
src = (dir == RXE_TO_MR_OBJ) ? addr : va;
dest = (dir == RXE_TO_MR_OBJ) ? va : addr;
@@ -339,6 +347,8 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
i = 0;
m++;
}
+
+ rxe_unmap_vaddr(mr, va);
}
return 0;
@@ -652,6 +652,7 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
ret = RESPST_ACKNOWLEDGE;
out:
+ rxe_unmap_vaddr(mr, vaddr);
return ret;
}
page_address() will be broken when the new in-kernel memory protection
scheme [1] is applied in the future, so use kmap_local_page() instead.

[1]: https://lore.kernel.org/lkml/20220419170649.1022246-1-ira.weiny@intel.com/

Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
---
 drivers/infiniband/sw/rxe/rxe_loc.h  |  2 ++
 drivers/infiniband/sw/rxe/rxe_mr.c   | 44 +++++++++++++++++-----------
 drivers/infiniband/sw/rxe/rxe_resp.c |  1 +
 3 files changed, 30 insertions(+), 17 deletions(-)
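A note for context, not part of the patch itself: the new
rxe_map_to_vaddr()/rxe_unmap_vaddr() helpers wrap the usual
kmap_local_page()/kunmap_local() pattern for IB_MR_TYPE_USER MRs.
A minimal sketch of that pattern follows; copy_bytes_from_page() is a
hypothetical helper used only for illustration.

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Illustration only: temporarily map a page, copy out of it,
	 * then drop the CPU-local mapping again.
	 */
	static void copy_bytes_from_page(struct page *page, size_t offset,
					 void *dst, size_t len)
	{
		void *vaddr = kmap_local_page(page);

		memcpy(dst, vaddr + offset, len);
		kunmap_local(vaddr);
	}

In the diff above, the IB_MR_TYPE_USER path now stores struct page
pointers in map[]->addrs[] and maps them on demand with this pattern,
while the other MR types keep storing kernel virtual addresses and need
no mapping or unmapping.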