@@ -225,11 +225,24 @@ struct hns_roce_uar {
unsigned long logic_idx;
};

+enum hns_roce_mmap_type {
+ HNS_ROCE_MMAP_TYPE_DB = 1,
+ HNS_ROCE_MMAP_TYPE_TPTR,
+};
+
+struct hns_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ enum hns_roce_mmap_type mmap_type;
+ u64 address;
+};
+
struct hns_roce_ucontext {
struct ib_ucontext ibucontext;
struct hns_roce_uar uar;
struct list_head page_list;
struct mutex page_mutex;
+ struct hns_user_mmap_entry *db_mmap_entry;
+ struct hns_user_mmap_entry *tptr_mmap_entry;
};

struct hns_roce_pd {
@@ -1050,6 +1063,12 @@ static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

+static inline struct hns_user_mmap_entry *to_hns_mmap(
+ struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
+}
+
static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
writeq(*(u64 *)val, dest);
@@ -1260,4 +1279,8 @@ int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
struct ib_cq *ib_cq);
+struct hns_user_mmap_entry *hns_roce_user_mmap_entry_insert(
+ struct ib_ucontext *ucontext,
+ u64 address, size_t length,
+ enum hns_roce_mmap_type mmap_type);
#endif /* _HNS_ROCE_DEVICE_H */
@@ -292,6 +292,80 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
return 0;
}

+struct hns_user_mmap_entry *hns_roce_user_mmap_entry_insert(
+ struct ib_ucontext *ucontext,
+ u64 address, size_t length,
+ enum hns_roce_mmap_type mmap_type)
+{
+ struct hns_user_mmap_entry *entry;
+ int ret;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ entry->address = address;
+ entry->mmap_type = mmap_type;
+
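+ /* Keep the old fixed offsets: pgoff 0 for the DB page, pgoff 1 for TPTR */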
+ ret = rdma_user_mmap_entry_insert_exact(
+ ucontext, &entry->rdma_entry, length,
+ mmap_type == HNS_ROCE_MMAP_TYPE_DB ? 0 : 1);
+ if (ret) {
+ kfree(entry);
+ return NULL;
+ }
+
+ return entry;
+}
+
+static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
+{
+ if (context->db_mmap_entry)
+ rdma_user_mmap_entry_remove(
+ &context->db_mmap_entry->rdma_entry);
+
+ if (context->tptr_mmap_entry)
+ rdma_user_mmap_entry_remove(
+ &context->tptr_mmap_entry->rdma_entry);
+}
+
+static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
+{
+ struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
+ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
+ u64 address;
+ int ret;
+
+ address = context->uar.pfn << PAGE_SHIFT;
+ context->db_mmap_entry =
+ hns_roce_user_mmap_entry_insert(uctx, address, PAGE_SIZE,
+ HNS_ROCE_MMAP_TYPE_DB);
+ if (!context->db_mmap_entry)
+ return -ENOMEM;
+
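+ /* The TPTR page is optional; skip its entry if the HW did not allocate one */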
+ if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
+ return 0;
+
+ /*
+ * FIXME: using io_remap_pfn_range on the dma address returned
+ * by dma_alloc_coherent is totally wrong.
+ */
+ context->tptr_mmap_entry =
+ hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr,
+ hr_dev->tptr_size,
+ HNS_ROCE_MMAP_TYPE_TPTR);
+ if (!context->tptr_mmap_entry) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ hns_roce_dealloc_uar_entry(context);
+ return ret;
+}
+
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
struct ib_udata *udata)
{
@@ -310,6 +384,10 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
if (ret)
goto error_fail_uar_alloc;

+ ret = hns_roce_alloc_uar_entry(uctx);
+ if (ret)
+ goto error_fail_uar_entry;
+
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
INIT_LIST_HEAD(&context->page_list);
@@ -326,6 +404,9 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
return 0;

error_fail_copy_to_udata:
+ hns_roce_dealloc_uar_entry(context);
+
+error_fail_uar_entry:
ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);

error_fail_uar_alloc:
@@ -337,39 +418,45 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);

+ hns_roce_dealloc_uar_entry(context);
+
ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
}

-static int hns_roce_mmap(struct ib_ucontext *context,
- struct vm_area_struct *vma)
+static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
-
- switch (vma->vm_pgoff) {
- case 0:
- return rdma_user_mmap_io(context, vma,
- to_hr_ucontext(context)->uar.pfn,
- PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot),
- NULL);
-
- /* vm_pgoff: 1 -- TPTR */
- case 1:
- if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
- return -EINVAL;
- /*
- * FIXME: using io_remap_pfn_range on the dma address returned
- * by dma_alloc_coherent is totally wrong.
- */
- return rdma_user_mmap_io(context, vma,
- hr_dev->tptr_dma_addr >> PAGE_SHIFT,
- hr_dev->tptr_size,
- vma->vm_page_prot,
- NULL);
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct hns_user_mmap_entry *entry;
+ phys_addr_t pfn;
+ pgprot_t prot;
+ int ret;

- default:
+ rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
+ if (!rdma_entry)
return -EINVAL;
- }
+
+ entry = to_hns_mmap(rdma_entry);
+ pfn = entry->address >> PAGE_SHIFT;
+ prot = vma->vm_page_prot;
+
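+ /* Only the TPTR page keeps the vma's default protection; the DB page must stay non-cached, as before */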
+ if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR)
+ prot = pgprot_noncached(prot);
+
+ ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
+ prot, rdma_entry);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+
+ return ret;
+}
+
+static void hns_roce_free_mmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct hns_user_mmap_entry *entry = to_hns_mmap(rdma_entry);
+
+ kfree(entry);
}

static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
@@ -445,6 +532,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.get_link_layer = hns_roce_get_link_layer,
.get_port_immutable = hns_roce_port_immutable,
.mmap = hns_roce_mmap,
+ .mmap_free = hns_roce_free_mmap,
.modify_device = hns_roce_modify_device,
.modify_qp = hns_roce_modify_qp,
.query_ah = hns_roce_query_ah,
@@ -2914,6 +2914,14 @@ int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
size_t length, u32 min_pgoff,
u32 max_pgoff);

+static inline int rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length, u32 pgoff)
+{
+ return rdma_user_mmap_entry_insert_range(ucontext, entry, length,
+ pgoff, pgoff);
+}
+
struct rdma_user_mmap_entry *
rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
unsigned long pgoff);