[rdma-core] libhns: Add 64KB page size support for hip08

Message ID: 1526028667-49118-1-git-send-email-liuyixian@huawei.com (mailing list archive)
State: Accepted
Delegated to: Jason Gunthorpe

Commit Message

Yixian Liu May 11, 2018, 8:51 a.m. UTC
This patch uses the device page size instead of the magic number
0x1000 when sizing and aligning queue buffers, so that the allocation
matches the system's configured page size (e.g. 64KB on hip08).

Signed-off-by: Yixian Liu <liuyixian@huawei.com>
---
 providers/hns/hns_roce_u_verbs.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)
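
To make the effect concrete, here is a small standalone sketch of why a
hard-coded 0x1000 alignment breaks on a kernel configured for 64KB pages.
It is not part of the patch: round_up_pow2() and the example WQE geometry
are invented for illustration, and the real code uses rdma-core's align()
helper with the same rounding semantics.

    #include <stdio.h>
    #include <stdint.h>

    /* Round val up to the next multiple of a power-of-two alignment,
     * mirroring what align() does in the patch. */
    static inline uint32_t round_up_pow2(uint32_t val, uint32_t alignment)
    {
            return (val + alignment - 1) & ~(alignment - 1);
    }

    int main(void)
    {
            uint32_t sq_bytes = 64 << 6;    /* 64 SQ WQEs of 64 bytes = 4KB */

            /* Old code: RQ placed after the SQ, rounded up to 0x1000. */
            uint32_t rq_off_old = round_up_pow2(sq_bytes, 0x1000);
            /* New code: rounded up to the device page size, here 64KB. */
            uint32_t rq_off_new = round_up_pow2(sq_bytes, 0x10000);

            /* On a 64KB-page system an offset rounded to 0x1000 is no
             * longer page aligned, so the queue regions cannot each
             * start on their own page as the driver expects. */
            printf("RQ offset with 0x1000:    0x%x\n", rq_off_old);
            printf("RQ offset with page_size: 0x%x\n", rq_off_new);
            return 0;
    }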

Comments

Jason Gunthorpe May 16, 2018, 7:48 p.m. UTC | #1
On Fri, May 11, 2018 at 04:51:07PM +0800, Yixian Liu wrote:
> This patch uses the device page size instead of the magic number
> 0x1000 when sizing and aligning queue buffers, so that the allocation
> matches the system's configured page size (e.g. 64KB on hip08).
> 
> Signed-off-by: Yixian Liu <liuyixian@huawei.com>
>  providers/hns/hns_roce_u_verbs.c | 17 +++++++++--------
>  1 file changed, 9 insertions(+), 8 deletions(-)

Applied, thanks

Jason

Patch

diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c
index 2fc5367..d5eddf4 100644
--- a/providers/hns/hns_roce_u_verbs.c
+++ b/providers/hns/hns_roce_u_verbs.c
@@ -402,6 +402,7 @@ static int hns_roce_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
 				 enum ibv_qp_type type, struct hns_roce_qp *qp)
 {
 	int i;
+	int page_size = to_hr_dev(pd->context->device)->page_size;
 
 	qp->sq.wrid =
 		(unsigned long *)malloc(qp->sq.wqe_cnt * sizeof(uint64_t));
@@ -422,7 +423,7 @@ static int hns_roce_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
 			;
 
 		qp->buf_size = align((qp->sq.wqe_cnt << qp->sq.wqe_shift),
-				     0x1000) +
+				     page_size) +
 			       (qp->rq.wqe_cnt << qp->rq.wqe_shift);
 
 		if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
@@ -430,7 +431,7 @@ static int hns_roce_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
 			qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
 		} else {
 			qp->rq.offset = align((qp->sq.wqe_cnt <<
-					      qp->sq.wqe_shift), 0x1000);
+					      qp->sq.wqe_shift), page_size);
 			qp->sq.offset = 0;
 		}
 	} else {
@@ -474,27 +475,27 @@ static int hns_roce_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
 		}
 
 		qp->buf_size = align((qp->sq.wqe_cnt << qp->sq.wqe_shift),
-				     0x1000) +
+				     page_size) +
 			       align((qp->sge.sge_cnt << qp->sge.sge_shift),
-				     0x1000) +
+				     page_size) +
 			       (qp->rq.wqe_cnt << qp->rq.wqe_shift);
 
 		if (qp->sge.sge_cnt) {
 			qp->sq.offset = 0;
 			qp->sge.offset = align((qp->sq.wqe_cnt <<
-						qp->sq.wqe_shift), 0x1000);
+						qp->sq.wqe_shift), page_size);
 			qp->rq.offset = qp->sge.offset +
 					align((qp->sge.sge_cnt <<
-					qp->sge.sge_shift), 0x1000);
+					qp->sge.sge_shift), page_size);
 		} else {
 			qp->sq.offset = 0;
 			qp->sge.offset = 0;
 			qp->rq.offset = align((qp->sq.wqe_cnt <<
-						qp->sq.wqe_shift), 0x1000);
+						qp->sq.wqe_shift), page_size);
 		}
 	}
 
-	if (hns_roce_alloc_buf(&qp->buf, align(qp->buf_size, 0x1000),
+	if (hns_roce_alloc_buf(&qp->buf, align(qp->buf_size, page_size),
 			       to_hr_dev(pd->context->device)->page_size)) {
 		if (qp->rq.wqe_cnt)
 			free(qp->sq.wrid);
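
For reference, a compact standalone sketch of the layout math the hunks
above compute when SGEs are present. The queue geometry is a hypothetical
example, the hns_roce structures are elided, and the final rounding here
folds in the align() that the patch applies at hns_roce_alloc_buf() time:

    #include <stdio.h>
    #include <stdint.h>

    static inline uint32_t align_up(uint32_t val, uint32_t a)
    {
            return (val + a - 1) & ~(a - 1);
    }

    int main(void)
    {
            uint32_t page_size = 0x10000;      /* 64KB device page */
            uint32_t sq_bytes  = 128u << 6;    /* 128 SQ WQEs of 64B */
            uint32_t sge_bytes = 64u << 4;     /* 64 SGEs of 16B */
            uint32_t rq_bytes  = 256u << 5;    /* 256 RQ WQEs of 32B */

            /* As in the patched path: SQ at offset 0, SGE region after
             * the page-aligned SQ, RQ after the page-aligned SGE region. */
            uint32_t sge_off  = align_up(sq_bytes, page_size);
            uint32_t rq_off   = sge_off + align_up(sge_bytes, page_size);
            uint32_t buf_size = align_up(rq_off + rq_bytes, page_size);

            printf("sq=0x0 sge=0x%x rq=0x%x buf=0x%x\n",
                   sge_off, rq_off, buf_size);
            return 0;
    }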