Message ID | 20240411022757.2591839-1-shaozhengchao@huawei.com (mailing list archive) |
---|---|
State | Superseded |
Headers | show |
Series | [net] RDMA/hns: fix return value in hns_roce_map_mr_sg | expand |
On 2024/4/11 10:27, Zhengchao Shao wrote: > As described in the ib_map_mr_sg function comment, it returns the number > of sg elements that were mapped to the memory region. However, > hns_roce_map_mr_sg returns the number of pages required for mapping the > DMA area. Fix it. > > Fixes: 9b2cf76c9f05 ("RDMA/hns: Optimize PBL buffer allocation process") > Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com> > --- > drivers/infiniband/hw/hns/hns_roce_mr.c | 16 +++++++--------- > 1 file changed, 7 insertions(+), 9 deletions(-) > > diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c > index 9e05b57a2d67..0c5e41d5c03d 100644 > --- a/drivers/infiniband/hw/hns/hns_roce_mr.c > +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c > @@ -441,7 +441,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, > struct ib_device *ibdev = &hr_dev->ib_dev; > struct hns_roce_mr *mr = to_hr_mr(ibmr); > struct hns_roce_mtr *mtr = &mr->pbl_mtr; > - int ret = 0; > + int ret, sg_num; > <...> > mr->npages = 0; > mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count, > @@ -449,10 +449,10 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, > if (!mr->page_list) > return ret; The 'ret = 0' initialization has been removed, so an uninitialized value is returned here. 
> > - ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page); > - if (ret < 1) { > + sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page); > + if (sg_num < 1) { > ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n", > - mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret); > + mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num); > goto err_page_list; > } > > @@ -463,17 +463,15 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, > ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages); > if (ret) { > ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret); > - ret = 0; > - } else { > + sg_num = 0; > + } else > mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size); > - ret = mr->npages; > - } Braces should be used in both branches, as the if branch has two statements. Junxian > > err_page_list: > kvfree(mr->page_list); > mr->page_list = NULL; > > - return ret; > + return sg_num; > } > > static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
On 2024/4/11 10:56, Junxian Huang wrote: > > > On 2024/4/11 10:27, Zhengchao Shao wrote: >> As described in the ib_map_mr_sg function comment, it returns the number >> of sg elements that were mapped to the memory region. However, >> hns_roce_map_mr_sg returns the number of pages required for mapping the >> DMA area. Fix it. >> >> Fixes: 9b2cf76c9f05 ("RDMA/hns: Optimize PBL buffer allocation process") >> Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com> >> --- >> drivers/infiniband/hw/hns/hns_roce_mr.c | 16 +++++++--------- >> 1 file changed, 7 insertions(+), 9 deletions(-) >> >> diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c >> index 9e05b57a2d67..0c5e41d5c03d 100644 >> --- a/drivers/infiniband/hw/hns/hns_roce_mr.c >> +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c >> @@ -441,7 +441,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, >> struct ib_device *ibdev = &hr_dev->ib_dev; >> struct hns_roce_mr *mr = to_hr_mr(ibmr); >> struct hns_roce_mtr *mtr = &mr->pbl_mtr; >> - int ret = 0; >> + int ret, sg_num; >> > > <...> > >> mr->npages = 0; >> mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count, >> @@ -449,10 +449,10 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, >> if (!mr->page_list) >> return ret; > Hi Junxian: Thank you for your review. > The 'ret = 0' is deleted, and here returns an undefined value. My mistake. 
> >> >> - ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page); >> - if (ret < 1) { >> + sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page); >> + if (sg_num < 1) { >> ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n", >> - mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret); >> + mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num); >> goto err_page_list; >> } >> >> @@ -463,17 +463,15 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, >> ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages); >> if (ret) { >> ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret); >> - ret = 0; >> - } else { >> + sg_num = 0; >> + } else >> mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size); >> - ret = mr->npages; >> - } > > Braces should be used in both branches, as the if branch has two statements. > Yeah. I will change it in V2. > Junxian > Zhengchao Shao >> >> err_page_list: >> kvfree(mr->page_list); >> mr->page_list = NULL; >> >> - return ret; >> + return sg_num; >> } >> >> static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 9e05b57a2d67..0c5e41d5c03d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -441,7 +441,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_mr *mr = to_hr_mr(ibmr); struct hns_roce_mtr *mtr = &mr->pbl_mtr; - int ret = 0; + int ret, sg_num; mr->npages = 0; mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count, @@ -449,10 +449,10 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, if (!mr->page_list) return ret; - ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page); - if (ret < 1) { + sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page); + if (sg_num < 1) { ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n", - mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret); + mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num); goto err_page_list; } @@ -463,17 +463,15 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages); if (ret) { ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret); - ret = 0; - } else { + sg_num = 0; + } else mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size); - ret = mr->npages; - } err_page_list: kvfree(mr->page_list); mr->page_list = NULL; - return ret; + return sg_num; } static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
As described in the ib_map_mr_sg function comment, it returns the number of sg elements that were mapped to the memory region. However, hns_roce_map_mr_sg returns the number of pages required for mapping the DMA area. Fix it. Fixes: 9b2cf76c9f05 ("RDMA/hns: Optimize PBL buffer allocation process") Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com> --- drivers/infiniband/hw/hns/hns_roce_mr.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-)