[rdma-next,v2,3/3] RDMA/mana_ib: Add support of 4M, 1G, and 2G pages

Message ID: 1744621234-26114-4-git-send-email-kotaranov@linux.microsoft.com (mailing list archive)
State: New
Series: RDMA/mana_ib: extend MR support

Commit Message

Konstantin Taranov April 14, 2025, 9 a.m. UTC
From: Konstantin Taranov <kotaranov@microsoft.com>

Check the PF capability flag for whether the 4M, 1G, and 2G pages are
supported. Add these page sizes to mana_ib, if supported.

Remove the unused enum atb_page_size.

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
---
 drivers/infiniband/hw/mana/main.c               | 10 +++++++---
 drivers/infiniband/hw/mana/mana_ib.h            |  1 +
 drivers/net/ethernet/microsoft/mana/gdma_main.c |  1 +
 include/net/mana/gdma.h                         | 17 +++--------------
 4 files changed, 12 insertions(+), 17 deletions(-)
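
For context on what the wider bitmap buys: ib_umem_find_best_pgsz() returns the
largest page size from the supplied bitmap that can map the registered region,
so each bit added to page_size_cap is a candidate for fewer translation entries
per MR. Below is a minimal userspace sketch of that selection, assuming only
the iova/length alignment constraint and a 64-bit unsigned long; best_pgsz() is
an illustrative stand-in, not the kernel helper, which additionally walks the
umem's scatter-gather list:

#include <stdio.h>
#include <stdint.h>

#define SZ_4K  0x00001000UL
#define SZ_2M  0x00200000UL
#define SZ_4M  0x00400000UL
#define SZ_1G  0x40000000UL
#define SZ_2G  0x80000000UL

/* Simplified model of ib_umem_find_best_pgsz(): return the largest page
 * size in 'bitmap' to which both the iova and the length are aligned.
 * The real helper also checks the physical layout of the umem, which
 * this sketch ignores. Assumes 64-bit unsigned long (Linux LP64). */
static unsigned long best_pgsz(unsigned long bitmap, uint64_t iova, uint64_t len)
{
	uint64_t align = iova | len;

	while (bitmap) {
		/* highest set bit = largest remaining candidate */
		unsigned long pgsz = 1UL << (63 - __builtin_clzl(bitmap));

		if (!(align & (pgsz - 1)))
			return pgsz;
		bitmap &= ~pgsz;
	}
	return 0;
}

int main(void)
{
	unsigned long caps = SZ_4K | SZ_2M | SZ_4M | SZ_1G | SZ_2G;

	/* A 2G-aligned, 2G-long region can use a single 2G page ... */
	printf("0x%lx\n", best_pgsz(caps, 2UL * SZ_1G, 2UL * SZ_1G));
	/* ... while a 6M region at a 2M-aligned iova falls back to 2M. */
	printf("0x%lx\n", best_pgsz(caps, SZ_2M, 3 * SZ_2M));
	return 0;
}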

Comments

Long Li April 16, 2025, 6:31 p.m. UTC | #1
> Subject: [PATCH rdma-next v2 3/3] RDMA/mana_ib: Add support of 4M, 1G, and
> 2G pages
> 
> [...]

Reviewed-by: Long Li <longli@microsoft.com>

Patch

diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 730f958..a28b712 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -479,7 +479,7 @@  int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 {
 	unsigned long page_sz;
 
-	page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
+	page_sz = ib_umem_find_best_pgsz(umem, dev->adapter_caps.page_size_cap, virt);
 	if (!page_sz) {
 		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
 		return -EINVAL;
@@ -494,7 +494,7 @@  int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_ume
 	unsigned long page_sz;
 
 	/* Hardware requires dma region to align to chosen page size */
-	page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
+	page_sz = ib_umem_find_best_pgoff(umem, dev->adapter_caps.page_size_cap, 0);
 	if (!page_sz) {
 		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
 		return -EINVAL;
@@ -577,7 +577,7 @@  int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
 
 	memset(props, 0, sizeof(*props));
 	props->max_mr_size = MANA_IB_MAX_MR_SIZE;
-	props->page_size_cap = PAGE_SZ_BM;
+	props->page_size_cap = dev->adapter_caps.page_size_cap;
 	props->max_qp = dev->adapter_caps.max_qp_count;
 	props->max_qp_wr = dev->adapter_caps.max_qp_wr;
 	props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
@@ -696,6 +696,10 @@  int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
 	caps->max_recv_sge_count = resp.max_recv_sge_count;
 	caps->feature_flags = resp.feature_flags;
 
+	caps->page_size_cap = PAGE_SZ_BM;
+	if (mdev_to_gc(dev)->pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB)
+		caps->page_size_cap |= (SZ_4M | SZ_1G | SZ_2G);
+
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6903946..f0dbd90 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -60,6 +60,7 @@  struct mana_ib_adapter_caps {
 	u32 max_recv_sge_count;
 	u32 max_inline_data_size;
 	u64 feature_flags;
+	u64 page_size_cap;
 };
 
 struct mana_ib_queue {
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 4a2b17f..b5156d4 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -937,6 +937,7 @@  int mana_gd_verify_vf_version(struct pci_dev *pdev)
 			err, resp.hdr.status);
 		return err ? err : -EPROTO;
 	}
+	gc->pf_cap_flags1 = resp.pf_cap_flags1;
 	if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
 		err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
 		if (err) {
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 3db506d..89abf98 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -407,6 +407,8 @@  struct gdma_context {
 
 	/* Azure RDMA adapter */
 	struct gdma_dev		mana_ib;
+
+	u64 pf_cap_flags1;
 };
 
 #define MAX_NUM_GDMA_DEVICES	4
@@ -556,6 +558,7 @@  enum {
 #define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
 #define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
 #define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)
+#define GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB BIT(4)
 
 #define GDMA_DRV_CAP_FLAGS1 \
 	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
@@ -704,20 +707,6 @@  struct gdma_query_hwc_timeout_resp {
 	u32 reserved;
 };
 
-enum atb_page_size {
-	ATB_PAGE_SIZE_4K,
-	ATB_PAGE_SIZE_8K,
-	ATB_PAGE_SIZE_16K,
-	ATB_PAGE_SIZE_32K,
-	ATB_PAGE_SIZE_64K,
-	ATB_PAGE_SIZE_128K,
-	ATB_PAGE_SIZE_256K,
-	ATB_PAGE_SIZE_512K,
-	ATB_PAGE_SIZE_1M,
-	ATB_PAGE_SIZE_2M,
-	ATB_PAGE_SIZE_MAX,
-};
-
 enum gdma_mr_access_flags {
 	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
 	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
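
A closing usage note: nothing changes at the verbs API surface. A consumer
registers memory as before, and the provider simply has larger page sizes to
choose from when the buffer allows it. A hedged sketch, assuming a
hugetlb-backed buffer so the region is actually alignable to the new sizes;
reg_huge_mr() and the 1 GiB mapping are hypothetical illustration, not part of
this patch:

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>
#include <infiniband/verbs.h>

#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB (30 << 26) /* 30 << MAP_HUGE_SHIFT */
#endif

/* Register a 1 GiB hugepage-backed buffer; with this patch applied, the
 * mana provider can map it with a single 1G page instead of hundreds of
 * 2M entries. 'pd' is assumed to be an already-allocated ibv_pd. */
static struct ibv_mr *reg_huge_mr(struct ibv_pd *pd)
{
	size_t len = 1UL << 30; /* 1 GiB */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
			 MAP_HUGE_1GB, -1, 0);

	if (buf == MAP_FAILED)
		return NULL;

	return ibv_reg_mr(pd, buf, len,
			  IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);
}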