
[net-next,09/11] net: enetc: optimize the allocation of tx_bdr

Message ID 20241009095116.147412-10-wei.fang@nxp.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series add basic support for i.MX95 NETC

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 13 this patch: 13
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers warning 5 maintainers not CCed: john.fastabend@gmail.com daniel@iogearbox.net bpf@vger.kernel.org ast@kernel.org hawk@kernel.org
netdev/build_clang fail Errors and warnings before: 13 this patch: 13
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 13 this patch: 13
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 140 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Wei Fang Oct. 9, 2024, 9:51 a.m. UTC
From: Clark Wang <xiaoning.wang@nxp.com>

There is a situation where num_tx_rings is not divisible by
bdr_int_num. For example, if num_tx_rings is 8 and bdr_int_num
is 3, the previous logic left the memory for two of the tx_bdr
unallocated, so sending packets on TX BD ring 6 or 7 ends up
dereferencing wild pointers. This issue does not exist on
LS1028A, because its num_tx_rings is 8 and its bdr_int_num is
either 1 or 2, so the division is always exact. However, the
upcoming i.MX95 is at risk, so optimize the allocation of
tx_bdr to ensure that every tx_bdr gets its corresponding
memory.

Signed-off-by: Clark Wang <xiaoning.wang@nxp.com>
Signed-off-by: Wei Fang <wei.fang@nxp.com>
Reviewed-by: Claudiu Manoil <claudiu.manoil@nxp.com>
---
 drivers/net/ethernet/freescale/enetc/enetc.c | 121 ++++++++++---------
 1 file changed, 62 insertions(+), 59 deletions(-)
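
To make the remainder handling concrete, the following is a small
standalone C sketch (illustrative only, not code from the driver) of how
8 TX rings map onto 3 interrupt vectors before and after this change:

#include <stdio.h>

int main(void)
{
	int num_tx_rings = 8, bdr_int_num = 3;
	int v_tx_rings = num_tx_rings / bdr_int_num;   /* 2 */
	int v_remainder = num_tx_rings % bdr_int_num;  /* 2 */
	int i, old_covered = 0, new_covered = 0;

	for (i = 0; i < bdr_int_num; i++) {
		/* old logic: every vector gets exactly v_tx_rings rings */
		old_covered += v_tx_rings;
		/* new logic: the first v_remainder vectors take one extra ring */
		new_covered += i < v_remainder ? v_tx_rings + 1 : v_tx_rings;
	}

	/* prints "old: 6 of 8, new: 8 of 8" -- with the old logic,
	 * tx_ring[6] and tx_ring[7] were never allocated.
	 */
	printf("old: %d of %d, new: %d of %d\n",
	       old_covered, num_tx_rings, new_covered, num_tx_rings);
	return 0;
}

The per-vector split becomes 3 + 3 + 2 instead of 2 + 2 + 2, which is what
the "i < v_remainder ? v_tx_rings + 1 : v_tx_rings" expression in the patch
implements.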

Comments

Frank Li Oct. 9, 2024, 5:25 p.m. UTC | #1
On Wed, Oct 09, 2024 at 05:51:14PM +0800, Wei Fang wrote:
> From: Clark Wang <xiaoning.wang@nxp.com>
>
> There is a situation where num_tx_rings cannot be divided by
> bdr_int_num. For example, num_tx_rings is 8 and bdr_int_num
> is 3. According to the previous logic, this results in two
> tx_bdr corresponding memories not being allocated, so when
> sending packets to tx BD ring 6 or 7, wild pointers will be
> accessed. Of course, this issue does not exist for LS1028A,
> because its num_tx_rings is 8, and bdr_int_num is either 1
> or 2. So there is no situation where it cannot be divided.
> However, there is a risk for the upcoming i.MX95, so the
> allocation of tx_bdr is optimized to ensure that each tx_bdr
> can be allocated to the corresponding memory.
>
> Signed-off-by: Clark Wang <xiaoning.wang@nxp.com>
> Signed-off-by: Wei Fang <wei.fang@nxp.com>
> Reviewed-by: Claudiu Manoil <claudiu.manoil@nxp.com>
> ---
>  drivers/net/ethernet/freescale/enetc/enetc.c | 121 ++++++++++---------
>  1 file changed, 62 insertions(+), 59 deletions(-)
>
> diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
> index 032d8eadd003..b84c88a76762 100644
> --- a/drivers/net/ethernet/freescale/enetc/enetc.c
> +++ b/drivers/net/ethernet/freescale/enetc/enetc.c
> @@ -2965,13 +2965,70 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
>  }
>  EXPORT_SYMBOL_GPL(enetc_ioctl);
>
> +static int enetc_bdr_init(struct enetc_ndev_priv *priv, int i, int v_tx_rings)
> +{
> +	struct enetc_int_vector *v __free(kfree);
> +	struct enetc_bdr *bdr;
> +	int j, err;
> +
> +	v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
> +	if (!v)
> +		return -ENOMEM;
> +
> +	bdr = &v->rx_ring;
> +	bdr->index = i;
> +	bdr->ndev = priv->ndev;
> +	bdr->dev = priv->dev;
> +	bdr->bd_count = priv->rx_bd_count;
> +	bdr->buffer_offset = ENETC_RXB_PAD;
> +	priv->rx_ring[i] = bdr;
> +
> +	err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
> +	if (err)
> +		return err;
> +
> +	err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
> +					 MEM_TYPE_PAGE_SHARED, NULL);
> +	if (err) {
> +		xdp_rxq_info_unreg(&bdr->xdp.rxq);
> +		return err;
> +	}
> +
> +	/* init defaults for adaptive IC */
> +	if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
> +		v->rx_ictt = 0x1;
> +		v->rx_dim_en = true;
> +	}
> +	INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
> +	netif_napi_add(priv->ndev, &v->napi, enetc_poll);
> +	v->count_tx_rings = v_tx_rings;
> +
> +	for (j = 0; j < v_tx_rings; j++) {
> +		int idx;
> +
> +		/* default tx ring mapping policy */
> +		idx = priv->bdr_int_num * j + i;
> +		__set_bit(idx, &v->tx_rings_map);
> +		bdr = &v->tx_ring[j];
> +		bdr->index = idx;
> +		bdr->ndev = priv->ndev;
> +		bdr->dev = priv->dev;
> +		bdr->bd_count = priv->tx_bd_count;
> +		priv->tx_ring[idx] = bdr;
> +	}
> +
> +	priv->int_vector[i] = no_free_ptr(v);
> +
> +	return 0;
> +}
> +
>  int enetc_alloc_msix(struct enetc_ndev_priv *priv)
>  {
>  	struct pci_dev *pdev = priv->si->pdev;
> +	int v_tx_rings, v_remainder;
>  	int num_stack_tx_queues;
>  	int first_xdp_tx_ring;
>  	int i, n, err, nvec;
> -	int v_tx_rings;
>
>  	nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
>  	/* allocate MSIX for both messaging and Rx/Tx interrupts */
> @@ -2985,65 +3042,11 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
>
>  	/* # of tx rings per int vector */
>  	v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
> +	v_remainder = priv->num_tx_rings % priv->bdr_int_num;
>
> -	for (i = 0; i < priv->bdr_int_num; i++) {
> -		struct enetc_int_vector *v;
> -		struct enetc_bdr *bdr;
> -		int j;
> -
> -		v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
> -		if (!v) {
> -			err = -ENOMEM;
> -			goto fail;
> -		}
> -
> -		priv->int_vector[i] = v;
> -
> -		bdr = &v->rx_ring;
> -		bdr->index = i;
> -		bdr->ndev = priv->ndev;
> -		bdr->dev = priv->dev;
> -		bdr->bd_count = priv->rx_bd_count;
> -		bdr->buffer_offset = ENETC_RXB_PAD;
> -		priv->rx_ring[i] = bdr;
> -
> -		err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
> -		if (err) {
> -			kfree(v);
> -			goto fail;
> -		}
> -
> -		err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
> -						 MEM_TYPE_PAGE_SHARED, NULL);
> -		if (err) {
> -			xdp_rxq_info_unreg(&bdr->xdp.rxq);
> -			kfree(v);
> -			goto fail;
> -		}
> -
> -		/* init defaults for adaptive IC */
> -		if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
> -			v->rx_ictt = 0x1;
> -			v->rx_dim_en = true;
> -		}
> -		INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
> -		netif_napi_add(priv->ndev, &v->napi, enetc_poll);
> -		v->count_tx_rings = v_tx_rings;
> -
> -		for (j = 0; j < v_tx_rings; j++) {
> -			int idx;
> -
> -			/* default tx ring mapping policy */
> -			idx = priv->bdr_int_num * j + i;
> -			__set_bit(idx, &v->tx_rings_map);
> -			bdr = &v->tx_ring[j];
> -			bdr->index = idx;
> -			bdr->ndev = priv->ndev;
> -			bdr->dev = priv->dev;
> -			bdr->bd_count = priv->tx_bd_count;
> -			priv->tx_ring[idx] = bdr;
> -		}
> -	}
> +	for (i = 0; i < priv->bdr_int_num; i++)
> +		enetc_bdr_init(priv, i,
> +			       i < v_remainder ? v_tx_rings + 1 : v_tx_rings);

Suggest you create two patches: one that just moves the code into the
helper function enetc_bdr_init(), and another with the real fix.

>
>  	num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
>
> --
> 2.34.1
>
Wei Fang Oct. 10, 2024, 3:25 a.m. UTC | #2
> -----Original Message-----
> From: Frank Li <frank.li@nxp.com>
> Sent: October 10, 2024 1:25 AM
> To: Wei Fang <wei.fang@nxp.com>
> Cc: davem@davemloft.net; edumazet@google.com; kuba@kernel.org;
> pabeni@redhat.com; robh@kernel.org; krzk+dt@kernel.org;
> conor+dt@kernel.org; Vladimir Oltean <vladimir.oltean@nxp.com>; Claudiu
> Manoil <claudiu.manoil@nxp.com>; Clark Wang <xiaoning.wang@nxp.com>;
> christophe.leroy@csgroup.eu; linux@armlinux.org.uk; bhelgaas@google.com;
> imx@lists.linux.dev; netdev@vger.kernel.org; devicetree@vger.kernel.org;
> linux-kernel@vger.kernel.org; linux-pci@vger.kernel.org
> Subject: Re: [PATCH net-next 09/11] net: enetc: optimize the allocation of
> tx_bdr
> 
> On Wed, Oct 09, 2024 at 05:51:14PM +0800, Wei Fang wrote:
> > From: Clark Wang <xiaoning.wang@nxp.com>
> >
> > There is a situation where num_tx_rings cannot be divided by
> > bdr_int_num. For example, num_tx_rings is 8 and bdr_int_num
> > is 3. According to the previous logic, this results in two
> > tx_bdr corresponding memories not being allocated, so when
> > sending packets to tx BD ring 6 or 7, wild pointers will be
> > accessed. Of course, this issue does not exist for LS1028A,
> > because its num_tx_rings is 8, and bdr_int_num is either 1
> > or 2. So there is no situation where it cannot be divided.
> > However, there is a risk for the upcoming i.MX95, so the
> > allocation of tx_bdr is optimized to ensure that each tx_bdr
> > can be allocated to the corresponding memory.
> >
> > Signed-off-by: Clark Wang <xiaoning.wang@nxp.com>
> > Signed-off-by: Wei Fang <wei.fang@nxp.com>
> > Reviewed-by: Claudiu Manoil <claudiu.manoil@nxp.com>
> > ---
> >  drivers/net/ethernet/freescale/enetc/enetc.c | 121 ++++++++++---------
> >  1 file changed, 62 insertions(+), 59 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c
> b/drivers/net/ethernet/freescale/enetc/enetc.c
> > index 032d8eadd003..b84c88a76762 100644
> > --- a/drivers/net/ethernet/freescale/enetc/enetc.c
> > +++ b/drivers/net/ethernet/freescale/enetc/enetc.c
> > @@ -2965,13 +2965,70 @@ int enetc_ioctl(struct net_device *ndev, struct
> ifreq *rq, int cmd)
> >  }
> >  EXPORT_SYMBOL_GPL(enetc_ioctl);
> >
> > +static int enetc_bdr_init(struct enetc_ndev_priv *priv, int i, int v_tx_rings)
> > +{
> > +	struct enetc_int_vector *v __free(kfree);
> > +	struct enetc_bdr *bdr;
> > +	int j, err;
> > +
> > +	v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
> > +	if (!v)
> > +		return -ENOMEM;
> > +
> > +	bdr = &v->rx_ring;
> > +	bdr->index = i;
> > +	bdr->ndev = priv->ndev;
> > +	bdr->dev = priv->dev;
> > +	bdr->bd_count = priv->rx_bd_count;
> > +	bdr->buffer_offset = ENETC_RXB_PAD;
> > +	priv->rx_ring[i] = bdr;
> > +
> > +	err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
> > +	if (err)
> > +		return err;
> > +
> > +	err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
> > +					 MEM_TYPE_PAGE_SHARED, NULL);
> > +	if (err) {
> > +		xdp_rxq_info_unreg(&bdr->xdp.rxq);
> > +		return err;
> > +	}
> > +
> > +	/* init defaults for adaptive IC */
> > +	if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
> > +		v->rx_ictt = 0x1;
> > +		v->rx_dim_en = true;
> > +	}
> > +	INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
> > +	netif_napi_add(priv->ndev, &v->napi, enetc_poll);
> > +	v->count_tx_rings = v_tx_rings;
> > +
> > +	for (j = 0; j < v_tx_rings; j++) {
> > +		int idx;
> > +
> > +		/* default tx ring mapping policy */
> > +		idx = priv->bdr_int_num * j + i;
> > +		__set_bit(idx, &v->tx_rings_map);
> > +		bdr = &v->tx_ring[j];
> > +		bdr->index = idx;
> > +		bdr->ndev = priv->ndev;
> > +		bdr->dev = priv->dev;
> > +		bdr->bd_count = priv->tx_bd_count;
> > +		priv->tx_ring[idx] = bdr;
> > +	}
> > +
> > +	priv->int_vector[i] = no_free_ptr(v);
> > +
> > +	return 0;
> > +}
> > +
> >  int enetc_alloc_msix(struct enetc_ndev_priv *priv)
> >  {
> >  	struct pci_dev *pdev = priv->si->pdev;
> > +	int v_tx_rings, v_remainder;
> >  	int num_stack_tx_queues;
> >  	int first_xdp_tx_ring;
> >  	int i, n, err, nvec;
> > -	int v_tx_rings;
> >
> >  	nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
> >  	/* allocate MSIX for both messaging and Rx/Tx interrupts */
> > @@ -2985,65 +3042,11 @@ int enetc_alloc_msix(struct enetc_ndev_priv
> *priv)
> >
> >  	/* # of tx rings per int vector */
> >  	v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
> > +	v_remainder = priv->num_tx_rings % priv->bdr_int_num;
> >
> > -	for (i = 0; i < priv->bdr_int_num; i++) {
> > -		struct enetc_int_vector *v;
> > -		struct enetc_bdr *bdr;
> > -		int j;
> > -
> > -		v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
> > -		if (!v) {
> > -			err = -ENOMEM;
> > -			goto fail;
> > -		}
> > -
> > -		priv->int_vector[i] = v;
> > -
> > -		bdr = &v->rx_ring;
> > -		bdr->index = i;
> > -		bdr->ndev = priv->ndev;
> > -		bdr->dev = priv->dev;
> > -		bdr->bd_count = priv->rx_bd_count;
> > -		bdr->buffer_offset = ENETC_RXB_PAD;
> > -		priv->rx_ring[i] = bdr;
> > -
> > -		err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
> > -		if (err) {
> > -			kfree(v);
> > -			goto fail;
> > -		}
> > -
> > -		err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
> > -						 MEM_TYPE_PAGE_SHARED, NULL);
> > -		if (err) {
> > -			xdp_rxq_info_unreg(&bdr->xdp.rxq);
> > -			kfree(v);
> > -			goto fail;
> > -		}
> > -
> > -		/* init defaults for adaptive IC */
> > -		if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
> > -			v->rx_ictt = 0x1;
> > -			v->rx_dim_en = true;
> > -		}
> > -		INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
> > -		netif_napi_add(priv->ndev, &v->napi, enetc_poll);
> > -		v->count_tx_rings = v_tx_rings;
> > -
> > -		for (j = 0; j < v_tx_rings; j++) {
> > -			int idx;
> > -
> > -			/* default tx ring mapping policy */
> > -			idx = priv->bdr_int_num * j + i;
> > -			__set_bit(idx, &v->tx_rings_map);
> > -			bdr = &v->tx_ring[j];
> > -			bdr->index = idx;
> > -			bdr->ndev = priv->ndev;
> > -			bdr->dev = priv->dev;
> > -			bdr->bd_count = priv->tx_bd_count;
> > -			priv->tx_ring[idx] = bdr;
> > -		}
> > -	}
> > +	for (i = 0; i < priv->bdr_int_num; i++)
> > +		enetc_bdr_init(priv, i,
> > +			       i < v_remainder ? v_tx_rings + 1 : v_tx_rings);
> 
> suggest you create two patches, one just move to help function to
> enetc_bdr_init(), the another is for real fixes.

Sure, thanks

> 
> >
> >  	num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
> >
> > --
> > 2.34.1
> >

Patch

diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 032d8eadd003..b84c88a76762 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2965,13 +2965,70 @@  int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 }
 EXPORT_SYMBOL_GPL(enetc_ioctl);
 
+static int enetc_bdr_init(struct enetc_ndev_priv *priv, int i, int v_tx_rings)
+{
+	struct enetc_int_vector *v __free(kfree);
+	struct enetc_bdr *bdr;
+	int j, err;
+
+	v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
+	if (!v)
+		return -ENOMEM;
+
+	bdr = &v->rx_ring;
+	bdr->index = i;
+	bdr->ndev = priv->ndev;
+	bdr->dev = priv->dev;
+	bdr->bd_count = priv->rx_bd_count;
+	bdr->buffer_offset = ENETC_RXB_PAD;
+	priv->rx_ring[i] = bdr;
+
+	err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
+	if (err)
+		return err;
+
+	err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
+					 MEM_TYPE_PAGE_SHARED, NULL);
+	if (err) {
+		xdp_rxq_info_unreg(&bdr->xdp.rxq);
+		return err;
+	}
+
+	/* init defaults for adaptive IC */
+	if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
+		v->rx_ictt = 0x1;
+		v->rx_dim_en = true;
+	}
+	INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
+	netif_napi_add(priv->ndev, &v->napi, enetc_poll);
+	v->count_tx_rings = v_tx_rings;
+
+	for (j = 0; j < v_tx_rings; j++) {
+		int idx;
+
+		/* default tx ring mapping policy */
+		idx = priv->bdr_int_num * j + i;
+		__set_bit(idx, &v->tx_rings_map);
+		bdr = &v->tx_ring[j];
+		bdr->index = idx;
+		bdr->ndev = priv->ndev;
+		bdr->dev = priv->dev;
+		bdr->bd_count = priv->tx_bd_count;
+		priv->tx_ring[idx] = bdr;
+	}
+
+	priv->int_vector[i] = no_free_ptr(v);
+
+	return 0;
+}
+
 int enetc_alloc_msix(struct enetc_ndev_priv *priv)
 {
 	struct pci_dev *pdev = priv->si->pdev;
+	int v_tx_rings, v_remainder;
 	int num_stack_tx_queues;
 	int first_xdp_tx_ring;
 	int i, n, err, nvec;
-	int v_tx_rings;
 
 	nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
 	/* allocate MSIX for both messaging and Rx/Tx interrupts */
@@ -2985,65 +3042,11 @@  int enetc_alloc_msix(struct enetc_ndev_priv *priv)
 
 	/* # of tx rings per int vector */
 	v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
+	v_remainder = priv->num_tx_rings % priv->bdr_int_num;
 
-	for (i = 0; i < priv->bdr_int_num; i++) {
-		struct enetc_int_vector *v;
-		struct enetc_bdr *bdr;
-		int j;
-
-		v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
-		if (!v) {
-			err = -ENOMEM;
-			goto fail;
-		}
-
-		priv->int_vector[i] = v;
-
-		bdr = &v->rx_ring;
-		bdr->index = i;
-		bdr->ndev = priv->ndev;
-		bdr->dev = priv->dev;
-		bdr->bd_count = priv->rx_bd_count;
-		bdr->buffer_offset = ENETC_RXB_PAD;
-		priv->rx_ring[i] = bdr;
-
-		err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
-		if (err) {
-			kfree(v);
-			goto fail;
-		}
-
-		err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
-						 MEM_TYPE_PAGE_SHARED, NULL);
-		if (err) {
-			xdp_rxq_info_unreg(&bdr->xdp.rxq);
-			kfree(v);
-			goto fail;
-		}
-
-		/* init defaults for adaptive IC */
-		if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
-			v->rx_ictt = 0x1;
-			v->rx_dim_en = true;
-		}
-		INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
-		netif_napi_add(priv->ndev, &v->napi, enetc_poll);
-		v->count_tx_rings = v_tx_rings;
-
-		for (j = 0; j < v_tx_rings; j++) {
-			int idx;
-
-			/* default tx ring mapping policy */
-			idx = priv->bdr_int_num * j + i;
-			__set_bit(idx, &v->tx_rings_map);
-			bdr = &v->tx_ring[j];
-			bdr->index = idx;
-			bdr->ndev = priv->ndev;
-			bdr->dev = priv->dev;
-			bdr->bd_count = priv->tx_bd_count;
-			priv->tx_ring[idx] = bdr;
-		}
-	}
+	for (i = 0; i < priv->bdr_int_num; i++)
+		enetc_bdr_init(priv, i,
+			       i < v_remainder ? v_tx_rings + 1 : v_tx_rings);
 
 	num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
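
A side note on the cleanup idiom that the new enetc_bdr_init() relies on:
the __free(kfree) annotation and no_free_ptr() come from <linux/cleanup.h>,
with the kfree cleanup class defined in <linux/slab.h>. The following is a
minimal sketch of the general pattern, not code from this driver:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct foo {
	int bar;
};

static int foo_alloc(struct foo **out)
{
	/* 'p' is passed to kfree() automatically when it goes out of scope,
	 * so the early error returns below need no explicit kfree().
	 */
	struct foo *p __free(kfree) = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;

	p->bar = 1;

	/* no_free_ptr() hands ownership to the caller and disarms the
	 * automatic kfree() on the success path.
	 */
	*out = no_free_ptr(p);

	return 0;
}

This is why the helper can simply return err after xdp_rxq_info_unreg()
without leaking the vector, whereas the old open-coded loop needed an
explicit kfree(v) before each goto fail.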