
net/mlx5e: allocate 'indirection_rqt' buffer dynamically

Message ID 20210308153318.2486939-1-arnd@kernel.org (mailing list archive)
State Not Applicable
Series net/mlx5e: allocate 'indirection_rqt' buffer dynamically

Commit Message

Arnd Bergmann March 8, 2021, 3:32 p.m. UTC
From: Arnd Bergmann <arnd@arndb.de>

Increasing the size of the indirection_rqt array from 128 to 256
entries (512 to 1024 bytes) pushed the stack usage of the
mlx5e_hairpin_fill_rqt_rqns() function over the warning limit when
building with clang and CONFIG_KASAN; the function is inlined into its
caller, so the warning points at mlx5e_tc_add_nic_flow():

drivers/net/ethernet/mellanox/mlx5/core/en_tc.c:970:1: error: stack frame size of 1180 bytes in function 'mlx5e_tc_add_nic_flow' [-Werror,-Wframe-larger-than=]

Using dynamic allocation here is safe because the caller already
allocates its own command buffer the same way, and it reduces the
stack usage of the function to just a few bytes.

Fixes: 1dd55ba2fb70 ("net/mlx5e: Increase indirection RQ table size to 256")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
---
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)
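
For context, the pattern the patch applies, moving a large on-stack
scratch buffer to the heap and propagating allocation failure to the
caller, looks like this in isolation. This is a minimal userspace C
sketch under hypothetical names (fill_table, TABLE_ENTRIES), not the
driver's actual code:

#include <stdint.h>
#include <stdlib.h>

#define TABLE_ENTRIES 256	/* stands in for MLX5E_INDIR_RQT_SIZE */

/* Before the change, "uint32_t table[TABLE_ENTRIES];" would put 1 KiB
 * on this function's stack frame, and KASAN redzones inflate it
 * further; afterwards the frame holds only a pointer and two ints. */
int fill_table(uint32_t *out, unsigned int num_channels)
{
	uint32_t *table;
	int i, sz = TABLE_ENTRIES;

	if (!num_channels)
		return -1;

	table = calloc(sz, sizeof(*table));	/* kcalloc() in the kernel */
	if (!table)
		return -1;	/* kernel code returns -ENOMEM */

	/* mimic mlx5e_build_default_indir_rqt(): spread table entries
	 * round-robin across the available channels */
	for (i = 0; i < sz; i++)
		table[i] = i % num_channels;

	for (i = 0; i < sz; i++)
		out[i] = table[i];	/* consume the scratch table */

	free(table);
	return 0;
}

Because the allocation can now fail, the return type changes from void
to int, which is why the last hunk of the patch adds an err check and
an out: label so the failure path still frees the command buffer.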

Comments

Tariq Toukan March 8, 2021, 4:28 p.m. UTC | #1
On 3/8/2021 5:32 PM, Arnd Bergmann wrote:
> From: Arnd Bergmann <arnd@arndb.de>
> 
> [...]

Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Thanks for your patch.

Tariq
Saeed Mahameed March 11, 2021, 10:48 p.m. UTC | #2
On Mon, 2021-03-08 at 18:28 +0200, Tariq Toukan wrote:
> 
> On 3/8/2021 5:32 PM, Arnd Bergmann wrote:
> > [...]
> 
> Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
> Thanks for your patch.
> 
> Tariq

Applied to net-next-mlx5
Thanks!

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 0da69b98f38f..66f98618dc13 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -445,12 +445,16 @@ static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
 	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
 }
 
-static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
+static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
 {
-	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
+	u32 *indirection_rqt, rqn;
 	struct mlx5e_priv *priv = hp->func_priv;
 	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
 
+	indirection_rqt = kcalloc(sz, sizeof(*indirection_rqt), GFP_KERNEL);
+	if (!indirection_rqt)
+		return -ENOMEM;
+
 	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
 				      hp->num_channels);
 
@@ -462,6 +466,9 @@ static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
 		rqn = hp->pair->rqn[ix];
 		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
 	}
+
+	kfree(indirection_rqt);
+	return 0;
 }
 
 static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
@@ -482,12 +489,15 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
 
-	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+	err = mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+	if (err)
+		goto out;
 
 	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
 	if (!err)
 		hp->indir_rqt.enabled = true;
 
+out:
 	kvfree(in);
 	return err;
 }
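
A detail worth noting in the allocation above: sz counts u32 entries,
not bytes, so the request has to scale by the element size. kcalloc(n,
size, flags) does that multiplication with built-in overflow checking;
a bare kzalloc(sz, GFP_KERNEL) would hand back sz bytes for an array
that needs sz * sizeof(u32). The same rule in a small userspace sketch
(hypothetical names, not driver code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 256

int main(void)
{
	/* Wrong: malloc(ENTRIES) returns 256 bytes, but storing 256
	 * four-byte values would overrun the buffer by 768 bytes. */

	/* Right: element count times element size; calloc(), like the
	 * kernel's kcalloc(), also zeroes the memory and fails cleanly
	 * if count * size overflows. */
	uint32_t *tbl = calloc(ENTRIES, sizeof(*tbl));

	if (!tbl)
		return EXIT_FAILURE;

	for (int i = 0; i < ENTRIES; i++)
		tbl[i] = (uint32_t)i;

	printf("last entry: %u\n", tbl[ENTRIES - 1]);
	free(tbl);
	return EXIT_SUCCESS;
}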