
[net-next,v13,6/6] net: dsa: qca8k: Use new convenience functions

Message ID 20220916121817.4061532-7-mattias.forsblad@gmail.com
State Superseded
Delegated to: Netdev Maintainers
Series net: dsa: qca8k, mv88e6xxx: rmon: Add RMU support

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers success CCed 10 of 10 maintainers
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns; WARNING: line length of 93 exceeds 80 columns; WARNING: line length of 99 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Mattias Forsblad Sept. 16, 2022, 12:18 p.m. UTC
Use the new common convenience functions for sending and
waiting for frames.

Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Mattias Forsblad <mattias.forsblad@gmail.com>
---
 drivers/net/dsa/qca/qca8k-8xxx.c | 61 +++++++++-----------------------
 1 file changed, 17 insertions(+), 44 deletions(-)
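
For context, the before/after pattern this patch applies, condensed from the qca8k_read_eth() hunk below (a sketch only; it assumes the dsa_switch_inband_tx()/dsa_switch_inband_complete() helpers introduced earlier in this series handle the reinit_completion(), dev_queue_xmit() and timed wait internally):

	/* Before: each caller open-codes the inband request/response dance */
	reinit_completion(&mgmt_eth_data->rw_done);
	dev_queue_xmit(skb);
	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	/* After: the DSA helper re-inits the completion, transmits the frame
	 * and waits for the rx handler to signal it via
	 * dsa_switch_inband_complete().
	 */
	ret = dsa_switch_inband_tx(ds, skb, &mgmt_eth_data->rw_done,
				   QCA8K_ETHERNET_TIMEOUT);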

Comments

Christian Marangi Sept. 16, 2022, 6:09 a.m. UTC | #1
On Fri, Sep 16, 2022 at 02:18:17PM +0200, Mattias Forsblad wrote:
> Use the new common convenience functions for sending and
> waiting for frames.
> 
> Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
> Signed-off-by: Mattias Forsblad <mattias.forsblad@gmail.com>
> ---
>  drivers/net/dsa/qca/qca8k-8xxx.c | 61 +++++++++-----------------------
>  1 file changed, 17 insertions(+), 44 deletions(-)
> 
> diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
> index c181346388a4..4e9bc103c0a5 100644
> --- a/drivers/net/dsa/qca/qca8k-8xxx.c
> +++ b/drivers/net/dsa/qca/qca8k-8xxx.c
> @@ -160,7 +160,7 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
>  			       QCA_HDR_MGMT_DATA2_LEN);
>  	}
>  
> -	complete(&mgmt_eth_data->rw_done);
> +	dsa_switch_inband_complete(ds, &mgmt_eth_data->rw_done);
>  }
>  
>  static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
> @@ -228,6 +228,7 @@ static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
>  static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
>  {
>  	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
> +	struct dsa_switch *ds = priv->ds;
>  	struct sk_buff *skb;
>  	bool ack;
>  	int ret;
> @@ -248,17 +249,12 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
>  
>  	skb->dev = priv->mgmt_master;
>  
> -	reinit_completion(&mgmt_eth_data->rw_done);
> -
>  	/* Increment seq_num and set it in the mdio pkt */
>  	mgmt_eth_data->seq++;
>  	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
>  	mgmt_eth_data->ack = false;
>  
> -	dev_queue_xmit(skb);
> -
> -	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
> -					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
> +	ret = dsa_switch_inband_tx(ds, skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
>  
>  	*val = mgmt_eth_data->data[0];
>  	if (len > QCA_HDR_MGMT_DATA1_LEN)
> @@ -280,6 +276,7 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
>  static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
>  {
>  	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
> +	struct dsa_switch *ds = priv->ds;
>  	struct sk_buff *skb;
>  	bool ack;
>  	int ret;
> @@ -300,17 +297,12 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
>  
>  	skb->dev = priv->mgmt_master;
>  
> -	reinit_completion(&mgmt_eth_data->rw_done);
> -
>  	/* Increment seq_num and set it in the mdio pkt */
>  	mgmt_eth_data->seq++;
>  	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
>  	mgmt_eth_data->ack = false;
>  
> -	dev_queue_xmit(skb);
> -
> -	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
> -					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
> +	ret = dsa_switch_inband_tx(ds, skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
>  
>  	ack = mgmt_eth_data->ack;
>  
> @@ -441,24 +433,21 @@ static struct regmap_config qca8k_regmap_config = {
>  };
>  
>  static int
> -qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
> +qca8k_phy_eth_busy_wait(struct qca8k_priv *priv,
>  			struct sk_buff *read_skb, u32 *val)
>  {
> +	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
>  	struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
> +	struct dsa_switch *ds = priv->ds;
>  	bool ack;
>  	int ret;
>  
> -	reinit_completion(&mgmt_eth_data->rw_done);
> -
>  	/* Increment seq_num and set it in the copy pkt */
>  	mgmt_eth_data->seq++;
>  	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
>  	mgmt_eth_data->ack = false;
>  
> -	dev_queue_xmit(skb);
> -
> -	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
> -					  QCA8K_ETHERNET_TIMEOUT);
> +	ret = dsa_switch_inband_tx(ds, skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
>  
>  	ack = mgmt_eth_data->ack;
>  
> @@ -480,6 +469,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>  	struct sk_buff *write_skb, *clear_skb, *read_skb;
>  	struct qca8k_mgmt_eth_data *mgmt_eth_data;
>  	u32 write_val, clear_val = 0, val;
> +	struct dsa_switch *ds = priv->ds;
>  	struct net_device *mgmt_master;
>  	int ret, ret1;
>  	bool ack;
> @@ -540,17 +530,12 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>  	clear_skb->dev = mgmt_master;
>  	write_skb->dev = mgmt_master;
>  
> -	reinit_completion(&mgmt_eth_data->rw_done);
> -
>  	/* Increment seq_num and set it in the write pkt */
>  	mgmt_eth_data->seq++;
>  	qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
>  	mgmt_eth_data->ack = false;
>  
> -	dev_queue_xmit(write_skb);
> -
> -	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
> -					  QCA8K_ETHERNET_TIMEOUT);
> +	ret = dsa_switch_inband_tx(ds, write_skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
>  
>  	ack = mgmt_eth_data->ack;
>  
> @@ -569,7 +554,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>  	ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
>  				!(val & QCA8K_MDIO_MASTER_BUSY), 0,
>  				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
> -				mgmt_eth_data, read_skb, &val);
> +				priv, read_skb, &val);
>  
>  	if (ret < 0 && ret1 < 0) {
>  		ret = ret1;
> @@ -577,17 +562,13 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>  	}
>  
>  	if (read) {
> -		reinit_completion(&mgmt_eth_data->rw_done);
> -
>  		/* Increment seq_num and set it in the read pkt */
>  		mgmt_eth_data->seq++;
>  		qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
>  		mgmt_eth_data->ack = false;
>  
> -		dev_queue_xmit(read_skb);
> -
> -		ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
> -						  QCA8K_ETHERNET_TIMEOUT);
> +		ret = dsa_switch_inband_tx(ds, read_skb, &mgmt_eth_data->rw_done,
> +					   QCA8K_ETHERNET_TIMEOUT);
>  
>  		ack = mgmt_eth_data->ack;
>  
> @@ -606,17 +587,12 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>  		kfree_skb(read_skb);
>  	}
>  exit:
> -	reinit_completion(&mgmt_eth_data->rw_done);
> -
>  	/* Increment seq_num and set it in the clear pkt */
>  	mgmt_eth_data->seq++;
>  	qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
>  	mgmt_eth_data->ack = false;
>  
> -	dev_queue_xmit(clear_skb);
> -
> -	wait_for_completion_timeout(&mgmt_eth_data->rw_done,
> -				    QCA8K_ETHERNET_TIMEOUT);
> +	ret = dsa_switch_inband_tx(ds, clear_skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);

This causes the breakage of qca8k!

The clear_skb is used to clean a state but is optional, so we should not
check its exit value.

On top of that, this overwrites the mdio return value from the read
branch:

ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;

This should be changed to just

dsa_switch_inband_tx(ds, clear_skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);

Also, considering the majority of the driver is aligned to 80 columns, can
you wrap these new function calls to that? (personal taste)
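
To make the suggested change concrete, here is a minimal sketch of the exit path with it applied (an editorial illustration only, reusing the dsa_switch_inband_tx() call as it appears elsewhere in this patch); ret keeps the mdio value computed in the read branch:

exit:
	/* Increment seq_num and set it in the clear pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	/* The clear frame is best-effort state cleanup: send it and wait,
	 * but neither check nor assign its return value.
	 */
	dsa_switch_inband_tx(ds, clear_skb, &mgmt_eth_data->rw_done,
			     QCA8K_ETHERNET_TIMEOUT);

	mutex_unlock(&mgmt_eth_data->mutex);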

>  
>  	mutex_unlock(&mgmt_eth_data->mutex);
>  
> @@ -1528,7 +1504,7 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk
>  exit:
>  	/* Complete on receiving all the mib packet */
>  	if (refcount_dec_and_test(&mib_eth_data->port_parsed))
> -		complete(&mib_eth_data->rw_done);
> +		dsa_switch_inband_complete(ds, &mib_eth_data->rw_done);
>  }
>  
>  static int
> @@ -1543,8 +1519,6 @@ qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
>  
>  	mutex_lock(&mib_eth_data->mutex);
>  
> -	reinit_completion(&mib_eth_data->rw_done);
> -
>  	mib_eth_data->req_port = dp->index;
>  	mib_eth_data->data = data;
>  	refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
> @@ -1562,8 +1536,7 @@ qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
>  	if (ret)
>  		goto exit;
>  
> -	ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
> -
> +	ret = dsa_switch_inband_tx(ds, NULL, &mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
>  exit:
>  	mutex_unlock(&mib_eth_data->mutex);
>  
> -- 
> 2.25.1
>
Mattias Forsblad Sept. 19, 2022, 5:18 a.m. UTC | #2
On 2022-09-16 08:09, Christian Marangi wrote:
>> @@ -606,17 +587,12 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>>  		kfree_skb(read_skb);
>>  	}
>>  exit:
>> -	reinit_completion(&mgmt_eth_data->rw_done);
>> -
>>  	/* Increment seq_num and set it in the clear pkt */
>>  	mgmt_eth_data->seq++;
>>  	qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
>>  	mgmt_eth_data->ack = false;
>>  
>> -	dev_queue_xmit(clear_skb);
>> -
>> -	wait_for_completion_timeout(&mgmt_eth_data->rw_done,
>> -				    QCA8K_ETHERNET_TIMEOUT);
>> +	ret = dsa_switch_inband_tx(ds, clear_skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
> 
> This causes the breakage of qca8k!
> 
> The clear_skb is used to clean a state but is optional, so we should not
> check its exit value.
> 
> On top of that, this overwrites the mdio return value from the read
> branch:
> 
> ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
> 
> This should be changed to just
> 
> dsa_switch_inband_tx(ds, clear_skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
> 
> Also, considering the majority of the driver is aligned to 80 columns, can
> you wrap these new function calls to that? (personal taste)
>

Thanks for the testing, I'll fix the issue you've found and do a respin.

/Mattias

 
>>  
>>  	mutex_unlock(&mgmt_eth_data->mutex);
>>  
>> @@ -1528,7 +1504,7 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk
>>  exit:
>>  	/* Complete on receiving all the mib packet */
>>  	if (refcount_dec_and_test(&mib_eth_data->port_parsed))
>> -		complete(&mib_eth_data->rw_done);
>> +		dsa_switch_inband_complete(ds, &mib_eth_data->rw_done);
>>  }
>>  
>>  static int
>> @@ -1543,8 +1519,6 @@ qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
>>  
>>  	mutex_lock(&mib_eth_data->mutex);
>>  
>> -	reinit_completion(&mib_eth_data->rw_done);
>> -
>>  	mib_eth_data->req_port = dp->index;
>>  	mib_eth_data->data = data;
>>  	refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
>> @@ -1562,8 +1536,7 @@ qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
>>  	if (ret)
>>  		goto exit;
>>  
>> -	ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
>> -
>> +	ret = dsa_switch_inband_tx(ds, NULL, &mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
>>  exit:
>>  	mutex_unlock(&mib_eth_data->mutex);
>>  
>> -- 
>> 2.25.1
>>
>

Patch

diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index c181346388a4..4e9bc103c0a5 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -160,7 +160,7 @@  static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
 			       QCA_HDR_MGMT_DATA2_LEN);
 	}
 
-	complete(&mgmt_eth_data->rw_done);
+	dsa_switch_inband_complete(ds, &mgmt_eth_data->rw_done);
 }
 
 static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
@@ -228,6 +228,7 @@  static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
 static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
 {
 	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+	struct dsa_switch *ds = priv->ds;
 	struct sk_buff *skb;
 	bool ack;
 	int ret;
@@ -248,17 +249,12 @@  static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
 
 	skb->dev = priv->mgmt_master;
 
-	reinit_completion(&mgmt_eth_data->rw_done);
-
 	/* Increment seq_num and set it in the mdio pkt */
 	mgmt_eth_data->seq++;
 	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
 	mgmt_eth_data->ack = false;
 
-	dev_queue_xmit(skb);
-
-	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
-					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+	ret = dsa_switch_inband_tx(ds, skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
 
 	*val = mgmt_eth_data->data[0];
 	if (len > QCA_HDR_MGMT_DATA1_LEN)
@@ -280,6 +276,7 @@  static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
 static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
 {
 	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+	struct dsa_switch *ds = priv->ds;
 	struct sk_buff *skb;
 	bool ack;
 	int ret;
@@ -300,17 +297,12 @@  static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
 
 	skb->dev = priv->mgmt_master;
 
-	reinit_completion(&mgmt_eth_data->rw_done);
-
 	/* Increment seq_num and set it in the mdio pkt */
 	mgmt_eth_data->seq++;
 	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
 	mgmt_eth_data->ack = false;
 
-	dev_queue_xmit(skb);
-
-	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
-					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+	ret = dsa_switch_inband_tx(ds, skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
 
 	ack = mgmt_eth_data->ack;
 
@@ -441,24 +433,21 @@  static struct regmap_config qca8k_regmap_config = {
 };
 
 static int
-qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
+qca8k_phy_eth_busy_wait(struct qca8k_priv *priv,
 			struct sk_buff *read_skb, u32 *val)
 {
+	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
 	struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
+	struct dsa_switch *ds = priv->ds;
 	bool ack;
 	int ret;
 
-	reinit_completion(&mgmt_eth_data->rw_done);
-
 	/* Increment seq_num and set it in the copy pkt */
 	mgmt_eth_data->seq++;
 	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
 	mgmt_eth_data->ack = false;
 
-	dev_queue_xmit(skb);
-
-	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
-					  QCA8K_ETHERNET_TIMEOUT);
+	ret = dsa_switch_inband_tx(ds, skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
 
 	ack = mgmt_eth_data->ack;
 
@@ -480,6 +469,7 @@  qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
 	struct sk_buff *write_skb, *clear_skb, *read_skb;
 	struct qca8k_mgmt_eth_data *mgmt_eth_data;
 	u32 write_val, clear_val = 0, val;
+	struct dsa_switch *ds = priv->ds;
 	struct net_device *mgmt_master;
 	int ret, ret1;
 	bool ack;
@@ -540,17 +530,12 @@  qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
 	clear_skb->dev = mgmt_master;
 	write_skb->dev = mgmt_master;
 
-	reinit_completion(&mgmt_eth_data->rw_done);
-
 	/* Increment seq_num and set it in the write pkt */
 	mgmt_eth_data->seq++;
 	qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
 	mgmt_eth_data->ack = false;
 
-	dev_queue_xmit(write_skb);
-
-	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
-					  QCA8K_ETHERNET_TIMEOUT);
+	ret = dsa_switch_inband_tx(ds, write_skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
 
 	ack = mgmt_eth_data->ack;
 
@@ -569,7 +554,7 @@  qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
 	ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
 				!(val & QCA8K_MDIO_MASTER_BUSY), 0,
 				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
-				mgmt_eth_data, read_skb, &val);
+				priv, read_skb, &val);
 
 	if (ret < 0 && ret1 < 0) {
 		ret = ret1;
@@ -577,17 +562,13 @@  qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
 	}
 
 	if (read) {
-		reinit_completion(&mgmt_eth_data->rw_done);
-
 		/* Increment seq_num and set it in the read pkt */
 		mgmt_eth_data->seq++;
 		qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
 		mgmt_eth_data->ack = false;
 
-		dev_queue_xmit(read_skb);
-
-		ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
-						  QCA8K_ETHERNET_TIMEOUT);
+		ret = dsa_switch_inband_tx(ds, read_skb, &mgmt_eth_data->rw_done,
+					   QCA8K_ETHERNET_TIMEOUT);
 
 		ack = mgmt_eth_data->ack;
 
@@ -606,17 +587,12 @@  qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
 		kfree_skb(read_skb);
 	}
 exit:
-	reinit_completion(&mgmt_eth_data->rw_done);
-
 	/* Increment seq_num and set it in the clear pkt */
 	mgmt_eth_data->seq++;
 	qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
 	mgmt_eth_data->ack = false;
 
-	dev_queue_xmit(clear_skb);
-
-	wait_for_completion_timeout(&mgmt_eth_data->rw_done,
-				    QCA8K_ETHERNET_TIMEOUT);
+	ret = dsa_switch_inband_tx(ds, clear_skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
 
 	mutex_unlock(&mgmt_eth_data->mutex);
 
@@ -1528,7 +1504,7 @@  static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk
 exit:
 	/* Complete on receiving all the mib packet */
 	if (refcount_dec_and_test(&mib_eth_data->port_parsed))
-		complete(&mib_eth_data->rw_done);
+		dsa_switch_inband_complete(ds, &mib_eth_data->rw_done);
 }
 
 static int
@@ -1543,8 +1519,6 @@  qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
 
 	mutex_lock(&mib_eth_data->mutex);
 
-	reinit_completion(&mib_eth_data->rw_done);
-
 	mib_eth_data->req_port = dp->index;
 	mib_eth_data->data = data;
 	refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
@@ -1562,8 +1536,7 @@  qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
 	if (ret)
 		goto exit;
 
-	ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
-
+	ret = dsa_switch_inband_tx(ds, NULL, &mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
 exit:
 	mutex_unlock(&mib_eth_data->mutex);