@@ -146,7 +146,7 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
/* Make sure the seq match the requested packet. If not, drop. */
- if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
+ if (mgmt_ethhdr->seq == dsa_inband_seqno(&mgmt_eth_data->inband))
return;
if (cmd == MDIO_READ) {
@@ -247,13 +247,10 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
}
skb->dev = priv->mgmt_master;
-
- /* Increment seq_num and set it in the mdio pkt */
- mgmt_eth_data->seq++;
- qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
ret = dsa_inband_request(&mgmt_eth_data->inband, skb,
+ qca8k_mdio_header_fill_seq_num,
QCA8K_ETHERNET_TIMEOUT);
*val = mgmt_eth_data->data[0];
@@ -295,13 +292,10 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
}
skb->dev = priv->mgmt_master;
-
- /* Increment seq_num and set it in the mdio pkt */
- mgmt_eth_data->seq++;
- qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
ret = dsa_inband_request(&mgmt_eth_data->inband, skb,
+ qca8k_mdio_header_fill_seq_num,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -440,12 +434,10 @@ qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
bool ack;
int ret;
- /* Increment seq_num and set it in the copy pkt */
- mgmt_eth_data->seq++;
- qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
ret = dsa_inband_request(&mgmt_eth_data->inband, skb,
+ qca8k_mdio_header_fill_seq_num,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -527,13 +519,10 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
read_skb->dev = mgmt_master;
clear_skb->dev = mgmt_master;
write_skb->dev = mgmt_master;
-
- /* Increment seq_num and set it in the write pkt */
- mgmt_eth_data->seq++;
- qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
ret = dsa_inband_request(&mgmt_eth_data->inband, write_skb,
+ qca8k_mdio_header_fill_seq_num,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -560,12 +549,10 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
}
if (read) {
- /* Increment seq_num and set it in the read pkt */
- mgmt_eth_data->seq++;
- qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
ret = dsa_inband_request(&mgmt_eth_data->inband, read_skb,
+ qca8k_mdio_header_fill_seq_num,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -583,13 +570,11 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
kfree_skb(read_skb);
}
exit:
- /* Increment seq_num and set it in the clear pkt */
- mgmt_eth_data->seq++;
- qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
/* This is expected to fail sometimes, so don't check return value. */
dsa_inband_request(&mgmt_eth_data->inband, clear_skb,
+ qca8k_mdio_header_fill_seq_num,
QCA8K_ETHERNET_TIMEOUT);
mutex_unlock(&mgmt_eth_data->mutex);
@@ -1902,10 +1887,10 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
return -ENOMEM;
mutex_init(&priv->mgmt_eth_data.mutex);
- dsa_inband_init(&priv->mgmt_eth_data.inband);
+ dsa_inband_init(&priv->mgmt_eth_data.inband, U32_MAX);
mutex_init(&priv->mib_eth_data.mutex);
- dsa_inband_init(&priv->mib_eth_data.inband);
+ dsa_inband_init(&priv->mib_eth_data.inband, U32_MAX);
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
@@ -349,7 +349,6 @@ struct qca8k_mgmt_eth_data {
struct dsa_inband inband;
struct mutex mutex; /* Enforce one mdio read/write at time */
bool ack;
- u32 seq;
u32 data[4];
};
@@ -1309,13 +1309,17 @@ bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
*/
struct dsa_inband {
struct completion completion;
+ u32 seqno;
+ u32 seqno_mask;
};
-void dsa_inband_init(struct dsa_inband *inband);
+void dsa_inband_init(struct dsa_inband *inband, u32 seqno_mask);
void dsa_inband_complete(struct dsa_inband *inband);
int dsa_inband_request(struct dsa_inband *inband, struct sk_buff *skb,
+ void (*insert_seqno)(struct sk_buff *skb, u32 seqno),
int timeout_ms);
int dsa_inband_wait_for_completion(struct dsa_inband *inband, int timeout_ms);
+u32 dsa_inband_seqno(struct dsa_inband *inband);
/* Keep inline for faster access in hot path */
static inline bool netdev_uses_dsa(const struct net_device *dev)
@@ -518,9 +518,11 @@ bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);
-void dsa_inband_init(struct dsa_inband *inband)
+void dsa_inband_init(struct dsa_inband *inband, u32 seqno_mask)
{
init_completion(&inband->completion);
+ inband->seqno_mask = seqno_mask;
+ inband->seqno = 0;
}
EXPORT_SYMBOL_GPL(dsa_inband_init);
@@ -544,11 +546,17 @@ EXPORT_SYMBOL_GPL(dsa_inband_wait_for_completion);
* reinitialized before the skb is queue to avoid races.
*/
int dsa_inband_request(struct dsa_inband *inband, struct sk_buff *skb,
+ void (*insert_seqno)(struct sk_buff *skb, u32 seqno),
int timeout_ms)
{
unsigned long jiffies = msecs_to_jiffies(timeout_ms);
int ret;
+ if (insert_seqno) {
+ inband->seqno++;
+ insert_seqno(skb, inband->seqno & inband->seqno_mask);
+ }
+
reinit_completion(&inband->completion);
dev_queue_xmit(skb);
@@ -560,6 +568,12 @@ int dsa_inband_request(struct dsa_inband *inband, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(dsa_inband_request);
+u32 dsa_inband_seqno(struct dsa_inband *inband)
+{
+ return READ_ONCE(inband->seqno) & inband->seqno_mask;
+}
+EXPORT_SYMBOL_GPL(dsa_inband_seqno);
+
static int __init dsa_init_module(void)
{
int rc;
Each request/reply frame is likely to have a sequence number so that the request and the reply can be matched together. Move this sequence number into the inband structure. The driver must provide a helper to insert the sequence number into the skb, and the core will perform the increment. To allow different devices to have different size sequence numbers, a mask is provided. This can be used, for example, to reduce the u32 sequence number down to a u8. Signed-off-by: Andrew Lunn <andrew@lunn.ch> --- v2 Fix wrong indentation of parameters Move seqno increment before reinitializing completion to close race Add a READ_ONCE() to stop compiler mischief. --- drivers/net/dsa/qca/qca8k-8xxx.c | 33 +++++++++----------------------- drivers/net/dsa/qca/qca8k.h | 1 - include/net/dsa.h | 6 +++++- net/dsa/dsa.c | 16 +++++++++++++++- 4 files changed, 29 insertions(+), 27 deletions(-)