--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -171,7 +171,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
if (entry.skb) {
spin_unlock_bh(&q->lock);
- dev->drv->tx_complete_skb(dev, q, &entry, flush);
+ dev->drv->tx_complete_skb(dev, qid, &entry);
spin_lock_bh(&q->lock);
}
@@ -348,7 +348,7 @@ int mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
free:
e.skb = skb;
e.txwi = t;
- dev->drv->tx_complete_skb(dev, q, &e, true);
+ dev->drv->tx_complete_skb(dev, qid, &e);
mt76_put_txwi(dev, t);
return ret;
}
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -292,8 +292,8 @@ struct mt76_driver_ops {
struct mt76_wcid *wcid,
struct ieee80211_sta *sta, u32 *tx_info);
- void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
+ void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -1168,8 +1168,8 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
rcu_read_unlock();
}
-void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush)
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct sk_buff *skb = e->skb;
@@ -1179,7 +1179,7 @@ void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
return;
}
- if (q - dev->mt76.q_tx < 4)
+ if (qid < 4)
dev->tx_hang_check = 0;
mt76_tx_complete_skb(mdev, skb);
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -227,8 +227,8 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
u32 *tx_info);
-void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -755,8 +755,8 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
}
}
-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush)
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_txwi *txwi;
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -198,8 +198,8 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int len);
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
void mt76x02_update_channel(struct mt76_dev *mdev);
void mt76x02_mac_work(struct work_struct *work);
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
@@ -29,6 +29,6 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct sk_buff *skb, enum mt76_txq_id qid,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
u32 *tx_info);
-void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
#endif /* __MT76x02_USB_H */
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -26,8 +26,8 @@ static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
mt76x02_remove_hdr_pad(skb, 2);
}
-void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush)
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e)
{
mt76x02u_remove_dma_hdr(e->skb);
mt76_tx_complete_skb(mdev, e->skb);
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -651,7 +651,7 @@ static void mt76u_tx_tasklet(unsigned long data)
q->queued--;
spin_unlock_bh(&q->lock);
- dev->drv->tx_complete_skb(dev, q, &entry, false);
+ dev->drv->tx_complete_skb(dev, i, &entry);
spin_lock_bh(&q->lock);
}
mt76_txq_schedule(dev, q);
Remove the mt76_queue dependency from the tx_complete_skb function pointer
and rely on mt76_txq_id instead. Remove flush from the tx_complete_skb
signature. This is a preliminary patch to introduce mt76_sw_queue support.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 drivers/net/wireless/mediatek/mt76/dma.c               | 4 ++--
 drivers/net/wireless/mediatek/mt76/mt76.h              | 4 ++--
 drivers/net/wireless/mediatek/mt76/mt7603/mac.c        | 6 +++---
 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h     | 4 ++--
 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c       | 4 ++--
 drivers/net/wireless/mediatek/mt76/mt76x02_mac.h       | 4 ++--
 drivers/net/wireless/mediatek/mt76/mt76x02_usb.h       | 4 ++--
 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c  | 4 ++--
 drivers/net/wireless/mediatek/mt76/usb.c               | 2 +-
 9 files changed, 18 insertions(+), 18 deletions(-)
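A callback written against the new prototype can still reach its hardware
queue through the qid, since dev->q_tx[] is indexed by enum mt76_txq_id.
The sketch below is illustrative only and not part of this patch: the
function name example_tx_complete_skb and the debug print are made up,
while the mt76 structures and mt76_tx_complete_skb() are the ones visible
in the hunks above.

/* Illustrative sketch only -- not part of this patch. Assumes the mt76
 * definitions used in the hunks above: dev->q_tx[] indexed by
 * enum mt76_txq_id, struct mt76_queue_entry and mt76_tx_complete_skb().
 */
static void example_tx_complete_skb(struct mt76_dev *mdev,
				    enum mt76_txq_id qid,
				    struct mt76_queue_entry *e)
{
	/* the queue pointer is no longer passed in, but it can be
	 * recovered from the qid when per-queue state is needed
	 */
	struct mt76_queue *q = &mdev->q_tx[qid];

	if (!e->skb)
		return;

	/* per-AC bookkeeping keys off qid instead of pointer
	 * arithmetic such as "q - dev->q_tx" (IEEE80211_NUM_ACS == 4,
	 * mirroring the "qid < 4" check in mt7603_tx_complete_skb)
	 */
	if (qid < IEEE80211_NUM_ACS)
		pr_debug("tx done on queue %d, %d frames still queued\n",
			 qid, q->queued);

	mt76_tx_complete_skb(mdev, e->skb);
}

Passing the qid rather than the queue pointer lets the core later remap
qids onto software queues (mt76_sw_queue) without touching every driver's
completion path again.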