@@ -300,6 +300,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
}

skb->prev = skb->next = NULL;
+ if (dev->drv->tx_aligned4_skbs)
+ mt76_insert_hdr_pad(skb);
+
dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
DMA_TO_DEVICE);
ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, qid, wcid, sta,
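For context (not part of the patch): mt76_insert_hdr_pad() is an existing mt76 helper. When the 802.11 header length is not a multiple of four, it pushes two bytes onto the skb and moves the header forward, leaving two zeroed pad bytes between header and payload so the payload stays 4-byte aligned for the DMA engine. A minimal sketch of that behaviour, assuming the helper keeps its current semantics (the in-tree version may differ in detail):

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/cfg80211.h>

/* Rough sketch of the existing mt76_insert_hdr_pad() helper. */
static int mt76_insert_hdr_pad_sketch(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	/* Header already 4-byte aligned: nothing to do. */
	if (len % 4 == 0)
		return 0;

	/* Make room at the head, shift the 802.11 header up by two bytes
	 * and zero the two pad bytes now sitting between header and payload.
	 */
	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);
	skb->data[len] = 0;
	skb->data[len + 1] = 0;

	return 2;
}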
@@ -288,6 +288,7 @@ struct mt76_hw_cap {
};

struct mt76_driver_ops {
+ bool tx_aligned4_skbs;
u16 txwi_size;

void (*update_survey)(struct mt76_dev *dev);
@@ -164,6 +164,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
+ .tx_aligned4_skbs = true,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
@@ -152,20 +152,20 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
u32 *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct mt76x02_txwi *txwi = txwi_ptr;
- int qsel = MT_QSEL_EDCA;
- int pid;
+ int hdrlen, len, pid, qsel = MT_QSEL_EDCA;

if (qid == MT_TXQ_PSD && wcid && wcid->idx < 128)
mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);

- mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ len = skb->len - (hdrlen & 2);
+ mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);

pid = mt76_tx_status_skb_add(mdev, wcid, skb);
txwi->pktid = pid;

- mt76_insert_hdr_pad(skb);
-
if (pid >= MT_PACKET_ID_FIRST)
qsel = MT_QSEL_MGMT;
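The (hdrlen & 2) fix-up above compensates for the padding that mt76_dma_tx_queue_skb() now inserts before tx_prepare_skb() runs: 802.11 header lengths are always even, so (hdrlen & 2) is 2 exactly when the header is not 4-byte aligned, which is exactly when two pad bytes were added, and subtracting it recovers the real frame length written into the TXWI. A small stand-alone illustration with hypothetical header and payload sizes (not part of the patch):

#include <stdio.h>

int main(void)
{
	/* e.g. 24-byte non-QoS data header vs. 26-byte QoS data header */
	const int hdrlens[] = { 24, 26, 30, 32 };
	const int payload = 100;	/* arbitrary payload length */
	int i;

	for (i = 0; i < 4; i++) {
		int hdrlen = hdrlens[i];
		int pad = hdrlen & 2;	/* bytes added by mt76_insert_hdr_pad() */
		int skb_len = hdrlen + pad + payload;

		printf("hdrlen=%2d pad=%d skb->len=%3d txwi len=%3d\n",
		       hdrlen, pad, skb_len, skb_len - (hdrlen & 2));
	}
	return 0;
}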
@@ -32,6 +32,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
+ .tx_aligned4_skbs = true,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,