@@ -300,9 +300,9 @@ mtk_wed_assign(struct mtk_wed_device *dev)
 static int
 mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
 {
+	struct mtk_wed_buf *page_list;
 	struct mtk_wdma_desc *desc;
 	dma_addr_t desc_phys;
-	void **page_list;
 	int token = dev->wlan.token_start;
 	int ring_size;
 	int n_pages;
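Since the list is only ever sized through its element type, the declaration change above is all the allocation needs: sizeof(*page_list) now resolves to sizeof(struct mtk_wed_buf). A minimal sketch of that sizing follows; the kcalloc() call is an assumption about how the surrounding function allocates the list, since the hunk does not show it.

	/* Sketch only, not part of the patch: the same allocation grows
	 * automatically because it is sized off the element type.
	 */
	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;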
@@ -343,7 +343,8 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
 			return -ENOMEM;
 		}
 
-		page_list[page_idx++] = page;
+		page_list[page_idx].p = page;
+		page_list[page_idx++].phy_addr = page_phys;
 		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
 					DMA_BIDIRECTIONAL);
 
@@ -387,8 +388,8 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
 static void
 mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
 {
+	struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages;
 	struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
-	void **page_list = dev->tx_buf_ring.pages;
 	int page_idx;
 	int i;
 
@@ -400,13 +401,12 @@ mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
 
 	for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
 	     i += MTK_WED_BUF_PER_PAGE) {
-		void *page = page_list[page_idx++];
-		dma_addr_t buf_addr;
+		dma_addr_t buf_addr = page_list[page_idx].phy_addr;
+		void *page = page_list[page_idx++].p;
 
 		if (!page)
			break;
 
-		buf_addr = le32_to_cpu(desc[i].buf0);
 		dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
 			       DMA_BIDIRECTIONAL);
 		__free_page(page);
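Taken together, the two mtk_wed.c hunks move the DMA address out of the hardware descriptor and into the driver's own bookkeeping: it is recorded next to the page pointer when the page is mapped, and read back from there on teardown instead of via le32_to_cpu(desc[i].buf0). Below is a condensed sketch of that pairing for a single page; the helper names and the bare struct device parameter are stand-ins for illustration, not code from the patch.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/soc/mediatek/mtk_wed.h>

/* Hypothetical helpers modelling the new mtk_wed_buf bookkeeping. */
static int wed_buf_map_one(struct device *dma_dev, struct mtk_wed_buf *buf)
{
	struct page *page = alloc_page(GFP_KERNEL);
	dma_addr_t phys;

	if (!page)
		return -ENOMEM;

	phys = dma_map_page(dma_dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dma_dev, phys)) {
		__free_page(page);
		return -ENOMEM;
	}

	/* Record both the page pointer and the DMA handle at map time ... */
	buf->p = page;
	buf->phy_addr = phys;

	return 0;
}

static void wed_buf_unmap_one(struct device *dma_dev, struct mtk_wed_buf *buf)
{
	struct page *page = buf->p;

	if (!page)
		return;

	/* ... so the free path unmaps with the stored handle and no longer
	 * has to decode the little-endian descriptor word.
	 */
	dma_unmap_page(dma_dev, buf->phy_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(page);
	buf->p = NULL;
}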
@@ -76,6 +76,11 @@ struct mtk_wed_wo_rx_stats {
 	__le32 rx_drop_cnt;
 };
 
+struct mtk_wed_buf {
+	void *p;
+	dma_addr_t phy_addr;
+};
+
 struct mtk_wed_device {
 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
 	const struct mtk_wed_ops *ops;
@@ -97,7 +102,7 @@ struct mtk_wed_device {
 
 	struct {
 		int size;
-		void **pages;
+		struct mtk_wed_buf *pages;
 		struct mtk_wdma_desc *desc;
 		dma_addr_t desc_phys;
 	} tx_buf_ring;
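A brief note on the trade-off behind the header change: tx_buf_ring.pages now carries a dma_addr_t per page in addition to the pointer, a small memory cost, but the free path no longer depends on the descriptor contents at all. The removed le32_to_cpu(desc[i].buf0) could only ever recover a 32-bit value in any case, so recording the full dma_addr_t at map time is the more robust source of truth for the unmap.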