@@ -406,9 +406,14 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 		if (!t)
 			return NULL;
 
-		dma_unmap_single(dev->dma_dev, t->dma_addr,
-				 SKB_WITH_OVERHEAD(q->buf_size),
-				 DMA_FROM_DEVICE);
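+		/* pages that came from the page_pool stay DMA-mapped while the
+		 * pool owns them, so a CPU sync is enough here; legacy buffers
+		 * still need a full unmap.
+		 */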
+		if (mt76_is_page_from_pp(t->ptr))
+			dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
+						SKB_WITH_OVERHEAD(q->buf_size),
+						page_pool_get_dma_dir(q->page_pool));
+		else
+			dma_unmap_single(dev->dma_dev, t->dma_addr,
+					 SKB_WITH_OVERHEAD(q->buf_size),
+					 DMA_FROM_DEVICE);
 
 		buf = t->ptr;
 		t->dma_addr = 0;
@@ -425,9 +430,14 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	} else {
 		buf = e->buf;
 		e->buf = NULL;
-		dma_unmap_single(dev->dma_dev, e->dma_addr[0],
-				 SKB_WITH_OVERHEAD(q->buf_size),
-				 DMA_FROM_DEVICE);
+		if (mt76_is_page_from_pp(buf))
+			dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
+						SKB_WITH_OVERHEAD(q->buf_size),
+						page_pool_get_dma_dir(q->page_pool));
+		else
+			dma_unmap_single(dev->dma_dev, e->dma_addr[0],
+					 SKB_WITH_OVERHEAD(q->buf_size),
+					 DMA_FROM_DEVICE);
 	}
 
 	return buf;
@@ -591,6 +601,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 	spin_lock_bh(&q->lock);
 
 	while (q->queued < q->ndesc - 1) {
+		enum dma_data_direction dir;
 		struct mt76_queue_buf qbuf;
 		dma_addr_t addr;
 		int offset;
@@ -600,19 +611,14 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 		if (!buf)
 			break;
 
-		addr = dma_map_single(dev->dma_dev, buf + offset, len,
-				      DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
-			mt76_put_page_pool_buf(buf, allow_direct);
-			break;
-		}
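+		/* for mmio devices the pool is created with PP_FLAG_DMA_MAP,
+		 * so the page already carries a DMA mapping; reuse it and
+		 * only sync the buffer back to the device.
+		 */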
+		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
+		dir = page_pool_get_dma_dir(q->page_pool);
+		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
 
 		qbuf.addr = addr + q->buf_offset;
 		qbuf.len = len - q->buf_offset;
 		qbuf.skip_unmap = false;
 		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
-			dma_unmap_single(dev->dma_dev, addr, len,
-					 DMA_FROM_DEVICE);
 			mt76_put_page_pool_buf(buf, allow_direct);
 			break;
 		}
@@ -567,6 +567,14 @@ int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
 		.dev = dev->dma_dev,
 	};
 
+	if (mt76_is_mmio(dev)) {
+		/* rely on page_pool for DMA mapping */
+		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+		pp_params.dma_dir = DMA_FROM_DEVICE;
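+		/* with PP_FLAG_DMA_SYNC_DEV the pool syncs max_len bytes
+		 * starting at offset back to the device whenever a page is
+		 * recycled.
+		 */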
+		pp_params.max_len = PAGE_SIZE;
+		pp_params.offset = 0;
+	}
+
 	q->page_pool = page_pool_create(&pp_params);
 	if (IS_ERR(q->page_pool)) {
 		int err = PTR_ERR(q->page_pool);
@@ -604,8 +604,9 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 		if (!t || !t->ptr)
 			continue;
 
-		dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
-				 wed->wlan.rx_size, DMA_FROM_DEVICE);
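+		/* page_pool-owned pages are unmapped by the pool itself when
+		 * they are released; only legacy buffers need an explicit
+		 * unmap here.
+		 */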
+		if (!mt76_is_page_from_pp(t->ptr))
+			dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
+					 wed->wlan.rx_size, DMA_FROM_DEVICE);
 		mt76_put_page_pool_buf(t->ptr, false);
 		t->ptr = NULL;