@@ -271,6 +271,14 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
const struct xdp_mem_info *mem);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
int count);
+
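+/* Bind a provider-supplied array of pages to @area's net_iovs and create a
+ * DMA mapping for each page; undone by page_pool_release_area().
+ */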
+int page_pool_init_paged_area(struct page_pool *pool,
+ struct net_iov_area *area, struct page **pages);
+void page_pool_release_area(struct page_pool *pool,
+ struct net_iov_area *area);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
@@ -286,6 +294,18 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
int count)
{
}
+
+static inline int page_pool_init_paged_area(struct page_pool *pool,
+ struct net_iov_area *area,
+ struct page **pages)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void page_pool_release_area(struct page_pool *pool,
+ struct net_iov_area *area)
+{
+}
#endif

void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
@@ -459,7 +459,11 @@ page_pool_dma_sync_for_device(const struct page_pool *pool,
__page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
}

-static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
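+/* Map @page for DMA and record the address against @netmem, so providers
+ * can map pages that back net_iovs as well as page-backed netmems.
+ */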
+static bool page_pool_dma_map_page(struct page_pool *pool, netmem_ref netmem,
+ struct page *page)
{
dma_addr_t dma;

@@ -468,7 +472,7 @@ static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
* into page private data (i.e 32bit cpu with 64bit DMA caps)
* This mapping is kept for lifetime of page, until leaving pool.
*/
- dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0,
+ dma = dma_map_page_attrs(pool->p.dev, page, 0,
(PAGE_SIZE << pool->p.order), pool->p.dma_dir,
DMA_ATTR_SKIP_CPU_SYNC |
DMA_ATTR_WEAK_ORDERING);
@@ -490,6 +494,11 @@ static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
return false;
}

+static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
+{
+ return page_pool_dma_map_page(pool, netmem, netmem_to_page(netmem));
+}
+
static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
gfp_t gfp)
{
@@ -1154,3 +1163,58 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid)
}
}
EXPORT_SYMBOL(page_pool_update_nid);
+
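+/* Release the DMA mapping previously set up by page_pool_dma_map_page(). */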
+static void page_pool_release_page_dma(struct page_pool *pool,
+ netmem_ref netmem)
+{
+ __page_pool_release_page_dma(pool, netmem);
+}
+
+int page_pool_init_paged_area(struct page_pool *pool,
+ struct net_iov_area *area, struct page **pages)
+{
+ struct net_iov *niov;
+ netmem_ref netmem;
+ int i, ret = 0;
+
+ if (!pool->dma_map)
+ return -EOPNOTSUPP;
+
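+	/* Bind each net_iov to this pool and DMA-map its backing page. */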
+ for (i = 0; i < area->num_niovs; i++) {
+ niov = &area->niovs[i];
+ netmem = net_iov_to_netmem(niov);
+
+ page_pool_set_pp_info(pool, netmem);
+ if (!page_pool_dma_map_page(pool, netmem, pages[i])) {
+ ret = -EINVAL;
+ goto err_unmap_dma;
+ }
+ }
+ return 0;
+
+err_unmap_dma:
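+	/* i is the entry that failed to map; unwind everything before it. */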
+ while (i--) {
+ netmem = net_iov_to_netmem(&area->niovs[i]);
+ page_pool_release_page_dma(pool, netmem);
+ }
+ return ret;
+}
+
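+/* Undo page_pool_init_paged_area(): drop the DMA mapping of every net_iov. */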
+void page_pool_release_area(struct page_pool *pool,
+ struct net_iov_area *area)
+{
+ int i;
+
+ if (!pool->dma_map)
+ return;
+
+ for (i = 0; i < area->num_niovs; i++) {
+ struct net_iov *niov = &area->niovs[i];
+
+ page_pool_release_page_dma(pool, net_iov_to_netmem(niov));
+ }
+}
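
A minimal usage sketch for the two new helpers, assuming a hypothetical
memory provider; my_provider_attach(), my_provider_detach() and the
pre-pinned pages[] array are illustrative names, not part of this patch:

	/* Attach: bind the provider's pages to the pool's niov area. */
	static int my_provider_attach(struct page_pool *pool,
				      struct net_iov_area *area,
				      struct page **pages)
	{
		/* Returns -EOPNOTSUPP for pools created without DMA
		 * mapping, -EINVAL if any page fails to map; mappings
		 * made before a failure are unwound automatically.
		 */
		return page_pool_init_paged_area(pool, area, pages);
	}

	/* Detach mirrors attach. */
	static void my_provider_detach(struct page_pool *pool,
				       struct net_iov_area *area)
	{
		page_pool_release_area(pool, area);
	}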