@@ -35,6 +35,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include <linux/xarray.h>
#ifdef CONFIG_SLUB_DEBUG_ON
#define DMAPOOL_DEBUG 1
@@ -59,6 +60,7 @@ struct dma_pool { /* the pool */
unsigned int boundary;
char name[32];
struct list_head pools;
+ struct xarray block_map;
};
struct dma_page { /* cacheable header for 'allocation' bytes */
@@ -96,23 +98,7 @@ static DEVICE_ATTR_RO(pools);
static struct dma_block *pool_find_block(struct dma_pool *pool, void *vaddr)
{
- struct dma_page *page;
- size_t offset, index;
-
- list_for_each_entry(page, &pool->page_list, page_list) {
- if (vaddr < page->vaddr)
- continue;
- offset = vaddr - page->vaddr;
- if (offset >= pool->allocation)
- continue;
-
- index = offset / pool->size;
- if (index >= page->blocks_per_page)
- return NULL;
-
- return &page->blocks[index];
- }
- return NULL;
+ return xa_load(&pool->block_map, (unsigned long)vaddr);
}
#ifdef DMAPOOL_DEBUG
@@ -273,6 +259,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
retval->boundary = boundary;
retval->allocation = allocation;
INIT_LIST_HEAD(&retval->pools);
+ xa_init(&retval->block_map);
/*
* pools_lock ensures that the ->dma_pools list does not get corrupted.
@@ -324,6 +311,13 @@ static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
 		block->dma = page->dma + offset;
 		block->next_block = NULL;
+		/* NOTE: pool_initialise_page() is called with pool->lock held
+		 * (spin_lock_irqsave in dma_pool_alloc), so the xarray store
+		 * must not sleep — GFP_KERNEL here would be a sleeping
+		 * allocation in atomic context. Use GFP_ATOMIC instead. */
+		if (xa_err(xa_store(&pool->block_map, (unsigned long)block->vaddr,
+				    block, GFP_ATOMIC))) {
+			pr_err("dma_pool: Failed to store block in xarray\n");
+			return;
+		}
+
 		if (last)
 			last->next_block = block;
 		else
@@ -385,6 +378,7 @@ void dma_pool_destroy(struct dma_pool *pool)
if (unlikely(!pool))
return;
+ xa_destroy(&pool->block_map);
mutex_lock(&pools_reg_lock);
mutex_lock(&pools_lock);
list_del(&pool->pools);