@@ -203,6 +203,9 @@ config HAVE_DMA_ATTRS
config HAVE_DMA_CONTIGUOUS
bool
+config HAVE_DMA_PFN
+ bool
+
config GENERIC_SMP_IDLE_THREAD
bool
@@ -17,9 +17,15 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
kmemcheck_mark_initialized(ptr, size);
BUG_ON(!valid_dma_direction(dir));
+#ifdef CONFIG_HAVE_DMA_PFN
+ addr = ops->map_pfn(dev, page_to_pfn_t(virt_to_page(ptr)),
+ (unsigned long)ptr & ~PAGE_MASK, size,
+ dir, attrs);
+#else
addr = ops->map_page(dev, virt_to_page(ptr),
(unsigned long)ptr & ~PAGE_MASK, size,
dir, attrs);
+#endif
debug_dma_map_page(dev, virt_to_page(ptr),
(unsigned long)ptr & ~PAGE_MASK, size,
dir, addr, true);
@@ -73,6 +79,29 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
ops->unmap_sg(dev, sg, nents, dir, attrs);
}
+#ifdef CONFIG_HAVE_DMA_PFN
+static inline dma_addr_t dma_map_pfn(struct device *dev, __pfn_t pfn,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ BUG_ON(!valid_dma_direction(dir));
+ addr = ops->map_pfn(dev, pfn, offset, size, dir, NULL);
+ debug_dma_map_pfn(dev, pfn, offset, size, dir, addr, false);
+
+ return addr;
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ kmemcheck_mark_initialized(page_address(page) + offset, size);
+ return dma_map_pfn(dev, page_to_pfn_t(page), offset, size, dir);
+}
+#else
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
enum dma_data_direction dir)
@@ -87,6 +116,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
return addr;
}
+#endif /* CONFIG_HAVE_DMA_PFN */
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
@@ -34,10 +34,18 @@ extern void dma_debug_init(u32 num_entries);
extern int dma_debug_resize_entries(u32 num_entries);
-extern void debug_dma_map_page(struct device *dev, struct page *page,
- size_t offset, size_t size,
- int direction, dma_addr_t dma_addr,
- bool map_single);
+extern void debug_dma_map_pfn(struct device *dev, __pfn_t pfn, size_t offset,
+ size_t size, int direction, dma_addr_t dma_addr,
+ bool map_single);
+
+static inline void debug_dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ int direction, dma_addr_t dma_addr,
+ bool map_single)
+{
+ return debug_dma_map_pfn(dev, page_to_pfn_t(page), offset, size,
+ direction, dma_addr, map_single);
+}
extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
@@ -109,6 +117,13 @@ static inline void debug_dma_map_page(struct device *dev, struct page *page,
{
}
+static inline void debug_dma_map_pfn(struct device *dev, __pfn_t pfn,
+ size_t offset, size_t size,
+ int direction, dma_addr_t dma_addr,
+ bool map_single)
+{
+}
+
static inline void debug_dma_mapping_error(struct device *dev,
dma_addr_t dma_addr)
{
@@ -26,11 +26,17 @@ struct dma_map_ops {
int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
dma_addr_t, size_t, struct dma_attrs *attrs);
-
+#ifdef CONFIG_HAVE_DMA_PFN
+ dma_addr_t (*map_pfn)(struct device *dev, __pfn_t pfn,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+#else
dma_addr_t (*map_page)(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs);
+#endif
void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs);
@@ -1250,11 +1250,12 @@ out:
put_hash_bucket(bucket, &flags);
}
-void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
+void debug_dma_map_pfn(struct device *dev, __pfn_t pfn, size_t offset,
size_t size, int direction, dma_addr_t dma_addr,
bool map_single)
{
struct dma_debug_entry *entry;
+ struct page *page;
if (unlikely(dma_debug_disabled()))
return;
@@ -1268,7 +1269,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
entry->dev = dev;
entry->type = dma_debug_page;
- entry->pfn = page_to_pfn(page);
+ entry->pfn = __pfn_t_to_pfn(pfn);
entry->offset = offset,
entry->dev_addr = dma_addr;
entry->size = size;
@@ -1278,7 +1279,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
if (map_single)
entry->type = dma_debug_single;
- if (!PageHighMem(page)) {
+ page = __pfn_t_to_page(pfn);
+ if (page && !PageHighMem(page)) {
void *addr = page_address(page) + offset;
check_for_stack(dev, addr);
@@ -1287,7 +1289,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
add_dma_entry(entry);
}
-EXPORT_SYMBOL(debug_dma_map_page);
+EXPORT_SYMBOL(debug_dma_map_pfn);
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
This is in support of enabling block device drivers to perform DMA
to/from persistent memory, which may not have a backing struct page
entry.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 arch/Kconfig                             |  3 +++
 include/asm-generic/dma-mapping-common.h | 30 ++++++++++++++++++++++++++++++
 include/linux/dma-debug.h                | 23 +++++++++++++++++++----
 include/linux/dma-mapping.h              |  8 +++++++-
 lib/dma-debug.c                          | 10 ++++++----
 5 files changed, 65 insertions(+), 9 deletions(-)
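
[Illustrative sketch, not part of the patch] To make the new hook
concrete, the fragment below shows how an arch that selects
HAVE_DMA_PFN might provide ->map_pfn(), and how a driver could then
map a persistent-memory frame that has no backing struct page. Apart
from the interfaces added above, everything here is a hypothetical
assumption: example_map_pfn(), example_dma_ops, and
example_dma_to_pmem() exist nowhere, and a real ->map_pfn() would
also have to handle IOMMU translation and bounce buffering.

#include <linux/dma-mapping.h>

/*
 * Hypothetical arch-side hook: translate the pfn straight to a bus
 * address. A real implementation would populate the remaining
 * dma_map_ops methods and deal with IOMMUs / bounce buffers.
 */
static dma_addr_t example_map_pfn(struct device *dev, __pfn_t pfn,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	return ((dma_addr_t)__pfn_t_to_pfn(pfn) << PAGE_SHIFT) + offset;
}

static struct dma_map_ops example_dma_ops = {
	.map_pfn = example_map_pfn,
};

/*
 * Hypothetical driver-side usage: map a persistent-memory frame by
 * __pfn_t with no struct page involved, hand the bus address to the
 * device, then unmap by dma address as usual.
 */
static int example_dma_to_pmem(struct device *dev, __pfn_t pmem_pfn,
			       size_t offset, size_t len)
{
	dma_addr_t dma = dma_map_pfn(dev, pmem_pfn, offset, len,
				     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the device with 'dma', wait for completion ... */

	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}

Note that the unmap path needs no pfn-aware counterpart: the existing
->unmap_page() keys off the returned dma_addr_t, which is why the
patch leaves dma_unmap_page() untouched.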