@@ -133,6 +133,8 @@ struct dma_map_ops {
 	u64 (*get_required_mask)(struct device *dev);
 };
 
+#define DMA_MAPPING_ERROR	(~(dma_addr_t)0)
+
 extern const struct dma_map_ops dma_direct_ops;
 extern const struct dma_map_ops dma_virt_ops;
 
@@ -576,6 +578,10 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	debug_dma_mapping_error(dev, dma_addr);
+
+	if (dma_addr == DMA_MAPPING_ERROR)
+		return 1;
+
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 	return 0;
Error handling of the dma_map_single and dma_map_page APIs is a little
problematic at the moment, in that we use different encodings in the
returned dma_addr_t to indicate an error.  That means we require an
additional indirect call to figure out if a dma mapping call returned
an error, and a lot of boilerplate code to implement these semantics.

Instead, return the maximum addressable value as the error.  As long as
we don't allow mapping single-byte ranges with single-byte alignment,
this value can never be a valid return.  Additionally, if drivers do
not check the return value from the dma_map* routines, this value means
they will generally not be pointing to actual memory.

Once the default value is added here we can start removing the various
mapping_error methods and just rely on this generic check.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/dma-mapping.h | 6 ++++++
 1 file changed, 6 insertions(+)
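
As an illustrative aside (not part of the patch): the driver-side pattern
this simplifies stays the same, i.e. call dma_mapping_error() after every
dma_map_single()/dma_map_page(), but with this change the check reduces to
a plain comparison against DMA_MAPPING_ERROR instead of an indirect
->mapping_error() call.  The names "dev", "buf" and "len" below are
placeholders, not taken from the patch:

	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		/*
		 * Mapping failed; addr holds the sentinel value
		 * (DMA_MAPPING_ERROR) and must not be handed to hardware.
		 */
		return -ENOMEM;
	}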