@@ -55,10 +55,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	WARN_ON(nents == 0 || sglist[0].length == 0);
 
 	for_each_sg(sglist, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
-
 		sg->dma_address = sg_phys(sg);
-		dma_sync_for_device(sg_virt(sg), sg->length, direction);
+		if (sg_has_page(sg))
+			dma_sync_for_device(sg_virt(sg), sg->length, direction);
 	}
 
 	return nents;
@@ -94,10 +93,9 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries,
 	WARN_ON(nhwentries == 0 || sglist[0].length == 0);
 
 	for_each_sg(sglist, sg, nhwentries, i) {
-		BUG_ON(!sg_page(sg));
-
 		sg->dma_address = sg_phys(sg);
-		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+		if (sg_has_page(sg))
+			dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
 	}
 }
 
@@ -140,8 +138,10 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
 	int i;
 	struct scatterlist *sg;
 
-	for_each_sg(sglist, sg, nelems, i)
-		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+	for_each_sg(sglist, sg, nelems, i) {
+		if (sg_has_page(sg))
+			dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+	}
 }
 
static inline void
@@ -151,8 +151,10 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
 	int i;
 	struct scatterlist *sg;
 
-	for_each_sg(sglist, sg, nelems, i)
-		dma_sync_for_device(sg_virt(sg), sg->length, direction);
+	for_each_sg(sglist, sg, nelems, i) {
+		if (sg_has_page(sg))
+			dma_sync_for_device(sg_virt(sg), sg->length, direction);
+	}
 }
 
static inline int
Make all cache invalidation conditional on sg_has_page().

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/metag/include/asm/dma-mapping.h | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)
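The rationale for the conditional: sg_virt() is defined as page_address(sg_page(sg)) + sg->offset, so it is only meaningful for entries backed by a struct page; a page-less entry carries a bare physical address with no kernel-virtual mapping to flush. As a minimal sketch of the predicate this patch relies on (the SG_PAGELESS name and the page_link encoding below are hypothetical illustrations, not necessarily the helper introduced earlier in this series):

#include <linux/scatterlist.h>

/* Hypothetical flag: assume entries without struct page backing are
 * marked via a spare low bit in sg->page_link (bits 0 and 1 are already
 * taken by the chain and last markers). */
#define SG_PAGELESS	0x4UL

static inline bool sg_has_page(struct scatterlist *sg)
{
	/* Page-backed entries can be reached through the kernel's
	 * linear mapping via sg_virt(); page-less ones cannot. */
	return !(sg->page_link & SG_PAGELESS);
}

Note that sg->dma_address is still set from sg_phys(sg) unconditionally; only the cache maintenance through the kernel virtual address is skipped for page-less entries, since there is no mapping to invalidate.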