@@ -75,6 +75,10 @@ struct dma_map_ops {
int nents, enum dma_data_direction dir);
void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir);
+ void (*sync_bvecs_for_cpu)(struct device *dev, struct bio_vec *bvecs,
+ int nents, enum dma_data_direction dir);
+ void (*sync_bvecs_for_device)(struct device *dev, struct bio_vec *bvecs,
+ int nents, enum dma_data_direction dir);
void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
int (*dma_supported)(struct device *dev, u64 mask);
@@ -126,6 +126,10 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir);
+void dma_sync_bvecs_for_cpu(struct device *dev, struct bio_vec *bvecs,
+ int nelems, enum dma_data_direction dir);
+void dma_sync_bvecs_for_device(struct device *dev, struct bio_vec *bvecs,
+ int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
@@ -385,6 +385,34 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
 
+void dma_sync_bvecs_for_cpu(struct device *dev, struct bio_vec *bvecs,
+ int nelems, enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (dma_map_direct(dev, ops))
+ dma_direct_sync_bvecs_for_cpu(dev, bvecs, nelems, dir);
+ else if (ops->sync_bvecs_for_cpu)
+ ops->sync_bvecs_for_cpu(dev, bvecs, nelems, dir);
+ debug_dma_sync_bvecs_for_cpu(dev, bvecs, nelems, dir);
+}
+EXPORT_SYMBOL(dma_sync_bvecs_for_cpu);
+
+void dma_sync_bvecs_for_device(struct device *dev, struct bio_vec *bvecs,
+ int nelems, enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (dma_map_direct(dev, ops))
+ dma_direct_sync_bvecs_for_device(dev, bvecs, nelems, dir);
+ else if (ops->sync_bvecs_for_device)
+ ops->sync_bvecs_for_device(dev, bvecs, nelems, dir);
+ debug_dma_sync_bvecs_for_device(dev, bvecs, nelems, dir);
+}
+EXPORT_SYMBOL(dma_sync_bvecs_for_device);
+
/*
* The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
* that the intention is to allow exporting memory allocated via the