@@ -19,6 +19,7 @@ struct udmabuf {
 	u32 flags;
 	pgoff_t pagecount;
 	struct page **pages;
+	struct sg_table *sg;
 	struct miscdevice *device;
 };
 
@@ -105,8 +106,12 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
 static void release_udmabuf(struct dma_buf *buf)
 {
 	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
 	pgoff_t pg;
 
+	if (ubuf->sg)
+		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
+
 	for (pg = 0; pg < ubuf->pagecount; pg++)
 		put_page(ubuf->pages[pg]);
 	kfree(ubuf->pages);
@@ -127,6 +132,39 @@ static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num,
 	kunmap(vaddr);
 }
 
+static int begin_cpu_udmabuf(struct dma_buf *buf,
+			     enum dma_data_direction direction)
+{
+	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
+
+	if (!ubuf->sg) {
+		ubuf->sg = get_sg_table(dev, buf, direction);
+		if (IS_ERR(ubuf->sg))
+			return PTR_ERR(ubuf->sg);
+	} else {
+		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl,
+				    ubuf->sg->nents,
+				    direction);
+	}
+
+	return 0;
+}
+
+static int end_cpu_udmabuf(struct dma_buf *buf,
+			   enum dma_data_direction direction)
+{
+	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
+
+	if (!ubuf->sg)
+		return -EINVAL;
+
+	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+	return 0;
+}
+
+
 static const struct dma_buf_ops udmabuf_ops = {
 	.cache_sgt_mapping = true,
 	.map_dma_buf = map_udmabuf,
@@ -135,6 +173,8 @@ static const struct dma_buf_ops udmabuf_ops = {
 	.map = kmap_udmabuf,
 	.unmap = kunmap_udmabuf,
 	.mmap = mmap_udmabuf,
+	.begin_cpu_access = begin_cpu_udmabuf,
+	.end_cpu_access = end_cpu_udmabuf,
 };
 
 #define SEALS_WANTED (F_SEAL_SHRINK)
With the misc device, we should end up using the result of
get_arch_dma_ops(..) or dma-direct ops. This can allow us to have WC
mappings in the guest after synchronization.

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
---
 drivers/dma-buf/udmabuf.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)
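
For reference, the new hooks are reached from userspace through the
DMA_BUF_IOCTL_SYNC ioctl on the exported dma-buf fd. Below is a minimal,
hypothetical test sketch (not part of this patch) that creates a udmabuf
from a sealed memfd and brackets a CPU write with SYNC_START/SYNC_END;
error handling is elided and a 4 KiB page size is assumed.

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/dma-buf.h>
#include <linux/udmabuf.h>

int main(void)
{
	const size_t size = 4096;	/* assumes PAGE_SIZE == 4096 */

	/* Back the buffer with a memfd sealed against shrinking,
	 * which matches SEALS_WANTED above. */
	int memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
	ftruncate(memfd, size);
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

	/* Wrap the memfd pages in a dma-buf via /dev/udmabuf. */
	struct udmabuf_create create = {
		.memfd  = memfd,
		.flags  = UDMABUF_FLAGS_CLOEXEC,
		.offset = 0,
		.size   = size,
	};
	int devfd = open("/dev/udmabuf", O_RDWR);
	int buffd = ioctl(devfd, UDMABUF_CREATE, &create);

	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, buffd, 0);

	/* SYNC_START ends up in begin_cpu_udmabuf(). */
	struct dma_buf_sync sync = {
		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW,
	};
	ioctl(buffd, DMA_BUF_IOCTL_SYNC, &sync);

	memset(ptr, 0xab, size);	/* CPU access */

	/* SYNC_END ends up in end_cpu_udmabuf(). */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
	ioctl(buffd, DMA_BUF_IOCTL_SYNC, &sync);

	munmap(ptr, size);
	close(buffd);
	close(devfd);
	close(memfd);
	return 0;
}

The first SYNC_START lazily builds ubuf->sg with get_sg_table() against
the misc device, which is what routes the mapping through
get_arch_dma_ops(..) or dma-direct as described above; later SYNC_START
and SYNC_END calls become dma_sync_sg_for_cpu() and
dma_sync_sg_for_device() on that cached table.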