[RFC,6/6] udmabuf: implement begin_cpu_access/end_cpu_access hooks

Message ID 20190801022517.1903-7-gurchetansingh@chromium.org (mailing list archive)
State New, archived
Series udmabuf guest <--> host interaction model

Commit Message

Gurchetan Singh Aug. 1, 2019, 2:25 a.m. UTC
With the misc device, we should end up using the dma-direct ops.
This can allow us to have write-combined (WC) mappings in the
guest after some synchronization, provided we disallow cached
mappings in the host.

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
---
 drivers/dma-buf/udmabuf.c | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)
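For reference, these hooks are what DMA_BUF_IOCTL_SYNC on the exported
fd ends up calling. Below is a minimal userspace sketch (not part of
this patch; error handling omitted, buffer size and memfd name are
arbitrary) showing how a CPU write to a udmabuf would be bracketed by
the sync ioctls:

/*
 * Hypothetical test program: create a udmabuf from a sealed memfd and
 * wrap a CPU write in DMA_BUF_SYNC_START/END, which reach the
 * begin_cpu_access/end_cpu_access hooks added by this patch.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>
#include <linux/udmabuf.h>

int main(void)
{
	size_t size = 2 * 1024 * 1024;          /* arbitrary, page aligned */
	int memfd, devfd, buffd;
	void *map;
	struct udmabuf_create create = { 0 };
	struct dma_buf_sync sync = { 0 };

	/* udmabuf only accepts sealed memfd-backed memory. */
	memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
	ftruncate(memfd, size);
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

	devfd = open("/dev/udmabuf", O_RDWR);
	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;
	create.size   = size;
	buffd = ioctl(devfd, UDMABUF_CREATE, &create);

	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, buffd, 0);

	/* DMA_BUF_SYNC_START -> begin_cpu_udmabuf() */
	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	ioctl(buffd, DMA_BUF_IOCTL_SYNC, &sync);

	memset(map, 0, size);                   /* CPU access happens here */

	/* DMA_BUF_SYNC_END -> end_cpu_udmabuf() */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	ioctl(buffd, DMA_BUF_IOCTL_SYNC, &sync);

	munmap(map, size);
	close(buffd);
	close(devfd);
	close(memfd);
	return 0;
}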

Patch

diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 5f8bee1862de..52de7ba1e712 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -19,6 +19,7 @@  struct udmabuf {
 	u32 flags;
 	pgoff_t pagecount;
 	struct page **pages;
+	struct sg_table *sg;
 	struct miscdevice *udmabuf_misc;
 };
 
@@ -106,8 +107,12 @@  static void unmap_udmabuf(struct dma_buf_attachment *at,
 static void release_udmabuf(struct dma_buf *buf)
 {
 	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->udmabuf_misc->this_device;
 	pgoff_t pg;
 
+	if (ubuf->sg)
+		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
+
 	for (pg = 0; pg < ubuf->pagecount; pg++)
 		put_page(ubuf->pages[pg]);
 	kfree(ubuf->pages);
@@ -128,6 +133,38 @@  static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num,
 	kunmap(vaddr);
 }
 
+static int begin_cpu_udmabuf(struct dma_buf *buf,
+			     enum dma_data_direction direction)
+{
+	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->udmabuf_misc->this_device;
+
+	if (!ubuf->sg) {
+		ubuf->sg = get_sg_table(dev, buf, direction);
+		if (IS_ERR(ubuf->sg))
+			return PTR_ERR(ubuf->sg);
+	} else {
+		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl,
+				    ubuf->sg->nents,
+				    direction);
+	}
+
+	return 0;
+}
+
+static int end_cpu_udmabuf(struct dma_buf *buf,
+			   enum dma_data_direction direction)
+{
+	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->udmabuf_misc->this_device;
+
+	if (!ubuf->sg)
+		return -EINVAL;
+
+	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+	return 0;
+}
+
 static const struct dma_buf_ops udmabuf_ops = {
 	.cache_sgt_mapping = true,
 	.map_dma_buf	   = map_udmabuf,
@@ -136,6 +173,8 @@  static const struct dma_buf_ops udmabuf_ops = {
 	.map		   = kmap_udmabuf,
 	.unmap		   = kunmap_udmabuf,
 	.mmap		   = mmap_udmabuf,
+	.begin_cpu_access  = begin_cpu_udmabuf,
+	.end_cpu_access    = end_cpu_udmabuf,
 };
 
 #define SEALS_WANTED (F_SEAL_SHRINK)
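In-kernel importers reach the same hooks through
dma_buf_begin_cpu_access()/dma_buf_end_cpu_access(). A rough sketch
(hypothetical importer code, not part of this patch; "dmabuf" is
assumed to have been obtained via dma_buf_get() elsewhere):

/* Bracket a CPU read of an imported udmabuf. */
static int example_cpu_read(struct dma_buf *dmabuf)
{
	int ret;

	/* Invokes begin_cpu_udmabuf(): map or sync the sg table for the CPU. */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... vmap/kmap the buffer and read it here ... */

	/* Invokes end_cpu_udmabuf(): hand the pages back to the device. */
	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}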