[v2] drm/exynos: consider buffer allocation without iommu

Message ID 1357190747-20611-1-git-send-email-inki.dae@samsung.com (mailing list archive)
State New, archived

Commit Message

Inki Dae Jan. 3, 2013, 5:25 a.m. UTC
This patch fixes an issue where buffer allocation fails when it is
requested without iommu.

Without iommu, the dma_alloc_attrs function allocates a memory region
and returns its cpu address, so this patch makes sure that cpu address
is stored in buf->kvaddr.
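
As a rough sketch of that non-iommu path (illustrative only; it reuses
the buf fields from the patch below and is not verbatim driver code):

        buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size, &buf->dma_addr,
                                GFP_KERNEL, &buf->dma_attrs);
        /* The allocation is physically contiguous, so the page array can
         * be built directly from the returned DMA address. */
        for (i = 0; i < nr_pages; i++)
                buf->pages[i] = phys_to_page(buf->dma_addr + i * PAGE_SIZE);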

Changelog v2:
- fix buffer free
  . Without iommu, the dma_free_attrs function requires a kernel space
    address as its argument, so the argument is changed from buf->pages
    to buf->kvaddr (see the sketch below).
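
A minimal sketch of the corresponding non-iommu free path (again
illustrative rather than the exact hunk):

        /* Without iommu, dma_free_attrs() takes the kernel virtual address
         * returned by dma_alloc_attrs(), not the page array. */
        dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
                        buf->dma_addr, &buf->dma_attrs);
        kfree(buf->pages);      /* page array was allocated separately */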

Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_buf.c |   55 +++++++++++++++++++++++++++----
 1 files changed, 48 insertions(+), 7 deletions(-)

Patch

diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 13a8489..b7937a6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -15,6 +15,7 @@ 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
 #include "exynos_drm_buf.h"
+#include "exynos_drm_iommu.h"
 
 static int lowlevel_buffer_allocate(struct drm_device *dev,
 		unsigned int flags, struct exynos_drm_gem_buf *buf)
@@ -52,14 +53,45 @@  static int lowlevel_buffer_allocate(struct drm_device *dev,
 	dma_set_attr(attr, &buf->dma_attrs);
 	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
 
-	buf->pages = dma_alloc_attrs(dev->dev, buf->size,
-			&buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
-	if (!buf->pages) {
-		DRM_ERROR("failed to allocate buffer.\n");
-		return -ENOMEM;
+	nr_pages = buf->size >> PAGE_SHIFT;
+
+	if (!is_drm_iommu_supported(dev)) {
+		dma_addr_t start_addr;
+		unsigned int i = 0;
+
+		buf->pages = kzalloc(sizeof(struct page) * nr_pages,
+					GFP_KERNEL);
+		if (!buf->pages) {
+			DRM_ERROR("failed to allocate pages.\n");
+			return -ENOMEM;
+		}
+
+		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+					&buf->dma_addr, GFP_KERNEL,
+					&buf->dma_attrs);
+		if (!buf->kvaddr) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			kfree(buf->pages);
+			return -ENOMEM;
+		}
+
+		start_addr = buf->dma_addr;
+		while (i < nr_pages) {
+			buf->pages[i] = phys_to_page(start_addr);
+			start_addr += PAGE_SIZE;
+			i++;
+		}
+	} else {
+
+		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+					&buf->dma_addr, GFP_KERNEL,
+					&buf->dma_attrs);
+		if (!buf->pages) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			return -ENOMEM;
+		}
 	}
 
-	nr_pages = buf->size >> PAGE_SHIFT;
 	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
 	if (!buf->sgt) {
 		DRM_ERROR("failed to get sg table.\n");
@@ -78,6 +110,9 @@  err_free_attrs:
 			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
 	buf->dma_addr = (dma_addr_t)NULL;
 
+	if (!is_drm_iommu_supported(dev))
+		kfree(buf->pages);
+
 	return ret;
 }
 
@@ -100,8 +135,14 @@  static void lowlevel_buffer_deallocate(struct drm_device *dev,
 	kfree(buf->sgt);
 	buf->sgt = NULL;
 
-	dma_free_attrs(dev->dev, buf->size, buf->pages,
+	if (!is_drm_iommu_supported(dev)) {
+		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
 				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+		kfree(buf->pages);
+	} else
+		dma_free_attrs(dev->dev, buf->size, buf->pages,
+				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+
 	buf->dma_addr = (dma_addr_t)NULL;
 }