[RFC,2/3] i915/drm: Increase the utilization of stolen memory on VLV

Message ID 1394019340-8811-3-git-send-email-sourab.gupta@intel.com (mailing list archive)
State New, archived

Commit Message

sourab.gupta@intel.com March 5, 2014, 11:35 a.m. UTC
From: Sourab Gupta <sourab.gupta@intel.com>

On VLV, 64MB of system memory was being reserved for the stolen
area, but only ~8MB of it was actually being utilized.
For buffer objects which do not need to be CPU mappable, we can
allocate the backing space from stolen memory, thus increasing the
utilization of the stolen memory area (a toy model of this decision
is sketched after the sign-offs below).

Testcase: igt/gem_stolen_mem

Signed-off-by: Sourab Gupta <sourab.gupta@intel.com>

Signed-off-by: Akash Goel <akash.goel@intel.com>
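
As a rough illustration of the decision described above, here is a minimal
user-space toy model. All names in it (toy_obj, move_to_stolen, pin,
stolen_free) are hypothetical stand-ins; only the cpu_map_not_needed flag and
the try-stolen-then-fall-back-to-shmem behaviour come from this series, and
the real driver code follows in the patch below.

/* Toy user-space model of the new allocation decision (hypothetical names;
 * not the real i915 code). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_obj {
	size_t size;
	bool cpu_map_not_needed;	/* userspace will never need a CPU map */
	bool backed_by_stolen;
	bool backed_by_shmem;
};

static size_t stolen_free = 64u << 20;	/* pretend 64MB stolen area */

/* Mirrors the intent of i915_gem_object_move_to_stolen(): best effort only */
static void move_to_stolen(struct toy_obj *obj)
{
	if (!obj->cpu_map_not_needed || obj->backed_by_shmem)
		return;				/* shmem pages already allocated */
	if (obj->size > stolen_free)
		return;				/* ran out of stolen space */
	stolen_free -= obj->size;
	obj->backed_by_stolen = true;
}

/* Mirrors the hook added to i915_gem_object_pin(): try stolen, else shmem */
static void pin(struct toy_obj *obj)
{
	move_to_stolen(obj);
	if (!obj->backed_by_stolen)
		obj->backed_by_shmem = true;	/* fallback path */
}

int main(void)
{
	struct toy_obj a = { .size = 16u << 20, .cpu_map_not_needed = true };
	struct toy_obj b = { .size = 16u << 20, .cpu_map_not_needed = false };

	pin(&a);
	pin(&b);
	printf("a in stolen: %d, b in stolen: %d\n",
	       a.backed_by_stolen, b.backed_by_stolen);
	return 0;
}

The actual i915_gem_object_move_to_stolen() below additionally skips the move
once shmem pages have been allocated for the object, which it detects via the
shmem inode's 'alloced' count.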
---
 drivers/gpu/drm/i915/i915_drv.h        |    1 +
 drivers/gpu/drm/i915/i915_gem.c        |   11 ++++
 drivers/gpu/drm/i915/i915_gem_stolen.c |   94 ++++++++++++++++++++++++++++++++
 3 files changed, 106 insertions(+)

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8a066a7..b5f603f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2441,6 +2441,7 @@  i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 					       u32 stolen_offset,
 					       u32 gtt_offset,
 					       u32 size);
+void i915_gem_object_move_to_stolen(struct drm_i915_gem_object *obj);
 void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
 
 /* i915_gem_tiling.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8421b80..f57ca31 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3854,6 +3854,17 @@  i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		}
 	}
 
+	/* Try to allocate the physical space for the GEM object,
+	 * for which a CPU map is not required, from the stolen area.
+	 * But if there is not sufficient free space left in the stolen
+	 * area, we will fall back to shmem.
+	 */
+	if (obj->cpu_map_not_needed == 1) {
+		if (obj->pages == NULL) {
+			i915_gem_object_move_to_stolen(obj);
+		}
+	}
+
 	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
 		vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
 		if (IS_ERR(vma))
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index d58b4e2..6758ba4 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -29,6 +29,7 @@ 
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include <linux/shmem_fs.h>
 
 /*
  * The BIOS typically reserves some of the system's memory for the exclusive
@@ -447,6 +448,99 @@  err_out:
 }
 
 void
+i915_gem_object_move_to_stolen(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mm_node *stolen;
+	u32 size = obj->base.size;
+	int ret = 0;
+
+	if (!IS_VALLEYVIEW(dev)) {
+		return;
+	}
+
+	if (obj->stolen) {
+		BUG_ON(obj->pages == NULL);
+		return;
+	}
+
+	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+		return;
+
+	if (size == 0)
+		return;
+
+	/* Check whether shmem space has already been allocated for the
+	 * object. We cannot rely upon the value of the 'pages' field for
+	 * this: even if 'pages' is NULL, it does not necessarily mean that
+	 * no backing physical space (shmem) is currently reserved for the
+	 * object, as the object may not get purged/truncated on the call
+	 * to 'put_pages_gtt'.
+	 */
+	if (obj->base.filp) {
+		struct inode *inode = file_inode(obj->base.filp);
+		struct shmem_inode_info *info = SHMEM_I(inode);
+		if (!inode)
+			return;
+		spin_lock(&info->lock);
+		/* The alloced field stores how many data pages are
+		 * allocated to the file.
+		 */
+		ret = info->alloced;
+		spin_unlock(&info->lock);
+		if (ret > 0) {
+			DRM_DEBUG_DRIVER(
+				"shmem space already allocated, %d pages\n", ret);
+			return;
+		}
+	}
+
+	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+	if (!stolen)
+		return;
+
+	ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
+				 4096, DRM_MM_SEARCH_DEFAULT);
+	if (ret) {
+		kfree(stolen);
+		DRM_DEBUG_DRIVER("ran out of stolen space\n");
+		return;
+	}
+
+	/* Set up the object to use the stolen memory; its backing
+	 * store is no longer managed by the shmem layer. */
+	drm_gem_object_release(&(obj->base));
+	obj->base.filp = NULL;
+	obj->ops = &i915_gem_object_stolen_ops;
+
+	obj->pages = i915_pages_create_for_stolen(dev,
+						stolen->start, stolen->size);
+	if (obj->pages == NULL)
+		goto cleanup;
+
+	i915_gem_object_pin_pages(obj);
+	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+	obj->has_dma_mapping = true;
+	obj->stolen = stolen;
+
+	DRM_DEBUG_DRIVER("Obj moved to stolen, ptr = %p, size = %x\n",
+			 obj, size);
+
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
+
+	/* No zeroing-out of buffers allocated from stolen area */
+	return;
+
+cleanup:
+	drm_mm_remove_node(stolen);
+	kfree(stolen);
+	return;
+}
+
+
+void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
 	if (obj->stolen) {