@@ -208,14 +208,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
return 0;
}
+#define count_object(obj) do { \
+ size += obj->gtt_space.size; \
+ ++count; \
+ if (obj->map_and_fenceable) { \
+ mappable_size += obj->gtt_space.size; \
+ ++mappable_count; \
+ } \
+} while(0)
+
#define count_objects(list, member) do { \
list_for_each_entry(obj, list, member) { \
- size += obj->gtt_space.size; \
- ++count; \
- if (obj->map_and_fenceable) { \
- mappable_size += obj->gtt_space.size; \
- ++mappable_count; \
- } \
+ count_object(obj); \
} \
} while(0)
@@ -226,6 +230,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 count, mappable_count;
size_t size, mappable_size;
+ struct drm_mm_node *mm_node;
struct drm_i915_gem_object *obj;
int ret;
@@ -238,7 +243,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
dev_priv->mm.object_memory);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.gtt_list, gtt_list);
+ drm_mm_for_each_node(mm_node, &dev_priv->mm.gtt_space) {
+ obj = container_of(mm_node, struct drm_i915_gem_object, gtt_space);
+ count_object(obj);
+ }
seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
count, mappable_count, size, mappable_size);
@@ -264,7 +272,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ drm_mm_for_each_node(mm_node, &dev_priv->mm.gtt_space) {
+ obj = container_of(mm_node, struct drm_i915_gem_object, gtt_space);
if (obj->fault_mappable) {
size += obj->gtt_space.size;
++count;
@@ -292,6 +301,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *mm_node;
struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
int count, ret;
@@ -301,7 +311,8 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ drm_mm_for_each_node(mm_node, &dev_priv->mm.gtt_space) {
+ obj = container_of(mm_node, struct drm_i915_gem_object, gtt_space);
seq_printf(m, " ");
describe_obj(m, obj);
seq_printf(m, "\n");
@@ -542,9 +542,6 @@ typedef struct drm_i915_private {
struct drm_mm stolen;
/** Memory allocator for GTT */
struct drm_mm gtt_space;
- /** List of all objects in gtt_space. Used to restore gtt
- * mappings on resume */
- struct list_head gtt_list;
/** Usable portion of the GTT for GEM */
unsigned long gtt_start;
@@ -722,7 +719,6 @@ struct drm_i915_gem_object {
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node gtt_space;
- struct list_head gtt_list;
/** This object's place on the active/flushing/inactive lists */
struct list_head ring_list;
@@ -2239,7 +2239,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
i915_gem_gtt_unbind_object(obj);
i915_gem_object_put_pages_gtt(obj);
- list_del_init(&obj->gtt_list);
list_del_init(&obj->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
@@ -2870,7 +2869,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
goto search_free;
}
- list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
/* Assert that the object is not currently in any GPU domain. As it
@@ -3705,7 +3703,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj->mm_list);
- INIT_LIST_HEAD(&obj->gtt_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
INIT_LIST_HEAD(&obj->gpu_write_list);
@@ -3946,7 +3943,6 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
- INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < 16; i++)
@@ -53,12 +53,14 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ struct drm_mm_node *node;
/* First fill our portion of the GTT with scratch pages */
intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ drm_mm_for_each_node(node, &dev_priv->mm.gtt_space) {
+ obj = container_of(node, struct drm_i915_gem_object, gtt_space);
i915_gem_clflush_object(obj);
i915_gem_gtt_rebind_object(obj, obj->cache_level);
}
Use the list iterator provided by drm_mm instead.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_debugfs.c | 29 ++++++++++++++++++++---------
 drivers/gpu/drm/i915/i915_drv.h     |  4 ----
 drivers/gpu/drm/i915/i915_gem.c     |  4 ----
 drivers/gpu/drm/i915/i915_gem_gtt.c |  4 +++-
 4 files changed, 23 insertions(+), 18 deletions(-)