@@ -135,9 +135,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (name: %d)", obj->base.name);
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
- if (obj->gtt_space != NULL)
+ if (drm_mm_node_allocated(&obj->gtt_space))
seq_printf(m, " (gtt offset: %08x, size: %08x)",
- obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+ obj->gtt_offset, (unsigned int)obj->gtt_space.size);
if (obj->pin_mappable || obj->fault_mappable) {
char s[3], *t = s;
if (obj->pin_mappable)
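
This hunk sets the pattern for the whole patch: with the node embedded in the object there is no pointer to compare against NULL, so "is this object bound?" becomes a query on the node itself via drm_mm_node_allocated(). From memory of that era's include/drm/drm_mm.h the helper is just a flag test; a sketch, not a verbatim copy:

	/* drm_mm marks a node as allocated when it is inserted */
	static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
	{
		return node->allocated;
	}

The same substitution explains the s/obj->gtt_space->size/obj->gtt_space.size/ churn throughout: pointer dereferences become member accesses on the embedded node.
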
@@ -198,7 +198,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
describe_obj(m, obj);
seq_printf(m, "\n");
total_obj_size += obj->base.size;
- total_gtt_size += obj->gtt_space->size;
+ total_gtt_size += obj->gtt_space.size;
count++;
}
mutex_unlock(&dev->struct_mutex);
@@ -210,10 +210,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
#define count_objects(list, member) do { \
list_for_each_entry(obj, list, member) { \
- size += obj->gtt_space->size; \
+ size += obj->gtt_space.size; \
++count; \
if (obj->map_and_fenceable) { \
- mappable_size += obj->gtt_space->size; \
+ mappable_size += obj->gtt_space.size; \
++mappable_count; \
} \
} \
@@ -266,11 +266,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
if (obj->fault_mappable) {
- size += obj->gtt_space->size;
+ size += obj->gtt_space.size;
++count;
}
if (obj->pin_mappable) {
- mappable_size += obj->gtt_space->size;
+ mappable_size += obj->gtt_space.size;
++mappable_count;
}
}
@@ -306,7 +306,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
describe_obj(m, obj);
seq_printf(m, "\n");
total_obj_size += obj->base.size;
- total_gtt_size += obj->gtt_space->size;
+ total_gtt_size += obj->gtt_space.size;
count++;
}
@@ -721,7 +721,7 @@ struct drm_i915_gem_object {
struct drm_gem_object base;
/** Current space allocated to this object in the GTT, if any. */
- struct drm_mm_node *gtt_space;
+ struct drm_mm_node gtt_space;
struct list_head gtt_list;
/** This object's place on the active/flushing/inactive lists */
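
This is the core of the change: struct drm_i915_gem_object now contains its drm_mm_node instead of pointing at one that drm_mm allocated on its behalf. That removes one allocation per bind/unbind cycle and eliminates the NULL-pointer "unbound" state. The conversion in a nutshell (a summary sketch assembled from the hunks below, not a quote of any single one):

	/* before: drm_mm owns the node, the object borrows a pointer */
	obj->gtt_space = drm_mm_get_block(free_space, size, alignment);
	/* ... use the binding ... */
	drm_mm_put_block(obj->gtt_space);
	obj->gtt_space = NULL;

	/* after: the node's storage lives and dies with the object */
	ret = drm_mm_insert_node(&dev_priv->mm.gtt_space,
				 &obj->gtt_space, size, alignment);
	/* ... use the binding ... */
	drm_mm_remove_node(&obj->gtt_space);
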
@@ -122,7 +122,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return obj->gtt_space && !obj->active && obj->pin_count == 0;
+ return drm_mm_node_allocated(&obj->gtt_space) &&
+ !obj->active && obj->pin_count == 0;
}
void i915_gem_do_init(struct drm_device *dev,
@@ -176,7 +177,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned = 0;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
- pinned += obj->gtt_space->size;
+ pinned += obj->gtt_space.size;
mutex_unlock(&dev->struct_mutex);
args->aper_size = dev_priv->mm.gtt_total;
@@ -1000,7 +1001,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
*/
if (obj->phys_obj)
ret = i915_gem_phys_pwrite(dev, obj, args, file);
- else if (obj->gtt_space &&
+ else if (drm_mm_node_allocated(&obj->gtt_space) &&
obj->cache_level == I915_CACHE_NONE &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_object_pin(obj, 0, true);
@@ -1227,7 +1228,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto unlock;
}
- if (!obj->gtt_space) {
+ if (!drm_mm_node_allocated(&obj->gtt_space)) {
ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
goto unlock;
@@ -2194,7 +2195,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
int ret = 0;
- if (obj->gtt_space == NULL)
+ if (!drm_mm_node_allocated(&obj->gtt_space))
return 0;
if (obj->pin_count != 0) {
@@ -2243,8 +2244,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
+ drm_mm_remove_node(&obj->gtt_space);
obj->gtt_offset = 0;
if (i915_gem_object_is_purgeable(obj))
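
Worth spelling out why no explicit "mark unbound" step survives here: drm_mm_put_block() freed a node the object merely pointed at, so the pointer had to be NULLed by hand. drm_mm_remove_node() detaches the embedded node and (in that era's drm_mm.c, if memory serves) clears its allocated flag, which is exactly what drm_mm_node_allocated() reads, so teardown and the bound-test stay in sync for free:

	drm_mm_remove_node(&obj->gtt_space);
	obj->gtt_offset = 0;
	/* drm_mm_node_allocated(&obj->gtt_space) is false from here on */
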
@@ -2319,7 +2319,7 @@ static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
+ u32 size = obj->gtt_space.size;
int regnum = obj->fence_reg;
uint64_t val;
@@ -2356,7 +2356,7 @@ static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
+ u32 size = obj->gtt_space.size;
int regnum = obj->fence_reg;
uint64_t val;
@@ -2391,7 +2391,7 @@ static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
+ u32 size = obj->gtt_space.size;
u32 fence_reg, val, pitch_val;
int tile_width;
@@ -2445,7 +2445,7 @@ static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
+ u32 size = obj->gtt_space.size;
int regnum = obj->fence_reg;
uint32_t val;
uint32_t pitch_val;
@@ -2779,7 +2779,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_mm_node *free_space;
gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
@@ -2815,27 +2814,17 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
search_free:
if (map_and_fenceable)
- free_space =
- drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+ ret =
+ drm_mm_insert_node_in_range(&dev_priv->mm.gtt_space,
+ &obj->gtt_space,
size, alignment, 0,
- dev_priv->mm.gtt_mappable_end,
- 0);
+ dev_priv->mm.gtt_mappable_end);
else
- free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- size, alignment, 0);
-
- if (free_space != NULL) {
- if (map_and_fenceable)
- obj->gtt_space =
- drm_mm_get_block_range_generic(free_space,
- size, alignment, 0,
- dev_priv->mm.gtt_mappable_end,
- 0);
- else
- obj->gtt_space =
- drm_mm_get_block(free_space, size, alignment);
- }
- if (obj->gtt_space == NULL) {
+ ret = drm_mm_insert_node(&dev_priv->mm.gtt_space,
+ &obj->gtt_space,
+ size, alignment);
+
+ if (ret != 0) {
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
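
The bind path is where the API change pays off most. The old flow was search-then-carve: drm_mm_search_free*() returned a candidate hole and drm_mm_get_block*() allocated a node out of it, with NULL signalling failure at either step. drm_mm_insert_node*() folds both steps into a single call on the caller-supplied node and reports failure as an errno, hence the switch from a NULL check to `ret != 0`. The retry-after-eviction loop around it is unchanged; roughly (the eviction helper's parameter list is recalled from the surrounding code, so treat this as a sketch):

 search_free:
	ret = drm_mm_insert_node(&dev_priv->mm.gtt_space,
				 &obj->gtt_space, size, alignment);
	if (ret != 0) {
		/* no hole found: try to evict something, then retry */
		ret = i915_gem_evict_something(dev, size, alignment,
					       map_and_fenceable);
		if (ret)
			return ret;	/* GTT is genuinely full */
		goto search_free;
	}
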
@@ -2849,8 +2838,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
if (ret) {
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
+ drm_mm_remove_node(&obj->gtt_space);
if (ret == -ENOMEM) {
/* first try to reclaim some memory by clearing the GTT */
@@ -2874,8 +2862,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
ret = i915_gem_gtt_bind_object(obj);
if (ret) {
i915_gem_object_put_pages_gtt(obj);
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
+ drm_mm_remove_node(&obj->gtt_space);
if (i915_gem_evict_everything(dev, false))
return ret;
@@ -2893,11 +2880,11 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
- obj->gtt_offset = obj->gtt_space->start;
+ obj->gtt_offset = obj->gtt_space.start;
fenceable =
- obj->gtt_space->size == fence_size &&
- (obj->gtt_space->start & (fence_alignment -1)) == 0;
+ obj->gtt_space.size == fence_size &&
+ (obj->gtt_space.start & (fence_alignment - 1)) == 0;
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@ -3006,7 +2993,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int ret;
/* Not valid to be called on unbound objects. */
- if (obj->gtt_space == NULL)
+ if (!drm_mm_node_allocated(&obj->gtt_space))
return -EINVAL;
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3066,7 +3053,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
- if (obj->gtt_space) {
+ if (drm_mm_node_allocated(&obj->gtt_space)) {
ret = i915_gem_object_finish_gpu(obj);
if (ret)
return ret;
@@ -3421,7 +3408,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
WARN_ON(i915_verify_lists(dev));
- if (obj->gtt_space != NULL) {
+ if (drm_mm_node_allocated(&obj->gtt_space)) {
if ((alignment && obj->gtt_offset & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) {
WARN(obj->pin_count,
@@ -3437,7 +3424,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
}
- if (obj->gtt_space == NULL) {
+ if (!drm_mm_node_allocated(&obj->gtt_space)) {
ret = i915_gem_object_bind_to_gtt(obj, alignment,
map_and_fenceable);
if (ret)
@@ -3463,7 +3450,7 @@ i915_gem_object_unpin(struct drm_i915_gem_object *obj)
WARN_ON(i915_verify_lists(dev));
BUG_ON(obj->pin_count == 0);
- BUG_ON(obj->gtt_space == NULL);
+ BUG_ON(!drm_mm_node_allocated(&obj->gtt_space));
if (--obj->pin_count == 0) {
if (!obj->active)
@@ -3668,7 +3655,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
/* if the object is no longer bound, discard its backing storage */
if (i915_gem_object_is_purgeable(obj) &&
- obj->gtt_space == NULL)
+ !drm_mm_node_allocated(&obj->gtt_space))
i915_gem_object_truncate(obj);
args->retained = obj->madv != __I915_MADV_PURGED;
@@ -37,7 +37,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
list_add(&obj->exec_list, unwind);
drm_gem_object_reference(&obj->base);
- return drm_mm_scan_add_block(obj->gtt_space);
+ return drm_mm_scan_add_block(&obj->gtt_space);
}
int
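
The eviction scanner already worked on drm_mm_node pointers, so the hunks below are pure `obj->gtt_space` to `&obj->gtt_space` substitutions. For context, the protocol these call sites implement: blocks are speculatively added to the scan until evicting the set collected so far would open a large-enough hole; afterwards every added block must be removed again in LIFO order, and drm_mm_scan_remove_block() returns true exactly for the blocks whose eviction is really required. A condensed sketch of the no-hole-found path, assuming the era's list names (unwind_list appears in the diff; inactive_list is recalled from the surrounding function):

	/* speculatively add inactive objects to the scan */
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
		if (mark_free(obj, &unwind_list))
			goto found;	/* evicting the set opens a hole */

	/* nothing found: unwind every scanned block, in LIFO order */
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);
		ret = drm_mm_scan_remove_block(&obj->gtt_space);
		BUG_ON(ret);	/* false here: block need not be evicted */
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}
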
@@ -135,7 +135,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
struct drm_i915_gem_object,
exec_list);
- ret = drm_mm_scan_remove_block(obj->gtt_space);
+ ret = drm_mm_scan_remove_block(&obj->gtt_space);
BUG_ON(ret);
list_del_init(&obj->exec_list);
@@ -156,7 +156,7 @@ found:
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
- if (drm_mm_scan_remove_block(obj->gtt_space)) {
+ if (drm_mm_scan_remove_block(&obj->gtt_space)) {
list_move(&obj->exec_list, &eviction_list);
continue;
}
@@ -521,7 +521,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
- if (!obj->gtt_space)
+ if (!drm_mm_node_allocated(&obj->gtt_space))
continue;
need_fence =
@@ -554,7 +554,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- if (!obj->gtt_space) {
+ if (!drm_mm_node_allocated(&obj->gtt_space)) {
bool need_mappable =
entry->relocation_count ? true : need_fence;
@@ -585,7 +585,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
/* Decrement pin count for bound objects */
list_for_each_entry(obj, objects, exec_list) {
- if (obj->gtt_space)
+ if (drm_mm_node_allocated(&obj->gtt_space))
i915_gem_object_unpin(obj);
}
@@ -607,7 +607,7 @@ err:
struct drm_i915_gem_object,
exec_list);
while (objects != &obj->exec_list) {
- if (obj->gtt_space)
+ if (drm_mm_node_allocated(&obj->gtt_space))
i915_gem_object_unpin(obj);
obj = list_entry(obj->exec_list.prev,
@@ -83,10 +83,10 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
intel_gtt_insert_sg_entries(obj->sg_list,
obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
+ obj->gtt_space.start >> PAGE_SHIFT,
agp_type);
} else
- intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+ intel_gtt_insert_pages(obj->gtt_space.start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
obj->pages,
agp_type);
@@ -106,10 +106,10 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
intel_gtt_insert_sg_entries(obj->sg_list,
obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
+ obj->gtt_space.start >> PAGE_SHIFT,
agp_type);
} else
- intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+ intel_gtt_insert_pages(obj->gtt_space.start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
obj->pages,
agp_type);
@@ -117,7 +117,7 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
- intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+ intel_gtt_clear_range(obj->gtt_space.start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
if (obj->sg_list) {
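
A small unit subtlety in these GTT hunks: drm_mm node offsets and sizes are byte quantities, while the intel_gtt_* entry points address global GTT entries by page index, hence the `>> PAGE_SHIFT` on both start and size. With 4 KiB pages (PAGE_SHIFT == 12) a node starting at byte offset 0x10000 maps to GTT page 16:

	/* bytes -> GTT page index */
	unsigned long first = obj->gtt_space.start >> PAGE_SHIFT;
	unsigned long count = obj->base.size >> PAGE_SHIFT;

	intel_gtt_clear_range(first, count);
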
@@ -264,7 +264,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
while (size < obj->base.size)
size <<= 1;
- if (obj->gtt_space->size != size)
+ if (obj->gtt_space.size != size)
return false;
if (obj->gtt_offset & (size - 1))
@@ -349,7 +349,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
i915_gem_release_mmap(obj);
obj->map_and_fenceable =
- obj->gtt_space == NULL ||
+ !drm_mm_node_allocated(&obj->gtt_space) ||
(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
@@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind,
TP_fast_assign(
__entry->obj = obj;
- __entry->offset = obj->gtt_space->start;
- __entry->size = obj->gtt_space->size;
+ __entry->offset = obj->gtt_space.start;
+ __entry->size = obj->gtt_space.size;
__entry->mappable = mappable;
),
@@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind,
TP_fast_assign(
__entry->obj = obj;
- __entry->offset = obj->gtt_space->start;
- __entry->size = obj->gtt_space->size;
+ __entry->offset = obj->gtt_space.start;
+ __entry->size = obj->gtt_space.size;
),
TP_printk("obj=%p, offset=%08x size=%x",
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_debugfs.c        | 16 +++---
 drivers/gpu/drm/i915/i915_drv.h            |  2 +-
 drivers/gpu/drm/i915/i915_gem.c            | 75 +++++++++++----------------
 drivers/gpu/drm/i915/i915_gem_evict.c      |  6 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  8 ++--
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 10 ++--
 drivers/gpu/drm/i915/i915_gem_tiling.c     |  4 +-
 drivers/gpu/drm/i915/i915_trace.h          |  8 ++--
 8 files changed, 58 insertions(+), 71 deletions(-)