@@ -66,6 +66,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->vma.list);
INIT_LIST_HEAD(&obj->mm.link);
+ INIT_LIST_HEAD(&obj->mm.region_link);
INIT_LIST_HEAD(&obj->lut_list);
spin_lock_init(&obj->lut_lock);
@@ -79,6 +80,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
obj->flags = flags;
+ obj->mm.region = NULL;
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
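Taken together, the two hunks above leave a freshly initialised object with no region state: obj->mm.region starts out NULL and obj->mm.region_link starts out as an empty list head, so list_empty() on the link doubles as a "not currently tracked by any region" test for the pages paths below. For orientation, here is a trimmed-down, illustrative view of the state involved; the stand-in struct names are hypothetical, the real fields live inside struct drm_i915_gem_object and struct intel_memory_region:

#include <linux/list.h>
#include <linux/mutex.h>

struct intel_memory_region;

/* Illustrative stand-in for the obj->mm fields used by this series. */
struct obj_mm_sketch {
        struct intel_memory_region *region;  /* NULL until bound to a region */
        struct list_head region_link;        /* empty unless the object has pages */
};

/* Illustrative stand-in for mem->objects as used in the hunks below. */
struct region_objects_sketch {
        struct mutex lock;            /* protects both lists */
        struct list_head list;        /* objects with pages, I915_MADV_WILLNEED */
        struct list_head purgeable;   /* objects with pages, otherwise */
};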
@@ -16,6 +16,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ struct intel_memory_region *mem;
+ struct list_head *list;
int i;

assert_object_held_shared(obj);
@@ -64,7 +66,6 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

if (i915_gem_object_is_shrinkable(obj)) {
- struct list_head *list;
unsigned long flags;

assert_object_held(obj);
@@ -82,6 +83,18 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
atomic_set(&obj->mm.shrink_pin, 0);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
+
+ mem = obj->mm.region;
+ if (mem) {
+ mutex_lock(&mem->objects.lock);
+ GEM_WARN_ON(!list_empty(&obj->mm.region_link));
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ list = &mem->objects.purgeable;
+ else
+ list = &mem->objects.list;
+ list_move_tail(&obj->mm.region_link, list);
+ mutex_unlock(&mem->objects.lock);
+ }
}
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
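With the __i915_gem_object_set_pages() hunk above, an object is placed on its region's tracking list only while it actually holds backing pages, and it lands on mem->objects.purgeable rather than mem->objects.list when its madvise state is no longer I915_MADV_WILLNEED. Using list_move_tail() together with the GEM_WARN_ON() also tolerates a link that is unexpectedly still queued instead of corrupting it. Below is a minimal sketch of the kind of per-region walk this enables; region_count_purgeable() is a hypothetical helper, not something added by the patch, and it assumes the usual i915 headers are in scope:

/* Sketch only: every object seen here is guaranteed to hold pages. */
static unsigned long region_count_purgeable(struct intel_memory_region *mem)
{
        struct drm_i915_gem_object *obj;
        unsigned long count = 0;

        mutex_lock(&mem->objects.lock);
        list_for_each_entry(obj, &mem->objects.purgeable, mm.region_link)
                count++;  /* populated, !WILLNEED objects only */
        mutex_unlock(&mem->objects.lock);

        return count;
}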
@@ -192,6 +205,7 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
+ struct intel_memory_region *mem = obj->mm.region;
struct sg_table *pages;

assert_object_held_shared(obj);
@@ -205,6 +219,12 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
i915_gem_object_make_unshrinkable(obj);
+ if (mem) {
+ mutex_lock(&mem->objects.lock);
+ list_del_init(&obj->mm.region_link);
+ mutex_unlock(&mem->objects.lock);
+ }
+
if (obj->mm.mapping) {
unmap_object(obj, page_mask_bits(obj->mm.mapping));
obj->mm.mapping = NULL;
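This is the counterpart to the set-pages hunk: releasing the backing store takes the object back off its region list. Using list_del_init() rather than plain list_del() leaves the link self-pointing, which is the invariant both paths rely on; a tiny self-contained sketch of that invariant (not driver code, region_link_invariant() is an illustrative name):

#include <linux/bug.h>
#include <linux/list.h>

/*
 * A link that is only ever INIT_LIST_HEAD()'d, list_del_init()'d or
 * list_move_tail()'d stays well formed: list_empty() holds whenever the
 * object is untracked, so the GEM_WARN_ON() in the set-pages path is
 * satisfied and the unset path is harmless even for an object that
 * never made it onto a region list.
 */
static void region_link_invariant(struct list_head *link)
{
        INIT_LIST_HEAD(link);      /* as done in i915_gem_object_init() */
        WARN_ON(!list_empty(link));

        list_del_init(link);       /* no-op on an already-empty link */
        WARN_ON(!list_empty(link));
}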
@@ -105,30 +105,16 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
struct intel_memory_region *mem)
{
INIT_LIST_HEAD(&obj->mm.blocks);
+ WARN_ON(i915_gem_object_has_pages(obj));
obj->mm.region = intel_memory_region_get(mem);

if (obj->base.size <= mem->min_page_size)
obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
-
- mutex_lock(&mem->objects.lock);
-
- if (obj->flags & I915_BO_ALLOC_VOLATILE)
- list_add(&obj->mm.region_link, &mem->objects.purgeable);
- else
- list_add(&obj->mm.region_link, &mem->objects.list);
-
- mutex_unlock(&mem->objects.lock);
}

void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
{
- struct intel_memory_region *mem = obj->mm.region;
-
- mutex_lock(&mem->objects.lock);
- list_del(&obj->mm.region_link);
- mutex_unlock(&mem->objects.lock);
-
- intel_memory_region_put(mem);
+ intel_memory_region_put(obj->mm.region);
}
struct drm_i915_gem_object *
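After these last two hunks, i915_gem_object_init_memory_region() only takes the region reference (and, via the new WARN_ON(), asserts that no pages exist yet), while i915_gem_object_release_memory_region() only drops it; all of the mem->objects.lock handling now lives in the set/unset-pages paths. The simpler release also implies the region mutex is no longer taken during object teardown, on the assumption that __i915_gem_object_unset_pages() has already emptied obj->mm.region_link by then. A hedged sketch of the ordering the WARN_ON() encodes; bind_object_to_region() is a hypothetical wrapper, not part of the patch:

/*
 * Sketch only: binding must happen before the object ever gets pages,
 * because it is __i915_gem_object_set_pages(), keyed off
 * obj->mm.region != NULL, that later places the object on
 * mem->objects.list or mem->objects.purgeable.
 */
static void bind_object_to_region(struct drm_i915_gem_object *obj,
                                  struct intel_memory_region *mem)
{
        GEM_BUG_ON(i915_gem_object_has_pages(obj));  /* would be too late */
        i915_gem_object_init_memory_region(obj, mem);
}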