@@ -237,7 +237,6 @@ struct drm_i915_gem_object {
* region->obj_lock.
*/
struct list_head region_link;
- struct list_head tmp_link;
struct sg_table *pages;
void *mapping;
@@ -272,64 +272,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
return freed;
}
-int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
- resource_size_t target)
-{
- struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
- resource_size_t purged;
- LIST_HEAD(purgeable);
- int err = -ENOSPC;
-
- intel_gt_retire_requests(&i915->gt);
-
- purged = 0;
-
- mutex_lock(&mem->objects.lock);
-
- while ((obj = list_first_entry_or_null(&mem->objects.purgeable,
- typeof(*obj),
- mm.region_link))) {
- list_move_tail(&obj->mm.region_link, &purgeable);
-
- if (!i915_gem_object_has_pages(obj))
- continue;
-
- if (i915_gem_object_is_framebuffer(obj))
- continue;
-
- if (!kref_get_unless_zero(&obj->base.refcount))
- continue;
-
- mutex_unlock(&mem->objects.lock);
-
- if (!i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE)) {
- if (i915_gem_object_trylock(obj)) {
- __i915_gem_object_put_pages(obj);
- if (!i915_gem_object_has_pages(obj)) {
- purged += obj->base.size;
- if (!i915_gem_object_is_volatile(obj))
- obj->mm.madv = __I915_MADV_PURGED;
- }
- i915_gem_object_unlock(obj);
- }
- }
-
- i915_gem_object_put(obj);
-
- mutex_lock(&mem->objects.lock);
-
- if (purged >= target) {
- err = 0;
- break;
- }
- }
-
- list_splice_tail(&purgeable, &mem->objects.purgeable);
- mutex_unlock(&mem->objects.lock);
- return err;
-}
-
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
@@ -31,7 +31,5 @@ void i915_gem_driver_register__shrinker(struct drm_i915_private *i915);
void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915);
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
struct mutex *mutex);
-int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
- resource_size_t target);
#endif /* __I915_GEM_SHRINKER_H__ */
@@ -1008,12 +1008,12 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
switch (obj->mm.madv) {
case I915_MADV_WILLNEED:
- list_move(&obj->mm.region_link,
- &obj->mm.region->objects.list);
+ list_move_tail(&obj->mm.region_link,
+ &obj->mm.region->objects.list);
break;
default:
- list_move(&obj->mm.region_link,
- &obj->mm.region->objects.purgeable);
+ list_move_tail(&obj->mm.region_link,
+ &obj->mm.region->objects.purgeable);
break;
}
@@ -3,6 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
+#include "gt/intel_gt_requests.h"
#include "intel_memory_region.h"
#include "i915_drv.h"
@@ -94,6 +95,90 @@ __intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
__intel_memory_region_put_pages_buddy(block->private, &blocks);
}
+static int intel_memory_region_evict(struct intel_memory_region *mem,
+ resource_size_t target)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ struct list_head still_in_list;
+ struct drm_i915_gem_object *obj;
+ struct list_head *phases[] = {
+ &mem->objects.purgeable,
+ &mem->objects.list,
+ NULL,
+ };
+ struct list_head **phase;
+ resource_size_t found;
+
+ intel_gt_retire_requests(&i915->gt);
+
+ found = 0;
+ phase = phases;
+
+next:
+ INIT_LIST_HEAD(&still_in_list);
+ mutex_lock(&mem->objects.lock);
+
+ while (found < target &&
+ (obj = list_first_entry_or_null(*phase,
+ typeof(*obj),
+ mm.region_link))) {
+ list_move_tail(&obj->mm.region_link, &still_in_list);
+
+ if (!i915_gem_object_has_pages(obj))
+ continue;
+
+ if (i915_gem_object_is_framebuffer(obj))
+ continue;
+
+ /*
+ * For an IOMEM region, only swap out userspace objects;
+ * kernel objects are bound and would trigger a lot of unbind
+ * warnings in the driver.
+ * FIXME: swap out kernel objects as well.
+ */
+ if (i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM) &&
+     !obj->base.handle_count)
+ continue;
+
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ mutex_unlock(&mem->objects.lock);
+
+ if (!i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE)) {
+ if (i915_gem_object_trylock(obj)) {
+ __i915_gem_object_put_pages(obj);
+ /* May arrive from get_pages on another bo */
+ if (!i915_gem_object_has_pages(obj)) {
+ found += obj->base.size;
+ if (obj->mm.madv == I915_MADV_DONTNEED)
+ obj->mm.madv = __I915_MADV_PURGED;
+ }
+ i915_gem_object_unlock(obj);
+ }
+ }
+
+ i915_gem_object_put(obj);
+ mutex_lock(&mem->objects.lock);
+
+ if (found >= target)
+ break;
+ }
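+ /* Return the objects we skipped or failed to evict to their list */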
+ list_splice_tail(&still_in_list, *phase);
+ mutex_unlock(&mem->objects.lock);
+
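+ /* Not enough reclaimed in this phase; fall through to the next list */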
+ if (found < target) {
+ phase++;
+ if (*phase)
+ goto next;
+ }
+
+ return (found < target) ? -ENOSPC : 0;
+}
+
int
__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
resource_size_t size,
@@ -137,7 +222,7 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
do {
struct i915_buddy_block *block;
unsigned int order;
- bool retry = true;
+
retry:
order = min_t(u32, (fls(n_pages) - 1), max_order);
GEM_BUG_ON(order > mem->mm.max_order);
@@ -152,19 +237,15 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
resource_size_t target;
int err;
- if (!retry)
- goto err_free_blocks;
-
target = n_pages * mem->mm.chunk_size;
mutex_unlock(&mem->mm_lock);
- err = i915_gem_shrink_memory_region(mem,
- target);
+ err = intel_memory_region_evict(mem,
+ target);
mutex_lock(&mem->mm_lock);
if (err)
goto err_free_blocks;
- retry = false;
goto retry;
}
} while (1);
@@ -1093,7 +1093,8 @@ static void igt_mark_evictable(struct drm_i915_gem_object *obj)
{
i915_gem_object_unpin_pages(obj);
obj->mm.madv = I915_MADV_DONTNEED;
- list_move(&obj->mm.region_link, &obj->mm.region->objects.purgeable);
+ list_move_tail(&obj->mm.region_link,
+ &obj->mm.region->objects.purgeable);
}
static int igt_mock_shrink(void *arg)