@@ -344,7 +344,7 @@ first_hole(struct drm_mm *mm,
u64 start, u64 end, u64 size,
enum drm_mm_insert_mode mode)
{
- switch (mode) {
+ switch (mode & DRM_MM_INSERT_MODE) {
default:
case DRM_MM_INSERT_BEST:
return best_hole(mm, size);
@@ -367,12 +367,17 @@ next_hole(struct drm_mm *mm,
struct drm_mm_node *node,
enum drm_mm_insert_mode mode)
{
+ if (mode & DRM_MM_INSERT_ONCE)
+ return NULL; /* check only the first hit */
+
/* Searching is slow; check if we ran out of time/patience */
- cond_resched();
- if (fatal_signal_pending(current))
- return NULL;
+ if (mode & DRM_MM_INSERT_INTERRUPTIBLE) {
+ cond_resched();
+ if (fatal_signal_pending(current))
+ return NULL;
+ }

- switch (mode) {
+ switch (mode & DRM_MM_INSERT_MODE) {
default:
case DRM_MM_INSERT_BEST:
return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
@@ -476,7 +481,6 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
{
struct drm_mm_node *hole;
u64 remainder_mask;
- bool once;

DRM_MM_BUG_ON(range_start > range_end);
@@ -489,13 +493,10 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
if (alignment <= 1)
alignment = 0;

- once = mode & DRM_MM_INSERT_ONCE;
- mode &= ~DRM_MM_INSERT_ONCE;
-
remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
for (hole = first_hole(mm, range_start, range_end, size, mode);
hole;
- hole = once ? NULL : next_hole(mm, hole, mode)) {
+ hole = next_hole(mm, hole, mode)) {
u64 hole_start = __drm_mm_hole_node_start(hole);
u64 hole_end = hole_start + hole->hole_size;
u64 adj_start, adj_end;
@@ -1057,7 +1057,8 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
(&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
+ DRM_MM_INSERT_LOW |
+ DRM_MM_INSERT_INTERRUPTIBLE);
mutex_unlock(&ggtt->vm.mutex);
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
@@ -43,7 +43,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
mutex_lock(&i915->mm.stolen_lock);
ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
size, alignment, 0,
- start, end, DRM_MM_INSERT_BEST);
+ start, end,
+ DRM_MM_INSERT_BEST |
+ DRM_MM_INSERT_INTERRUPTIBLE);
mutex_unlock(&i915->mm.stolen_lock);

return ret;
@@ -69,7 +69,8 @@ insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
size, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
+ DRM_MM_INSERT_LOW |
+ DRM_MM_INSERT_INTERRUPTIBLE);
mutex_unlock(&ggtt->vm.mutex);
@@ -232,7 +232,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
err = drm_mm_insert_node_in_range(&vm->mm, node,
size, alignment, color,
- start, end, mode);
+ start, end,
+ mode | DRM_MM_INSERT_INTERRUPTIBLE);
if (err != -ENOSPC)
return err;
@@ -240,7 +241,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
err = drm_mm_insert_node_in_range(&vm->mm, node,
size, alignment, color,
start, end,
- DRM_MM_INSERT_BEST);
+ DRM_MM_INSERT_BEST |
+ DRM_MM_INSERT_INTERRUPTIBLE);
if (err != -ENOSPC)
return err;
}
@@ -288,7 +290,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
return drm_mm_insert_node_in_range(&vm->mm, node,
size, alignment, color,
- start, end, DRM_MM_INSERT_EVICT);
+ start, end,
+ DRM_MM_INSERT_EVICT |
+ DRM_MM_INSERT_INTERRUPTIBLE);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
@@ -141,7 +141,18 @@ enum drm_mm_insert_mode {
* Does not search all holes.
*/
DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
+
+ /**
+ * @DRM_MM_INSERT_INTERRUPTIBLE:
+ *
+ * Check for pending signals and allow rescheduling amidst the
+ * search. In some heavily fragmented cases, searching for a hole of
+ * just the right size can take a long time, in which case it
+ * is better to let something else run during our fruitless search.
+ */
+ DRM_MM_INSERT_INTERRUPTIBLE = BIT(30),
};
+#define DRM_MM_INSERT_MODE GENMASK(29, 0) /* all but the special bits */
/**
* struct drm_mm_node - allocated block in the DRM allocator
Sometimes the drm_mm is searched from within an atomic context (yikes!)
so we must be cautious and not insert a schedule() unless the caller
indicates it is safe to do so.

Closes: https://gitlab.freedesktop.org/drm/intel/issues/1509
Fixes: 7be1b9b8e9d1 ("drm/mm: Break long searches in fragmented address spaces")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/drm_mm.c                       | 21 +++++++++----------
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c     |  3 ++-
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c     |  4 +++-
 drivers/gpu/drm/i915/i915_gem.c                |  3 ++-
 drivers/gpu/drm/i915/i915_gem_gtt.c            | 10 ++++++---
 include/drm/drm_mm.h                           | 11 ++++++++++
 6 files changed, 36 insertions(+), 16 deletions(-)
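For illustration only (not part of the patch): a minimal, hypothetical
caller showing how the opt-in flag is meant to be used. The helper name
and its locking assumptions are invented; drm_mm_insert_node_in_range()
and the insert-mode flags are the real API. A search aborted by a fatal
signal falls out of the scan loop and is reported as -ENOSPC, the same
as a genuinely full range, so callers need no extra error handling.

	/*
	 * Hypothetical sketch: this caller holds only a sleepable lock,
	 * so it is safe to let a long search reschedule and abort on a
	 * fatal signal.
	 */
	static int example_insert(struct drm_mm *mm, struct drm_mm_node *node,
				  u64 size, u64 start, u64 end)
	{
		return drm_mm_insert_node_in_range(mm, node, size,
						   0 /* alignment */,
						   0 /* color */,
						   start, end,
						   DRM_MM_INSERT_BEST |
						   DRM_MM_INSERT_INTERRUPTIBLE);
	}

An atomic caller simply omits DRM_MM_INSERT_INTERRUPTIBLE and keeps the
old non-sleeping search, which is the point of making the flag opt-in.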