@@ -692,19 +692,21 @@ EXPORT_SYMBOL(drm_mm_replace_node);
* The DRM range allocator supports this use-case through the scanning
* interfaces. First a scan operation needs to be initialized with
* drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
- * objects to the roaster (probably by walking an LRU list, but this can be
- * freely implemented) until a suitable hole is found or there's no further
- * evictable object.
+ * objects to the roster using drm_mm_scan_add_block() (probably by walking
+ * an LRU list, but this can be freely implemented) until a suitable hole
+ * is found or there are no further evictable objects.
*
* The driver must walk through all objects again in exactly the reverse
* order to restore the allocator state. Note that while the allocator is used
* in the scan mode no other operation is allowed.
*
- * Finally the driver evicts all objects selected in the scan. Adding and
- * removing an object is O(1), and since freeing a node is also O(1) the overall
- * complexity is O(scanned_objects). So like the free stack which needs to be
- * walked before a scan operation even begins this is linear in the number of
- * objects. It doesn't seem to hurt badly.
+ * Finally the driver evicts all objects selected in the scan (those for which
+ * drm_mm_scan_remove_block() reported true), plus any overlapping nodes still
+ * required after color adjustment (drm_mm_scan_color_evict()). Adding and
+ * removing an object is O(1), and since freeing a node is also O(1) the
+ * overall complexity is O(scanned_objects). So, like the free stack which
+ * needs to be walked before a scan operation even begins, this is linear in
+ * the number of objects. It doesn't seem to hurt too badly.
*/
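
A minimal sketch of the flow described in the comment above, assuming the
drm_mm_scan_init() signature of this series and a hypothetical driver object
(struct my_obj) with an embedded drm_mm_node, a driver-global LRU and a
my_obj_unbind() hook; none of these names are part of this patch. The
color_adjust trim step is continued after the new drm_mm_scan_color_evict()
helper below.

#include <linux/list.h>
#include <drm/drm_mm.h>

struct my_obj {
	struct drm_mm_node node;	/* allocation tracked by the drm_mm */
	struct list_head lru;		/* driver-global LRU link */
	struct list_head scan_link;	/* temporary roster link */
};

/* Hypothetical driver hook: unbinds the object and drops it from the LRU. */
void my_obj_unbind(struct my_obj *obj);

static int my_evict_for_hole(struct drm_mm *mm, struct list_head *lru,
			     u64 size, u64 alignment, unsigned long color)
{
	struct drm_mm_scan scan;
	struct my_obj *obj, *next;
	struct drm_mm_node *node;	/* used in the color trim step below */
	LIST_HEAD(roster);
	bool found = false;

	drm_mm_scan_init(&scan, mm, size, alignment, color, 0 /* flags */);

	/* Build the roster from the LRU until a suitable hole is found. */
	list_for_each_entry(obj, lru, lru) {
		/* Prepending leaves @roster in reverse order of addition. */
		list_add(&obj->scan_link, &roster);
		if (drm_mm_scan_add_block(&scan, &obj->node)) {
			found = true;
			break;
		}
	}

	/*
	 * Walk the roster again, in exactly the reverse order the blocks
	 * were added, to restore the allocator state.  Evict only the
	 * nodes the scan actually selected.
	 */
	list_for_each_entry_safe(obj, next, &roster, scan_link) {
		bool evict = drm_mm_scan_remove_block(&scan, &obj->node);

		list_del(&obj->scan_link);
		if (found && evict) {
			drm_mm_remove_node(&obj->node);
			my_obj_unbind(obj);
		}
	}

	if (!found)
		return -ENOSPC;

	/* Continued after drm_mm_scan_color_evict() below. */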
/**
@@ -829,23 +831,8 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
}
}
- if (mm->color_adjust) {
- /* If allocations need adjusting due to neighbouring colours,
- * we do not have enough information to decide if we need
- * to evict nodes on either side of [adj_start, adj_end].
- * What almost works is
- * hit_start = adj_start + (hole_start - col_start);
- * hit_end = adj_start + scan->size + (hole_end - col_end);
- * but because the decision is only made on the final hole,
- * we may underestimate the required adjustments for an
- * interior allocation.
- */
- scan->hit_start = hole_start;
- scan->hit_end = hole_end;
- } else {
- scan->hit_start = adj_start;
- scan->hit_end = adj_start + scan->size;
- }
+ scan->hit_start = adj_start;
+ scan->hit_end = adj_start + scan->size;
DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
DRM_MM_BUG_ON(scan->hit_start < hole_start);
@@ -903,6 +890,45 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
EXPORT_SYMBOL(drm_mm_scan_remove_block);
/**
+ * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
+ * @scan: drm_mm scan with target hole
+ *
+ * After completing an eviction scan and removing the selected nodes, we may
+ * need to remove a few more nodes from either side of the target hole if
+ * mm.color_adjust is being used.
+ *
+ * Returns:
+ * A node to evict, or NULL if there are no overlapping nodes.
+ */
+struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
+{
+ struct drm_mm *mm = scan->mm;
+ struct drm_mm_node *hole;
+ u64 hole_start, hole_end;
+
+ DRM_MM_BUG_ON(list_empty(&mm->hole_stack));
+
+ if (!mm->color_adjust)
+ return NULL;
+
+ hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
+ hole_start = __drm_mm_hole_node_start(hole);
+ hole_end = __drm_mm_hole_node_end(hole);
+
+ DRM_MM_BUG_ON(hole_start > scan->hit_start);
+ DRM_MM_BUG_ON(hole_end < scan->hit_end);
+
+ mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
+ if (hole_start > scan->hit_start)
+ return hole;
+ if (hole_end < scan->hit_end)
+ return list_next_entry(hole, node_list);
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_mm_scan_color_evict);
+
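
Continuing the hypothetical my_evict_for_hole() sketch from above (my_obj,
my_obj_unbind() and the node local remain assumptions, not part of this
patch): once the scan-selected nodes have been removed, the caller keeps
evicting whatever the new helper hands back until it returns NULL, exactly
as the i915 and selftest hunks below do.

	/*
	 * With mm->color_adjust in use, the freed range may still be too
	 * small once neighbouring colours are taken into account, so keep
	 * trimming the overlapping neighbours the helper reports.
	 */
	while ((node = drm_mm_scan_color_evict(&scan))) {
		struct my_obj *obj = container_of(node, struct my_obj, node);

		drm_mm_remove_node(&obj->node);
		my_obj_unbind(obj);
	}

	return 0;
}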
+/**
* drm_mm_init - initialize a drm-mm allocator
* @mm: the drm_mm structure to initialize
* @start: start of the range managed by @mm
@@ -108,6 +108,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
NULL,
}, **phase;
struct i915_vma *vma, *next;
+ struct drm_mm_node *node;
int ret;
lockdep_assert_held(&vm->i915->drm.struct_mutex);
@@ -218,6 +219,12 @@ i915_gem_evict_something(struct i915_address_space *vm,
if (ret == 0)
ret = i915_vma_unbind(vma);
}
+
+ while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
+ vma = container_of(node, struct i915_vma, node);
+ ret = i915_vma_unbind(vma);
+ }
+
return ret;
}
@@ -1162,6 +1162,7 @@ static bool evict_nodes(struct drm_mm_scan *scan,
struct evict_node *nodes,
unsigned int *order,
unsigned int count,
+ bool use_color,
struct list_head *evict_list)
{
struct evict_node *e, *en;
@@ -1186,6 +1187,21 @@ static bool evict_nodes(struct drm_mm_scan *scan,
list_for_each_entry(e, evict_list, link)
drm_mm_remove_node(&e->node);
+ if (use_color) {
+ struct drm_mm_node *node;
+
+ while ((node = drm_mm_scan_color_evict(scan))) {
+ e = container_of(node, typeof(*e), node);
+ drm_mm_remove_node(&e->node);
+ list_add(&e->link, evict_list);
+ }
+ } else {
+ if (drm_mm_scan_color_evict(scan)) {
+ pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
+ return false;
+ }
+ }
+
return true;
}
@@ -1299,7 +1315,7 @@ static int evict_something(struct drm_mm *mm,
range_start, range_end,
mode->create_flags);
if (!evict_nodes(&scan,
- nodes, order, count,
+ nodes, order, count, false,
&evict_list))
return -EINVAL;
@@ -1878,7 +1894,7 @@ static int evict_color(struct drm_mm *mm,
range_start, range_end,
mode->create_flags);
if (!evict_nodes(&scan,
- nodes, order, count,
+ nodes, order, count, true,
&evict_list))
return -EINVAL;
@@ -422,6 +422,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
struct drm_mm_node *node);
+struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);
void drm_mm_debug_table(const struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS