[5/6] drm/ttm: cope with reserved buffers on lru list in ttm_mem_evict_first, v2

Message ID 1354101944-10455-5-git-send-email-maarten.lankhorst@canonical.com

Commit Message

Maarten Lankhorst Nov. 28, 2012, 11:25 a.m. UTC
Replace the goto loop with a simple list_for_each_entry loop, and only run
the delayed-destroy cleanup if the buffer could be reserved first (a
consolidated sketch of the resulting loop follows the diffstat below).

No race occurs, since the lru lock is never dropped anymore. An empty list
and a list full of unreservable buffers both cause -EBUSY to be returned,
which matches the previous behavior, because buffers on the lru list were
previously always guaranteed to be reservable.

This should work, since ttm currently guarantees that items on the lru list
are always reservable, and blocking on a reservation while already holding
another bo reserved is enough to run into a deadlock (for example, two
threads each holding one buffer reserved while blocking on the buffer the
other holds).

Currently this is not a concern, since removal from the lru list and
reservation are always done atomically, but when that guarantee no longer
holds, this situation has to be handled or we end up with possible
deadlocks.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 42 +++++++++++-------------------------------
 1 file changed, 11 insertions(+), 31 deletions(-)
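
For reference, here is the reservation path of ttm_mem_evict_first() as it
looks after this patch, assembled from the '+' lines of the hunk below (not
a standalone function, just the affected region, with identifiers as in
drivers/gpu/drm/ttm/ttm_bo.c):

	int ret = -EBUSY, put_count;

	spin_lock(&glob->lru_lock);
	/* Walk the lru and stop at the first buffer that can be
	 * reserved without blocking. */
	list_for_each_entry(bo, &man->lru, lru) {
		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	/* An empty list and nothing reservable both leave ret == -EBUSY,
	 * identical to the old behavior. */
	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	/* The buffer is already reserved at this point, so the delayed
	 * destroy cleanup needs no reserve attempt of its own. */
	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

Holding the lru lock across the entire walk is what removes the old retry
loop: nothing can be reserved or unreserved behind our back between the scan
and the decision to return -EBUSY.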

Patch

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 74b296f..ef7b2ad 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -793,49 +793,29 @@  static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_buffer_object *bo;
-	int ret, put_count = 0;
+	int ret = -EBUSY, put_count;
 
-retry:
 	spin_lock(&glob->lru_lock);
-	if (list_empty(&man->lru)) {
+	list_for_each_entry(bo, &man->lru, lru) {
+		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		if (!ret)
+			break;
+	}
+
+	if (ret) {
 		spin_unlock(&glob->lru_lock);
-		return -EBUSY;
+		return ret;
 	}
 
-	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
 
 	if (!list_empty(&bo->ddestroy)) {
-		ret = ttm_bo_reserve_locked(bo, interruptible, no_wait_reserve, false, 0);
-		if (!ret)
-			ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
-							     no_wait_gpu);
-		else
-			spin_unlock(&glob->lru_lock);
-
+		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+						     no_wait_gpu);
 		kref_put(&bo->list_kref, ttm_bo_release_list);
-
 		return ret;
 	}
 
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
-	if (unlikely(ret == -EBUSY)) {
-		spin_unlock(&glob->lru_lock);
-		if (likely(!no_wait_reserve))
-			ret = ttm_bo_wait_unreserved(bo, interruptible);
-
-		kref_put(&bo->list_kref, ttm_bo_release_list);
-
-		/**
-		 * We *need* to retry after releasing the lru lock.
-		 */
-
-		if (unlikely(ret != 0))
-			return ret;
-		goto retry;
-	}
-
 	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);