Even if fence copying fails, individualize the resv after the wait. If
fence copying does fail, opportunistically trylock the vm's resv to
limit the chance of starvation. Exit the individualization earlier if
the bo type is ttm_bo_type_sg.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 74 +++++++++++++++++-------------------
 1 file changed, 34 insertions(+), 40 deletions(-)

@@ -183,44 +183,48 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ttm_resource_free(bo, &bo->resource);
}
-static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
+static void ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
- int r;
-
- if (bo->base.resv == &bo->base._resv)
- return 0;
+ if (bo->base.resv == &bo->base._resv || bo->type == ttm_bo_type_sg)
+ return;
BUG_ON(!dma_resv_trylock(&bo->base._resv));
- r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
+ if (dma_resv_copy_fences(&bo->base._resv, bo->base.resv)) {
+ /* Opportunistic trylock to block new fence additions */
+ bool locked = dma_resv_trylock(bo->base.resv);
- if (!r && bo->type != ttm_bo_type_sg) {
- /*
- * The TTM bo refcount is now zero and hence nobody will
- * therefore try to lock the bo at this point: the LRU
- * list lookups will trylock even if the refcount is zero,
- * but will only do that under the LRU lock and will
- * then immediately back off under the same LRU lock when it
- * sees the zero refcount.
- */
- spin_lock(&bo->bdev->lru_lock);
- bo->base.resv = &bo->base._resv;
+ /* Last resort if memory allocation failed for fence copying */
+ dma_resv_wait_timeout(bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP, false,
+ 30 * HZ);
+ if (locked)
+ dma_resv_unlock(bo->base.resv);
+ }
- /* Since bulk move is closely tied with the shared resv,
- * clear it when we have now individualized, if that was not
- * done by the driver already.
- */
- if (bo->bulk_move) {
- if (bo->resource)
- ttm_resource_del_bulk_move(bo->resource, bo);
- bo->bulk_move = NULL;
- }
- spin_unlock(&bo->bdev->lru_lock);
+ /*
+ * The TTM bo refcount is now zero and hence nobody will
+ * therefore try to lock the bo at this point: the LRU
+ * list lookups will trylock even if the refcount is zero,
+ * but will only do that under the LRU lock and will
+ * then immediately back off under the same LRU lock when it
+ * sees the zero refcount.
+ */
+ spin_lock(&bo->bdev->lru_lock);
+ bo->base.resv = &bo->base._resv;
+
+ /* Since bulk move is closely tied with the shared resv,
+ * clear it when we have now individualized, if that was not
+ * done by the driver already.
+ */
+ if (bo->bulk_move) {
+ if (bo->resource)
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ bo->bulk_move = NULL;
}
+ spin_unlock(&bo->bdev->lru_lock);
dma_resv_unlock(&bo->base._resv);
-
- return r;
}
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
@@ -334,21 +338,11 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_buffer_object *bo =
container_of(kref, struct ttm_buffer_object, kref);
struct ttm_device *bdev = bo->bdev;
- int ret;
WARN_ON_ONCE(bo->pin_count);
if (!bo->deleted) {
- ret = ttm_bo_individualize_resv(bo);
- if (ret) {
- /* Last resort, if we fail to allocate memory for the
- * fences block for the BO to become idle
- */
- dma_resv_wait_timeout(bo->base.resv,
- DMA_RESV_USAGE_BOOKKEEP, false,
- 30 * HZ);
- }
-
+ ttm_bo_individualize_resv(bo);
WARN_ON_ONCE(bo->bulk_move);
if (bo->bdev->funcs->release_notify)
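For review convenience, this is how ttm_bo_individualize_resv() reads with
the first hunk applied, pieced together from the added and context lines
above. Blank lines and the shortened comments are approximations; the hunk
itself is authoritative.

static void ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	if (bo->base.resv == &bo->base._resv || bo->type == ttm_bo_type_sg)
		return;

	BUG_ON(!dma_resv_trylock(&bo->base._resv));

	if (dma_resv_copy_fences(&bo->base._resv, bo->base.resv)) {
		/* Opportunistic trylock to block new fence additions */
		bool locked = dma_resv_trylock(bo->base.resv);

		/* Last resort if memory allocation failed for fence copying */
		dma_resv_wait_timeout(bo->base.resv,
				      DMA_RESV_USAGE_BOOKKEEP, false,
				      30 * HZ);
		if (locked)
			dma_resv_unlock(bo->base.resv);
	}

	/*
	 * The refcount is zero here; LRU list lookups trylock under the
	 * LRU lock and back off when they see the zero refcount.
	 */
	spin_lock(&bo->bdev->lru_lock);
	bo->base.resv = &bo->base._resv;

	/* Clear the bulk move, which is tied to the shared resv. */
	if (bo->bulk_move) {
		if (bo->resource)
			ttm_resource_del_bulk_move(bo->resource, bo);
		bo->bulk_move = NULL;
	}
	spin_unlock(&bo->bdev->lru_lock);

	dma_resv_unlock(&bo->base._resv);
}

Note that the fallback path still switches to the individualized resv after
the timed wait, which is the behavioural change described in the commit
message.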