@@ -587,50 +587,148 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
return r == -EDEADLK ? -EBUSY : r;
}
-int ttm_mem_evict_first(struct ttm_device *bdev,
- struct ttm_resource_manager *man,
- const struct ttm_place *place,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket)
+struct ttm_mem_evict_ctx {
+ const struct ttm_place *place;
+ struct ttm_operation_ctx *ctx;
+ struct ww_acquire_ctx *ticket;
+};
+
+/**
+ * ttm_mem_evict_allowable - check whether a memory resource can be evicted
+ *
+ * @lru_entity: The struct ttm_resource::lru_entity when this resource is
+ * added to drm lru list.
+ * @place: The preferred ttm placement where we want to evict memory for
+ * more memory space. If the current ttm_resource doesn't match the preferred
+ * placement, then there is no need to evict the current resource.
+ * @ctx: ttm operation context
+ * @ticket: dma reservation's context used to lock resource
+ * @busy: used to return whether the current resource is busy (i.e., locked
+ * by other clients)
+ * @locked: used to return whether this resource is locked during this check,
+ * i.e., successfully trylocked bo's dma reservation object
+ *
+ * Check whether we are allowed to evict a memory resource. Return true if we
+ * are allowed to evict resource; otherwise false.
+ *
+ * When this function returns true, a resource reference counter (bo's reference)
+ * is held. This reference counter needs to be released after the evict operation
+ * later on.
+ *
+ * This function should be called with lru_lock held.
+ */
+bool ttm_mem_evict_allowable(struct drm_lru_entity *lru_entity,
+ const struct drm_lru_evict_ctx *lru_evict_ctx,
+ bool *busy, bool *locked)
{
- struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
- struct drm_lru_cursor cursor;
struct ttm_resource *res;
- struct drm_lru_entity *entity;
- bool locked = false;
- int ret;
+ struct ttm_buffer_object *bo = NULL;
+ struct ttm_device *bdev;
+ const struct ttm_place *place;
+ struct ttm_operation_ctx *ctx;
+ struct ww_acquire_ctx *ticket;
+ struct ttm_mem_evict_ctx *evict_ctx;
- spin_lock(bdev->lru_lock);
- drm_lru_for_each_entity(man->lru_mgr, &cursor, entity) {
- bool busy;
+ evict_ctx = (struct ttm_mem_evict_ctx *)lru_evict_ctx;
+ place = evict_ctx->place;
+ ctx = evict_ctx->ctx;
+ ticket = evict_ctx->ticket;
- res = container_of(entity, struct ttm_resource, lru_entity);
- if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
- &locked, &busy)) {
- if (busy && !busy_bo && ticket !=
- dma_resv_locking_ctx(res->bo->base.resv))
- busy_bo = res->bo;
- continue;
- }
+ res = container_of(lru_entity, struct ttm_resource, lru_entity);
+ bo = res->bo;
+ bdev = bo->bdev;
- if (ttm_bo_get_unless_zero(res->bo)) {
- bo = res->bo;
- break;
- }
- if (locked)
- dma_resv_unlock(res->bo->base.resv);
- }
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, place, locked, busy)) {
+ if (*busy && ticket == dma_resv_locking_ctx(bo->base.resv))
+ *busy = false;
- if (!bo) {
- if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
- busy_bo = NULL;
- spin_unlock(bdev->lru_lock);
- ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
- if (busy_bo)
- ttm_bo_put(busy_bo);
- return ret;
+ return false;
}
+ if (ttm_bo_get_unless_zero(bo))
+ return true;
+
+ if (*locked)
+ dma_resv_unlock(bo->base.resv);
+
+ return false;
+}
+
+/**
+ * ttm_mem_evict_busy_entity - wait for a busy memory resource
+ *
+ * @lru_entity: The struct ttm_resource::lru_entity when this resource is
+ * added to drm lru list.
+ * @ctx: ttm operation context
+ * @ticket: dma reservation's context used to lock resource
+ *
+ * Wait for a busy memory resource to become usable (see FIXME below).
+ * This function should be called with lru_lock held; the lock is released inside.
+ */
+int ttm_mem_evict_busy_entity(struct drm_lru_entity *lru_entity,
+ const struct drm_lru_evict_ctx *lru_evict_ctx)
+{
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo = NULL;
+ struct ttm_device *bdev;
+ int ret;
+ struct ttm_operation_ctx *ctx;
+ struct ww_acquire_ctx *ticket;
+ struct ttm_mem_evict_ctx *evict_ctx;
+
+ evict_ctx = (struct ttm_mem_evict_ctx *)lru_evict_ctx;
+ ctx = evict_ctx->ctx;
+ ticket = evict_ctx->ticket;
+
+ res = container_of(lru_entity, struct ttm_resource, lru_entity);
+ bo = res->bo;
+ bdev = bo->bdev;
+
+ if (!ttm_bo_get_unless_zero(bo))
+ bo = NULL;
+ spin_unlock(bdev->lru_lock);
+ ret = ttm_mem_evict_wait_busy(bo, ctx, ticket);
+ /* FIXME: this is code copied originally from ttm_mem_evict_first.
+ * 1) Shouldn't we ttm_bo_evict this bo also? Otherwise how can we
+ * make any memory space?
+ * 2) We also need to check whether this busy entity is in the same
+ * ttm_place as specified in lru_evict_ctx::place; if not, waiting
+ * for this busy entity will not help.
+ */
+ if (bo)
+ ttm_bo_put(bo);
+
+ return ret;
+}
+
+/**
+ * ttm_mem_evict_entity - evict the memory resource behind a lru_entity
+ * @lru_entity: The struct ttm_resource::lru_entity added to the drm lru list.
+ * @ctx: ttm operation context
+ * @locked: whether this resource is dma-reserved (if reserved, we need to
+ * unreserve it in this function)
+ *
+ * Evict a memory resource corresponding to a lru_entity. This should be
+ * called holding lru_lock
+ *
+ */
+int ttm_mem_evict_entity(struct drm_lru_entity *lru_entity,
+ const struct drm_lru_evict_ctx *lru_evict_ctx, bool locked)
+{
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo = NULL;
+ struct ttm_device *bdev;
+ int ret;
+ struct ttm_operation_ctx *ctx;
+ struct ttm_mem_evict_ctx *evict_ctx;
+
+ evict_ctx = (struct ttm_mem_evict_ctx *)lru_evict_ctx;
+ ctx = evict_ctx->ctx;
+
+ res = container_of(lru_entity, struct ttm_resource, lru_entity);
+ bo = res->bo;
+ bdev = bo->bdev;
+
if (bo->deleted) {
ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
ctx->no_wait_gpu, locked);
@@ -650,6 +748,28 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
return ret;
}
+struct drm_lru_evict_func ttm_evict_func = {
+ .evict_allowable = ttm_mem_evict_allowable,
+ .evict_busy_entity = ttm_mem_evict_busy_entity,
+ .evict_entity = ttm_mem_evict_entity
+};
+EXPORT_SYMBOL(ttm_evict_func);
+
+int ttm_mem_evict_first(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ const struct ttm_place *place,
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket)
+{
+ struct drm_lru_evict_ctx evict_ctx = {
+ .data1 = place,
+ .data2 = ctx,
+ .data3 = ticket
+ };
+
+ return drm_lru_evict_first(man->lru_mgr, &evict_ctx);
+}
+
/**
* ttm_bo_pin - Pin the buffer object.
* @bo: The buffer object to pin
@@ -50,6 +50,7 @@ struct ttm_place;
struct ttm_resource;
struct ttm_resource_manager;
struct ttm_tt;
+struct drm_lru_evict_func;
/**
* enum ttm_bo_type
@@ -424,4 +425,5 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
pgprot_t tmp);
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
+extern struct drm_lru_evict_func ttm_evict_func;
#endif
Implement ttm_mem_evict_allowable, ttm_mem_evict_entity and ttm_mem_evict_busy_entity. Those are callback functions from the drm lru manager. Register those functions during drm lru entity initialization. These 3 functions are split out of the original ttm_mem_evict_first function. Reimplemented the ttm_mem_evict_first function using the drm_lru_evict_first function. For now, drm_lru_evict_first just calls back into the above 3 functions which were split from ttm_mem_evict_first, so there is no functional change. In the future, when SVM code is added, the drm_lru_evict_first function can also call into SVM resource eviction functions, thus TTM and SVM can mutually evict resources from each other. Signed-off-by: Oak Zeng <oak.zeng@intel.com> --- drivers/gpu/drm/ttm/ttm_bo.c | 192 ++++++++++++++++++++++++++++------- include/drm/ttm/ttm_bo.h | 2 + 2 files changed, 158 insertions(+), 36 deletions(-)