There is no need to track the currently active fence. The GPU scheduler
keeps track of all the in-flight jobs.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
---
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 7 ++-----
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 1 -
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -994,7 +994,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
         complete(&gpu->event_free);
         bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
         spin_unlock_irqrestore(&gpu->event_spinlock, flags);
-        gpu->completed_fence = gpu->active_fence;
 
         etnaviv_gpu_hw_init(gpu);
         gpu->lastctx = NULL;
@@ -1306,8 +1305,6 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
                 goto out_unlock;
         }
 
-        gpu->active_fence = gpu_fence->seqno;
-
         if (submit->nr_pmrs) {
                 gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
                 kref_get(&submit->refcount);
@@ -1806,8 +1803,8 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
         struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
         u32 idle, mask;
 
-        /* If we have outstanding fences, we're not idle */
-        if (gpu->completed_fence != gpu->active_fence)
+        /* If there are any jobs in the HW queue, we're not idle */
+        if (atomic_read(&gpu->sched.hw_rq_count))
                 return -EBUSY;
 
         /* Check whether the hardware (except FE) is idle */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -121,7 +121,6 @@ struct etnaviv_gpu {
         struct mutex fence_lock;
         struct idr fence_idr;
         u32 next_fence;
-        u32 active_fence;
         u32 completed_fence;
         wait_queue_head_t fence_event;
         u64 fence_context;
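For readers without the scheduler sources at hand, here is a minimal
standalone sketch (hypothetical toy_* names, not etnaviv or drm_sched code)
of the bookkeeping the commit message relies on: the scheduler bumps an
in-flight counter when it hands a job to the hardware and drops it when the
job's fence signals, so an idle check needs only that counter, and a
driver-maintained active_fence copy adds no information.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for drm_gpu_scheduler / etnaviv_gpu state. */
struct toy_sched {
        atomic_uint hw_rq_count;        /* jobs handed to the HW, not yet finished */
};

struct toy_gpu {
        struct toy_sched sched;
        unsigned int next_fence;        /* seqno handed out at submit time */
        unsigned int completed_fence;   /* seqno of the last finished job */
};

/* Submit path: the scheduler counts the job before the HW sees it. */
static unsigned int toy_submit(struct toy_gpu *gpu)
{
        atomic_fetch_add(&gpu->sched.hw_rq_count, 1);
        return ++gpu->next_fence;
}

/* Completion path: the counter drops when the job's fence signals. */
static void toy_complete(struct toy_gpu *gpu, unsigned int seqno)
{
        gpu->completed_fence = seqno;
        atomic_fetch_sub(&gpu->sched.hw_rq_count, 1);
}

/*
 * Idle check equivalent to the one the patch puts into
 * etnaviv_gpu_rpm_suspend(): no in-flight jobs means idle, with no
 * need to compare completed_fence against a separate active_fence.
 */
static bool toy_idle(struct toy_gpu *gpu)
{
        return atomic_load(&gpu->sched.hw_rq_count) == 0;
}

int main(void)
{
        struct toy_gpu gpu = { 0 };

        printf("idle at start:           %d\n", toy_idle(&gpu)); /* 1 */
        unsigned int seqno = toy_submit(&gpu);
        printf("idle with job in flight: %d\n", toy_idle(&gpu)); /* 0 */
        toy_complete(&gpu, seqno);
        printf("idle after completion:   %d\n", toy_idle(&gpu)); /* 1 */
        return 0;
}

Keeping this state in one place also avoids the two copies drifting apart,
e.g. across a GPU reset, where the driver-side fence bookkeeping had to be
fixed up by hand (which is what the line removed from
etnaviv_gpu_recover_hang() did).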