@@ -1183,15 +1183,20 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
u64 total, gt_stamp_saved;
unsigned long flags;
u32 reset_count;
+ bool in_reset;
spin_lock_irqsave(&guc->timestamp.lock, flags);
/*
- * If a reset happened, we risk reading partially updated
- * engine busyness from GuC, so we just use the driver stored
- * copy of busyness. Synchronize with gt reset using reset_count.
+ * If a reset happened, we risk reading partially updated engine
+ * busyness from GuC, so we just use the driver stored copy of busyness.
+ * Synchronize with gt reset using reset_count and the
+ * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count
+ * after I915_RESET_BACKOFF flag, so ensure that the reset_count is
+ * usable by checking the flag afterwards.
*/
reset_count = i915_reset_count(gpu_error);
+ in_reset = test_bit(I915_RESET_BACKOFF, &gt->reset.flags);
*now = ktime_get();
@@ -1201,9 +1206,9 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
* start_gt_clk is derived from GuC state. To get a consistent
* view of activity, we query the GuC state only if gt is awake.
*/
- stats_saved = *stats;
- gt_stamp_saved = guc->timestamp.gt_stamp;
- if (intel_gt_pm_get_if_awake(gt)) {
+ if (intel_gt_pm_get_if_awake(gt) && !in_reset) {
+ stats_saved = *stats;
+ gt_stamp_saved = guc->timestamp.gt_stamp;
guc_update_engine_gt_clks(engine);
guc_update_pm_timestamp(guc, engine, now);
intel_gt_pm_put_async(gt);