--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -655,23 +655,6 @@ void xe_device_assert_mem_access(struct xe_device *xe)
xe_assert(xe, !xe_pm_runtime_suspended(xe));
}
-bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe)
-{
- bool active;
-
- if (xe_pm_read_callback_task(xe) == current)
- return true;
-
- active = xe_pm_runtime_get_if_active(xe);
- if (active) {
- int ref = atomic_inc_return(&xe->mem_access.ref);
-
- xe_assert(xe, ref != S32_MAX);
- }
-
- return active;
-}
-
void xe_device_mem_access_get(struct xe_device *xe)
{
int ref;
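
For reference, the xe_pm wrappers that callers now use directly are thin shims over the runtime PM core. A minimal sketch of how they looked in xe_pm.c around this series (pm_runtime_get_if_active() still took its ign_usage_count argument at the time, so treat this as illustration rather than the exact tree state):

	/* Take a runtime PM reference only if the device is already active. */
	int xe_pm_runtime_get_if_active(struct xe_device *xe)
	{
		return pm_runtime_get_if_active(xe->drm.dev, true);
	}

	/* Drop a reference, noting activity for the autosuspend timer. */
	void xe_pm_runtime_put(struct xe_device *xe)
	{
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}

The removed helper added two things on top of this: the callback-task short-circuit, which callers now spell out explicitly, and the xe->mem_access.ref bump that this series is phasing out.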
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -134,7 +134,6 @@ static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
}
void xe_device_mem_access_get(struct xe_device *xe);
-bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe);
void xe_device_mem_access_put(struct xe_device *xe);
void xe_device_assert_mem_access(struct xe_device *xe);
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -1203,7 +1203,7 @@ void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
bool ongoing;
int len;
- ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
+ ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
return;
@@ -1216,7 +1216,7 @@ void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
spin_unlock(&ct->fast_lock);
if (ongoing)
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
}
/* Returns less than zero on error, 0 on done, 1 on more available */
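
Both CT paths now follow the same pairing rule: only put the reference you actually took. A condensed sketch of the pattern (example_process_g2h() is a hypothetical stand-in for the real message handling):

	bool ongoing = xe_pm_runtime_get_if_active(xe);

	/*
	 * No reference and no runtime PM callback in flight: the device is
	 * (or may be going) suspended and there is nothing to process. If a
	 * callback *is* in flight, the PM core is transitioning the device
	 * itself, so CT processing must proceed without taking a reference.
	 */
	if (!ongoing && xe_pm_read_callback_task(xe) == NULL)
		return;

	example_process_g2h(xe);	/* hypothetical stand-in */

	/* Only drop the reference we actually took. */
	if (ongoing)
		xe_pm_runtime_put(xe);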
@@ -1273,7 +1273,7 @@ static void g2h_worker_func(struct work_struct *w)
* responses, if the worker here is blocked on those callbacks
* completing, creating a deadlock.
*/
- ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
+ ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
return;
@@ -1292,7 +1292,7 @@ static void g2h_worker_func(struct work_struct *w)
} while (ret == 1);
if (ongoing)
- xe_device_mem_access_put(ct_to_xe(ct));
+ xe_pm_runtime_put(ct_to_xe(ct));
}
static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
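
Why xe_pm_runtime_get_if_active() rather than a plain get? As the (truncated) comment above the worker hunk explains, the runtime PM callbacks themselves rely on CT to talk to the GuC and may block waiting on G2H responses; if the worker in turn blocked waiting for the device to resume, the two would deadlock. In other words, the anti-pattern below (illustration only, using the real xe_pm_runtime_get(), which may synchronously resume the device) is exactly what these paths must avoid:

	/*
	 * DON'T: a blocking get can wait for the resume callback, while the
	 * resume callback waits for this worker to drain G2H: a deadlock.
	 */
	xe_pm_runtime_get(xe);
	example_process_g2h(xe);	/* hypothetical stand-in */
	xe_pm_runtime_put(xe);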