diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -935,6 +935,16 @@ void intel_rps_park(struct intel_rps *rps)
 	GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
 }
 
+void intel_rps_update_waiters(struct intel_rps *rps)
+{
+	struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+	if (rps_uses_slpc(rps))
+		intel_guc_slpc_update_waiters(slpc);
+	else
+		atomic_dec(&rps->num_waiters);
+}
+
 void intel_rps_boost(struct i915_request *rq)
 {
 	if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
@@ -944,6 +954,9 @@ void intel_rps_boost(struct i915_request *rq)
 	if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
 		struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
 
+		if (rps_uses_slpc(rps))
+			return intel_guc_slpc_boost(rps_to_slpc(rps));
+
 		if (atomic_fetch_inc(&rps->num_waiters))
 			return;

diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -23,6 +23,7 @@ void intel_rps_disable(struct intel_rps *rps);
 void intel_rps_park(struct intel_rps *rps);
 void intel_rps_unpark(struct intel_rps *rps);
 void intel_rps_boost(struct i915_request *rq);
+void intel_rps_update_waiters(struct intel_rps *rps);
 int intel_rps_set(struct intel_rps *rps, u8 val);
 void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -598,6 +598,34 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
 	return 0;
 }
 
+void intel_guc_slpc_boost(struct intel_guc_slpc *slpc)
+{
+	/*
+	 * Raise min freq to boost. It's possible this is greater
+	 * than the current max, but it will be clamped to RP0 in
+	 * any case. An error setting the min param is not fatal.
+	 */
+	if (!slpc->num_waiters)
+		slpc_set_param(slpc,
+			       SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+			       slpc->boost_freq);
+
+	slpc->num_waiters++;
+}
+
+void intel_guc_slpc_update_waiters(struct intel_guc_slpc *slpc)
+{
+	/*
+	 * Return min back to the softlimit. This is called during
+	 * request retire, so a failure to set the min param here
+	 * is not fatal.
+	 */
+	if (!(--slpc->num_waiters))
+		slpc_set_param(slpc,
+			       SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+			       slpc->min_freq_softlimit);
+}
+
 int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
 {
 	struct drm_i915_private *i915 = slpc_to_i915(slpc);

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -38,5 +38,7 @@ int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val);
 int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val);
 int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p);
 void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
+void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
+void intel_guc_slpc_update_waiters(struct intel_guc_slpc *slpc);
 
 #endif

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -339,7 +339,7 @@ bool i915_request_retire(struct i915_request *rq)
 	}
 
 	if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
-		atomic_dec(&rq->engine->gt->rps.num_waiters);
+		intel_rps_update_waiters(&rq->engine->gt->rps);
 
 	/*
 	 * We only loosely track inflight requests across preemption,

Add helpers in RPS code for handling SLPC and non-SLPC cases. When a
boost is requested in the SLPC case, we can ask GuC to ramp up the
frequency by setting the minimum frequency to RP0. Reset the frequency
back to the min softlimit when there are no more waiters.

Signed-off-by: Vinay Belgaumkar <vinay.belgaumkar@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_rps.c         | 13 ++++++++++
 drivers/gpu/drm/i915/gt/intel_rps.h         |  1 +
 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 28 +++++++++++++++++++++
 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h |  2 ++
 drivers/gpu/drm/i915/i915_request.c         |  2 +-
 5 files changed, 45 insertions(+), 1 deletion(-)
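
As a sanity check of the waiter accounting, below is a minimal user-space
model of the boost lifecycle the patch implements. The set_min_freq() stub
and the frequency values are illustrative stand-ins for slpc_set_param()
and the driver's boost_freq/min_freq_softlimit fields, not the driver's
actual API:

#include <stdio.h>

/* Illustrative stand-ins for slpc->boost_freq / min_freq_softlimit (MHz). */
#define BOOST_FREQ_MHZ     1100 /* RP0-like boost target */
#define MIN_SOFTLIMIT_MHZ   300 /* default minimum */

static unsigned int num_waiters;

/* Stub modeling slpc_set_param(SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ). */
static void set_min_freq(unsigned int mhz)
{
	printf("min freq -> %u MHz\n", mhz);
}

/* Mirrors intel_guc_slpc_boost(): only the first waiter raises min freq. */
static void boost(void)
{
	if (!num_waiters)
		set_min_freq(BOOST_FREQ_MHZ);
	num_waiters++;
}

/* Mirrors intel_guc_slpc_update_waiters(): last waiter restores softlimit. */
static void unboost(void)
{
	if (!--num_waiters)
		set_min_freq(MIN_SOFTLIMIT_MHZ);
}

int main(void)
{
	boost();   /* first waiter: min -> 1100 MHz */
	boost();   /* second waiter: no extra set_param */
	unboost(); /* one waiter still outstanding: stays boosted */
	unboost(); /* last waiter retired: min -> 300 MHz */
	return 0;
}

Only the 0 -> 1 and 1 -> 0 waiter transitions touch the min-frequency
param, so the set_param traffic to GuC stays bounded no matter how many
requests are being waited on concurrently.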