@@ -51,6 +51,7 @@ static int live_gt_resume(void *arg)
int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_rc6_busy),
SUBTEST(live_rc6_manual),
SUBTEST(live_gt_resume),
};
@@ -11,6 +11,7 @@
#include "selftest_rc6.h"
#include "selftests/i915_random.h"
+#include "selftests/igt_spinner.h"
int live_rc6_manual(void *arg)
{
@@ -202,3 +203,114 @@ int live_rc6_ctx_wa(void *arg)
kfree(engines);
return err;
}
+
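+/*
+ * Sample the rc6 residency counter (GEN6_GT_GFX_RC6) before and after
+ * sleeping for @interval milliseconds and return the delta; a non-zero
+ * delta means the gpu spent time in rc6 during that window.
+ */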
+static u32 measure_rc6(struct intel_uncore *uncore, u32 interval)
+{
+ u32 ec1, ec2;
+
+ ec1 = intel_uncore_read(uncore, GEN6_GT_GFX_RC6);
+
+ /*
+ * There is no need to sleep for precisely the interval time; the
+ * caller passes at least twice the evaluation interval so that the
+ * residency counter is guaranteed a chance to advance.
+ */
+ msleep(interval);
+
+ ec2 = intel_uncore_read(uncore, GEN6_GT_GFX_RC6);
+
+ pr_info("interval:%x [%dms], threshold:%x, rc6:%x\n",
+ intel_uncore_read(uncore, GEN6_RC_EVALUATION_INTERVAL),
+ interval,
+ intel_uncore_read(uncore, GEN6_RC6_THRESHOLD),
+ ec2 - ec1);
+
+ /* Guard against counter wraparound; ec2 should normally be >= ec1 */
+ return (ec2 >= ec1) ? ec2 - ec1 : 0;
+}
+
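+/*
+ * Convert the hardware evaluation interval to milliseconds and check
+ * whether any rc6 residency accumulates over twice that period.
+ */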
+static bool is_rc6_active(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ intel_wakeref_t wakeref;
+ u32 interval;
+
+ wakeref = intel_runtime_pm_get(uncore->rpm);
+
+ interval = intel_uncore_read(uncore, GEN6_RC_EVALUATION_INTERVAL);
+
+ /* The interval is stored in units of 1.28us */
+ interval = div_u64(mul_u32_u32(interval, 128),
+ 100 * 1000); /* => milliseconds */
+
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+
+ return !!measure_rc6(uncore, 2 * interval);
+}
+
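+/*
+ * For each engine, submit a spinning request and verify that no rc6
+ * residency accumulates while the gpu is busy, then let the gt idle
+ * and verify that rc6 is entered again.
+ */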
+int live_rc6_busy(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rc6 *rc6 = &gt->rc6;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ intel_wakeref_t wakeref;
+ enum intel_engine_id id;
+ int err;
+
+ if (!rc6->supported)
+ return 0;
+
+ err = igt_spinner_init(&spin, gt);
+ if (err)
+ return err;
+
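+ /* Hold a runtime pm wakeref so the device stays awake for mmio access */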
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq;
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ /* Sample rc6 only once the spinner is actually executing */
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("%s: spinner did not start\n", engine->name);
+ igt_spinner_end(&spin);
+ i915_request_put(rq);
+ err = -ETIME;
+ break;
+ }
+
+ /* The gpu is busy, we shouldn't be in rc6 */
+ if (is_rc6_active(rc6)) {
+ pr_err("%s: entered rc6 while the gpu was busy\n",
+ engine->name);
+ err = -EINVAL;
+ }
+
+ igt_spinner_end(&spin);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ break;
+
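+ /* Retire the request and wait for the gt wakeref to drop before sampling */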
+ intel_gt_wait_for_idle(gt, HZ / 5);
+ intel_gt_pm_wait_for_idle(gt);
+
+ /* The gpu is idle, we should be in rc6 */
+ if (!is_rc6_active(rc6)) {
+ pr_err("%s: idle but did not enter rc6\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+ }
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+
+ igt_spinner_fini(&spin);
+ return err;
+}
@@ -7,6 +7,7 @@
#ifndef SELFTEST_RC6_H
#define SELFTEST_RC6_H
+int live_rc6_busy(void *arg);
int live_rc6_ctx_wa(void *arg);
int live_rc6_manual(void *arg);