@@ -51,6 +51,9 @@ static int live_gt_resume(void *arg)
int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_rc6_basic),
+ SUBTEST(live_rc6_threshold),
+ SUBTEST(live_rc6_busy),
SUBTEST(live_rc6_manual),
SUBTEST(live_gt_resume),
};
@@ -11,6 +11,7 @@
#include "selftest_rc6.h"
#include "selftests/i915_random.h"
+#include "selftests/igt_spinner.h"
int live_rc6_manual(void *arg)
{
@@ -202,3 +203,192 @@ int live_rc6_ctx_wa(void *arg)
kfree(engines);
return err;
}
+
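+/*
+ * Sample the RC6 residency counter across roughly two evaluation
+ * intervals and check that it advances if and only if RC6 is expected
+ * to be entered ('enabled').
+ */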
+static bool test_rc6(struct intel_rc6 *rc6, bool enabled)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ intel_wakeref_t wakeref;
+ u32 ec1, ec2;
+ u32 interval;
+
+ wakeref = intel_runtime_pm_get(uncore->rpm);
+
+ interval = intel_uncore_read(uncore, GEN6_RC_EVALUATION_INTERVAL);
+
+ /*
+ * The interval register counts in units of 1.28us:
+ * interval * 1.28us = interval * 128 / 100 us, then / 1000 for ms.
+ */
+ interval = div_u64(mul_u32_u32(interval, 128),
+ 100 * 1000); /* => milliseconds */
+
+ ec1 = intel_uncore_read(uncore, GEN6_GT_GFX_RC6);
+
+ /*
+ * There is no need to wait for precisely one evaluation
+ * interval; sleep for at least twice that time so the RC6
+ * residency counter has a chance to advance.
+ */
+ msleep(2 * interval);
+
+ ec2 = intel_uncore_read(uncore, GEN6_GT_GFX_RC6);
+
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+
+ return enabled != (ec1 >= ec2);
+}
+
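+/*
+ * Verify that the RC6 residency counter advances only while RC6 is
+ * enabled, checking both the enabled and the disabled state and
+ * restoring the original state afterwards.
+ */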
+int live_rc6_basic(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rc6 *rc6 = &gt->rc6;
+ intel_wakeref_t wakeref;
+ int i, err = 0;
+
+ if (!rc6->supported)
+ return 0;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ /*
+ * Check RC6 twice: once with the current enable state and
+ * once with it toggled. The second toggle restores the
+ * previous state before returning.
+ */
+ for (i = 0; i < 2; i++) {
+ if (!test_rc6(rc6, rc6->enabled)) {
+ err = -EINVAL;
+
+ /* on the second pass, fall through so the toggle below restores the state */
+ if (!i)
+ goto exit;
+ }
+
+ if (rc6->enabled)
+ intel_rc6_disable(&gt->rc6);
+ else
+ intel_rc6_enable(&gt->rc6);
+ }
+
+exit:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ return err;
+}
+
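+/*
+ * Program the RC6 promotion threshold and the evaluation interval and
+ * check that residency advances only when the interval is shorter than
+ * the threshold. The original register values are restored on exit.
+ */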
+int live_rc6_threshold(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_rc6 *rc6 = &gt->rc6;
+ intel_wakeref_t wakeref;
+ u32 threshold, interval;
+ u32 t_orig, i_orig;
+ int err = 0;
+
+ if (!rc6->supported)
+ return 0;
+
+ wakeref = intel_runtime_pm_get(uncore->rpm);
+
+ t_orig = intel_uncore_read(uncore, GEN6_RC6_THRESHOLD);
+ i_orig = intel_uncore_read(uncore, GEN6_RC_EVALUATION_INTERVAL);
+
+ /*
+ * Set the threshold to 50ms.
+ *
+ * 50ms = 50000us; the register counts in 1.28us units, so
+ * 50000 / 1.28 ~= 50000 / 128 * 100 (integer math, no floating point).
+ */
+ threshold = 50 * 1000 / 128 * 100;
+ intel_uncore_write(uncore, GEN6_RC6_THRESHOLD, threshold);
+
+ /* set the interval to roughly half the threshold */
+ interval = threshold / 2;
+ intel_uncore_write(uncore, GEN6_RC_EVALUATION_INTERVAL, interval);
+
+ /* interval < threshold */
+ if (!test_rc6(rc6, true)) {
+ pr_err("i915 mismatch: rc6 with interval < threshold\n");
+ err = -EINVAL;
+ }
+
+ /* set the interval to roughly twice the threshold */
+ interval = threshold * 2;
+ intel_uncore_write(uncore, GEN6_RC_EVALUATION_INTERVAL, interval);
+
+ /* interval > threshold */
+ if (!test_rc6(rc6, false)) {
+ pr_err("i915 mismatch: not in rc6 with interval > threshold\n");
+ err = -EINVAL;
+ }
+
+ intel_uncore_write(uncore, GEN6_RC6_THRESHOLD, t_orig);
+ intel_uncore_write(uncore, GEN6_RC_EVALUATION_INTERVAL, i_orig);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+
+ return err;
+}
+
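+/*
+ * Keep each engine busy with a spinner and check that RC6 residency
+ * does not advance while busy; once the spinner is stopped and the GT
+ * is idle, the residency counter must advance again.
+ */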
+int live_rc6_busy(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rc6 *rc6 = &gt->rc6;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ intel_wakeref_t wakeref;
+ enum intel_engine_id id;
+ int err;
+
+ if (!rc6->supported)
+ return 0;
+
+ err = igt_spinner_init(&spin, gt);
+ if (err)
+ return err;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq;
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ igt_wait_for_spinner(&spin, rq); /* wait until the spinner is running */
+
+ /* gpu is busy, we shouldn't be in rc6 */
+ if (!test_rc6(rc6, false)) {
+ pr_err("%s: never busy enough for having a nap\n",
+ engine->name);
+ err = -EINVAL;
+ }
+
+ igt_spinner_end(&spin);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ intel_gt_wait_for_idle(gt, HZ / 5);
+ intel_gt_pm_wait_for_idle(gt);
+
+ /* gpu is idle, we should be in rc6 */
+ if (!test_rc6(rc6, true)) {
+ pr_err("%s is idle but did not enter rc6\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+ }
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+
+ igt_spinner_fini(&spin);
+ return err;
+}
@@ -7,6 +7,9 @@
#ifndef SELFTEST_RC6_H
#define SELFTEST_RC6_H
+int live_rc6_basic(void *arg);
+int live_rc6_threshold(void *arg);
+int live_rc6_busy(void *arg);
int live_rc6_ctx_wa(void *arg);
int live_rc6_manual(void *arg);