[RFC,i-g-t,4/6] tests/perf_pmu: Add tests for engine queued/runnable/running stats

Message ID: 20181003120718.6898-5-tvrtko.ursulin@linux.intel.com
Series: 21st century intel_gpu_top & Co.

Commit Message

Tvrtko Ursulin Oct. 3, 2018, 12:07 p.m. UTC
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Simple tests to check that the reported queued, runnable and running depths are correct.
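
The counters are expected to accumulate the respective depth over time,
scaled by I915_SAMPLE_QUEUED_DIVISOR, so the tests derive an average
depth from two counter/timestamp reads. Below is a minimal sketch of the
conversion calc_queued() in the patch performs; calc_depth and the
divisor value of 1024 are illustrative only, the tests use the real uAPI
constant:

  #include <stdint.h>
  #include <stdio.h>

  /* Average depth = counter delta, normalised by the divisor and by the
   * elapsed time converted from nanoseconds to seconds.
   */
  static double calc_depth(uint64_t d_val, uint64_t d_ns, uint64_t divisor)
  {
  	return (double)d_val * 1e9 / divisor / d_ns;
  }

  int main(void)
  {
  	/* A delta of 3 * divisor accumulated over one second corresponds
  	 * to an average depth of 3.0 (illustrative divisor of 1024).
  	 */
  	printf("%.2f\n", calc_depth(3 * 1024, 1000000000ULL, 1024));
  	return 0;
  }

Each subtest submits a known number of requests, samples the counter
twice over a fixed interval and asserts the derived depth is within
tolerance of the expected count.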

v2:
 * Improvements similar to ones from i915_query.c.

v3:
 * Rebase for __igt_spin_batch_new.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 tests/perf_pmu.c | 259 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 259 insertions(+)

Patch

diff --git a/tests/perf_pmu.c b/tests/perf_pmu.c
index 6ab2595b114d..6f064b605a4c 100644
--- a/tests/perf_pmu.c
+++ b/tests/perf_pmu.c
@@ -169,6 +169,7 @@  static unsigned int e2ring(int gem_fd, const struct intel_execution_engine2 *e)
 #define TEST_RUNTIME_PM (8)
 #define FLAG_LONG (16)
 #define FLAG_HANG (32)
+#define TEST_CONTEXTS (64)
 
 static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
 {
@@ -959,6 +960,224 @@  multi_client(int gem_fd, const struct intel_execution_engine2 *e)
 	assert_within_epsilon(val[1], perf_slept[1], tolerance);
 }
 
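+/*
+ * Convert a counter delta and its timestamp delta into the average
+ * queued/runnable/running depth over the sampled interval, undoing the
+ * I915_SAMPLE_QUEUED_DIVISOR scaling and converting ns to seconds.
+ */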
+static double calc_queued(uint64_t d_val, uint64_t d_ns)
+{
+	return (double)d_val * 1e9 / I915_SAMPLE_QUEUED_DIVISOR / d_ns;
+}
+
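+/*
+ * Submit a varying number of requests held back by a fence cork,
+ * optionally each on its own context, and check the QUEUED metric
+ * matches the number of requests blocked on the fence.
+ */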
+static void
+queued(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
+{
+	const unsigned long engine = e2ring(gem_fd, e);
+	const uint32_t bbe = MI_BATCH_BUFFER_END;
+	const unsigned int max_rq = 10;
+	double queued[max_rq + 1];
+	unsigned int n, i;
+	uint64_t val[2];
+	uint64_t ts[2];
+	uint32_t bo;
+	int fd;
+
+	igt_require_sw_sync();
+	if (flags & TEST_CONTEXTS)
+		gem_require_contexts(gem_fd);
+
+	memset(queued, 0, sizeof(queued));
+
+	bo = gem_create(gem_fd, 4096);
+	gem_write(gem_fd, bo, 4092, &bbe, sizeof(bbe));
+
+	fd = open_pmu(I915_PMU_ENGINE_QUEUED(e->class, e->instance));
+
+	for (n = 0; n <= max_rq; n++) {
+		IGT_CORK_FENCE(cork);
+		int fence = -1;
+
+		gem_quiescent_gpu(gem_fd);
+
+		if (n)
+			fence = igt_cork_plug(&cork, -1);
+
+		for (i = 0; i < n; i++) {
+			struct drm_i915_gem_exec_object2 obj = { };
+			struct drm_i915_gem_execbuffer2 eb = { };
+
+			obj.handle = bo;
+
+			eb.buffer_count = 1;
+			eb.buffers_ptr = to_user_pointer(&obj);
+
+			eb.flags = engine | I915_EXEC_FENCE_IN;
+			if (flags & TEST_CONTEXTS)
+				eb.rsvd1 = gem_context_create(gem_fd);
+			eb.rsvd2 = fence;
+
+			gem_execbuf(gem_fd, &eb);
+
+			if (flags & TEST_CONTEXTS)
+				gem_context_destroy(gem_fd, eb.rsvd1);
+		}
+
+		val[0] = __pmu_read_single(fd, &ts[0]);
+		usleep(batch_duration_ns / 1000);
+		val[1] = __pmu_read_single(fd, &ts[1]);
+
+		queued[n] = calc_queued(val[1] - val[0], ts[1] - ts[0]);
+		igt_info("n=%u queued=%.2f\n", n, queued[n]);
+
+		if (fence >= 0)
+			igt_cork_unplug(&cork);
+
+		for (i = 0; i < n; i++)
+			gem_sync(gem_fd, bo);
+	}
+
+	close(fd);
+
+	gem_close(gem_fd, bo);
+
+	for (i = 0; i <= max_rq; i++)
+		assert_within_epsilon(queued[i], i, tolerance);
+}
+
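+/*
+ * Estimate how long the first spinner takes to start executing, by
+ * busy-waiting on its running report when available and falling back to
+ * a fixed sleep otherwise, so the caller can scale the settle time by
+ * the number of submitted spinners.
+ */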
+static unsigned long __query_wait(igt_spin_t *spin, unsigned int n)
+{
+	struct timespec ts = { };
+	unsigned long t;
+
+	igt_nsec_elapsed(&ts);
+
+	if (spin->running) {
+		igt_spin_busywait_until_running(spin);
+	} else {
+		igt_debug("__spin_wait - usleep mode\n");
+		usleep(500e3); /* Better than nothing! */
+	}
+
+	t = igt_nsec_elapsed(&ts);
+
+	return spin->running ? t : 500e6 / n;
+}
+
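+/*
+ * Submit a varying number of spinning batches, each on its own context
+ * where contexts are supported, and check the RUNNABLE metric scales
+ * with the number of submitted spinners.
+ */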
+static void
+runnable(int gem_fd, const struct intel_execution_engine2 *e)
+{
+	const unsigned long engine = e2ring(gem_fd, e);
+	bool contexts = gem_has_contexts(gem_fd);
+	const unsigned int max_rq = 10;
+	igt_spin_t *spin[max_rq + 1];
+	double runnable[max_rq + 1];
+	uint32_t ctx[max_rq];
+	unsigned int n, i;
+	uint64_t val[2];
+	uint64_t ts[2];
+	int fd;
+
+	memset(runnable, 0, sizeof(runnable));
+
+	if (contexts) {
+		for (i = 0; i < max_rq; i++)
+			ctx[i] = gem_context_create(gem_fd);
+	}
+
+	fd = open_pmu(I915_PMU_ENGINE_RUNNABLE(e->class, e->instance));
+
+	for (n = 0; n <= max_rq; n++) {
+		gem_quiescent_gpu(gem_fd);
+
+		for (i = 0; i < n; i++) {
+			uint32_t ctx_ = contexts ? ctx[i] : 0;
+
+			if (i == 0)
+				spin[i] = __spin_poll(gem_fd, ctx_, engine);
+			else
+				spin[i] = __igt_spin_batch_new(gem_fd,
+							       .ctx = ctx_,
+							       .engine = engine);
+		}
+
+		if (n)
+			usleep(__query_wait(spin[0], n) * n);
+
+		val[0] = __pmu_read_single(fd, &ts[0]);
+		usleep(batch_duration_ns / 1000);
+		val[1] = __pmu_read_single(fd, &ts[1]);
+
+		runnable[n] = calc_queued(val[1] - val[0], ts[1] - ts[0]);
+		igt_info("n=%u runnable=%.2f\n", n, runnable[n]);
+
+		for (i = 0; i < n; i++) {
+			end_spin(gem_fd, spin[i], FLAG_SYNC);
+			igt_spin_batch_free(gem_fd, spin[i]);
+		}
+	}
+
+	if (contexts) {
+		for (i = 0; i < max_rq; i++)
+			gem_context_destroy(gem_fd, ctx[i]);
+	}
+
+	close(fd);
+
+	assert_within_epsilon(runnable[0], 0, tolerance);
+	igt_assert(runnable[max_rq] > 0.0);
+
+	if (contexts)
+		assert_within_epsilon(runnable[max_rq] - runnable[max_rq - 1],
+				      1, tolerance);
+}
+
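+/*
+ * Submit a varying number of spinning batches on the default context and
+ * check the RUNNING metric is zero when idle and non-zero whenever at
+ * least one batch is executing.
+ */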
+static void
+running(int gem_fd, const struct intel_execution_engine2 *e)
+{
+	const unsigned long engine = e2ring(gem_fd, e);
+	const unsigned int max_rq = 10;
+	igt_spin_t *spin[max_rq + 1];
+	double running[max_rq + 1];
+	unsigned int n, i;
+	uint64_t val[2];
+	uint64_t ts[2];
+	int fd;
+
+	memset(running, 0, sizeof(running));
+	memset(spin, 0, sizeof(spin));
+
+	fd = open_pmu(I915_PMU_ENGINE_RUNNING(e->class, e->instance));
+
+	for (n = 0; n <= max_rq; n++) {
+		gem_quiescent_gpu(gem_fd);
+
+		for (i = 0; i < n; i++) {
+			if (i == 0)
+				spin[i] = __spin_poll(gem_fd, 0, engine);
+			else
+				spin[i] = __igt_spin_batch_new(gem_fd,
+							       .engine = engine);
+		}
+
+		if (n)
+			usleep(__query_wait(spin[0], n) * n);
+
+		val[0] = __pmu_read_single(fd, &ts[0]);
+		usleep(batch_duration_ns / 1000);
+		val[1] = __pmu_read_single(fd, &ts[1]);
+
+		running[n] = calc_queued(val[1] - val[0], ts[1] - ts[0]);
+		igt_info("n=%u running=%.2f\n", n, running[n]);
+
+		for (i = 0; i < n; i++) {
+			end_spin(gem_fd, spin[i], FLAG_SYNC);
+			igt_spin_batch_free(gem_fd, spin[i]);
+		}
+	}
+
+	close(fd);
+
+	assert_within_epsilon(running[0], 0, tolerance);
+	for (i = 1; i <= max_rq; i++)
+		igt_assert(running[i] > 0);
+}
+
 /**
 * Tests that the i915 PMU correctly errors out on invalid initialization.
 * i915 PMU is an uncore PMU, thus:
@@ -1709,6 +1928,15 @@  igt_main
 		igt_subtest_f("init-sema-%s", e->name)
 			init(fd, e, I915_SAMPLE_SEMA);
 
+		igt_subtest_f("init-queued-%s", e->name)
+			init(fd, e, I915_SAMPLE_QUEUED);
+
+		igt_subtest_f("init-runnable-%s", e->name)
+			init(fd, e, I915_SAMPLE_RUNNABLE);
+
+		igt_subtest_f("init-running-%s", e->name)
+			init(fd, e, I915_SAMPLE_RUNNING);
+
 		igt_subtest_group {
 			igt_fixture {
 				gem_require_engine(fd, e->class, e->instance);
@@ -1814,6 +2042,27 @@  igt_main
 
 			igt_subtest_f("busy-hang-%s", e->name)
 				single(fd, e, TEST_BUSY | FLAG_HANG);
+
+			/**
+			 * Test that the queued metric works.
+			 */
+			igt_subtest_f("queued-%s", e->name)
+				queued(fd, e, 0);
+
+			igt_subtest_f("queued-contexts-%s", e->name)
+				queued(fd, e, TEST_CONTEXTS);
+
+			/**
+			 * Test that the runnable metric works.
+			 */
+			igt_subtest_f("runnable-%s", e->name)
+				runnable(fd, e);
+
+			/**
+			 * Test that the running metric works.
+			 */
+			igt_subtest_f("running-%s", e->name)
+				running(fd, e);
 		}
 
 		/**
@@ -1906,6 +2155,16 @@  igt_main
 					      e->name)
 					single(render_fd, e,
 					       TEST_BUSY | TEST_TRAILING_IDLE);
+				igt_subtest_f("render-node-queued-%s", e->name)
+					queued(render_fd, e, 0);
+				igt_subtest_f("render-node-queued-contexts-%s",
+					      e->name)
+					queued(render_fd, e, TEST_CONTEXTS);
+				igt_subtest_f("render-node-runnable-%s",
+					      e->name)
+					runnable(render_fd, e);
+				igt_subtest_f("render-node-running-%s", e->name)
+					running(render_fd, e);
 			}
 		}