diff mbox

[i-g-t,v4,5/5] gem_scheduler: Added subtests to test priority starving

Message ID 1459342838-25267-6-git-send-email-derek.j.morton@intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

Derek Morton March 30, 2016, 1 p.m. UTC
When a batch is selected for execution all other batch buffers in the
scheduler queue get a small priority increase to prevent starving due
to continuous submissions by a high priority user.
Added a subtest to check this behaviour.

Requested by Joonas Lahtinen during scheduler code review

v4: Addressed review comments from Daniele Ceraolo Spurio

Signed-off-by: Derek Morton <derek.j.morton@intel.com>
---
 tests/gem_scheduler.c | 134 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 134 insertions(+)
diff mbox

Patch

diff --git a/tests/gem_scheduler.c b/tests/gem_scheduler.c
index 504607f..5d8fecf 100644
--- a/tests/gem_scheduler.c
+++ b/tests/gem_scheduler.c
@@ -257,6 +257,134 @@  static void run_test_basic(int in_flight, int ringid, int priority)
 	free(in_flight_bbs);
 }
 
+
+/* Test priority starving behaviour.
+ * When a batch is selected for execution all other batch buffers in the
+ * scheduler queue get a small priority increase to prevent starving due
+ * to continuous submissions by a high priority user.
+ * Submit two delay batch buffers then fill the in flight queue. Submit a
+ * slightly reduced priority batch buffer and a normal priority batch buffer.
+ * When the first delay batch buffer completes and the next is put in flight all
+ * other batch buffers in the scheduler queue should get a small priority
+ * increase.
+ * Submit another normal priority batch buffer. It should not overtake the
+ * reduced priority batch buffer whose priority has been increased.
+ */
+#define NBR_PRIO_STARVE_FDs (4)
+static void run_test_priority_starve(int in_flight, int ringid)
+{
+	int fd[NBR_PRIO_STARVE_FDs];
+	int loop;
+	drm_intel_bufmgr *bufmgr[NBR_PRIO_STARVE_FDs];
+	uint32_t *delay_buf, *delay2_buf, *ts1_buf, *ts2_buf, *ts3_buf;
+	struct intel_batchbuffer *ts1_bb, *ts2_bb, *ts3_bb;
+	struct intel_batchbuffer **in_flight_bbs;
+	uint32_t calibrated_1s;
+	drm_intel_bo *delay_bo, *delay2_bo ,*ts1_bo, *ts2_bo, *ts3_bo;
+
+	in_flight_bbs = malloc(in_flight * sizeof(struct intel_batchbuffer *));
+	igt_assert(in_flight_bbs);
+
+	/* Need multiple i915 fd's. Scheduler will not change execution order of
+	 * batch buffers from the same context.
+	 */
+	for(loop = 0; loop < NBR_PRIO_STARVE_FDs; loop++)
+		init_context(&(fd[loop]), &(bufmgr[loop]), ringid);
+
+	/* Lower priority of first timestamp batch buffer
+	 * Second timestamp batch buffer should overtake the first
+	 * Overtaken batch should then get a priority increase so the third does
+	 * not overtake it.
+	 */
+	set_priority(fd[1], -1);
+
+	/* Create buffer objects */
+	delay_bo = create_and_check_bo(bufmgr[0], "delay bo");
+	delay2_bo = create_and_check_bo(bufmgr[0], "delay bo2");
+	ts1_bo = create_and_check_bo(bufmgr[1], "ts1 bo");
+	ts2_bo = create_and_check_bo(bufmgr[2], "ts2 bo");
+	ts3_bo = create_and_check_bo(bufmgr[3], "ts3 bo");
+
+	calibrated_1s = igt_calibrate_delay_bb(fd[0], bufmgr[0], ringid);
+
+	/* Batch buffers to fill the in flight queue */
+	in_flight_bbs[0] = create_delay_bb(fd[0], bufmgr[0], ringid, calibrated_1s, delay_bo);
+	in_flight_bbs[1] = create_delay_bb(fd[0], bufmgr[0], ringid, calibrated_1s, delay2_bo);
+	for(loop = 2; loop < in_flight; loop++)
+		in_flight_bbs[loop] = create_noop_bb(fd[0], bufmgr[0], 5);
+
+	/* Extra batch buffers in the scheduler queue */
+	ts1_bb = create_timestamp_bb(fd[1], bufmgr[1], ringid, ts1_bo, NULL, false);
+	ts2_bb = create_timestamp_bb(fd[2], bufmgr[2], ringid, ts2_bo, NULL, false);
+	ts3_bb = create_timestamp_bb(fd[3], bufmgr[3], ringid, ts3_bo, NULL, false);
+
+	/* Flush batchbuffers */
+	for(loop = 0; loop < in_flight; loop++)
+		intel_batchbuffer_flush_on_ring(in_flight_bbs[loop], ringid);
+	intel_batchbuffer_flush_on_ring(ts1_bb, ringid);
+	intel_batchbuffer_flush_on_ring(ts2_bb, ringid);
+
+	/* This will not return until the bo has finished executing */
+	drm_intel_bo_map(delay_bo, 0);
+	/* Once the first delay is complete and any bumping has occurred, submit
+	 * the final batch buffer
+	 */
+	intel_batchbuffer_flush_on_ring(ts3_bb, ringid);
+
+	drm_intel_bo_map(delay2_bo, 0);
+	drm_intel_bo_map(ts1_bo, 0);
+	drm_intel_bo_map(ts2_bo, 0);
+	drm_intel_bo_map(ts3_bo, 0);
+
+	delay_buf = delay_bo->virtual;
+	delay2_buf = delay2_bo->virtual;
+	ts1_buf = ts1_bo->virtual;
+	ts2_buf = ts2_bo->virtual;
+	ts3_buf = ts3_bo->virtual;
+
+	igt_debug("Delay Timestamp = 0x%08" PRIx32 "\n", delay_buf[2]);
+	igt_debug("Delay2 Timestamp = 0x%08" PRIx32 "\n", delay2_buf[2]);
+	igt_debug("TS1 Timestamp = 0x%08" PRIx32 "\n", ts1_buf[0]);
+	igt_debug("TS2 Timestamp = 0x%08" PRIx32 "\n", ts2_buf[0]);
+	igt_debug("TS3 Timestamp = 0x%08" PRIx32 "\n", ts3_buf[0]);
+
+	/* buf[0] in the target buffer should be 0 if the batch buffer completed */
+	igt_assert_f(delay_buf[0] == 0,
+	             "delay_buf[0] expected 0x0, got 0x%" PRIx32 "\n", delay_buf[0]);
+
+	/* Delay BB should be first */
+	igt_assert_f(igt_compare_timestamps(delay_buf[2], ts1_buf[0]),
+	             "Delay ts (0x%08" PRIx32 ") > TS1 ts (0x%08" PRIx32 ")\n",
+	             delay_buf[2], ts1_buf[0]);
+
+	/* TS2 BB should overtake TS1 BB */
+	igt_assert_f(igt_compare_timestamps(ts2_buf[0], ts1_buf[0]),
+	             "TS2 ts (0x%08" PRIx32 ") > TS1 ts (0x%08" PRIx32 ")\n",
+	             ts2_buf[0], ts1_buf[0]);
+
+	/* TS3 BB should not overtake TS1 BB */
+	igt_assert_f(igt_compare_timestamps(ts1_buf[0], ts3_buf[0]),
+	             "TS1 ts (0x%08" PRIx32 ") > TS3 ts (0x%08" PRIx32 ")\n",
+	             ts1_buf[0], ts3_buf[0]);
+
+	/* Cleanup */
+	for(loop = 0; loop < in_flight; loop++)
+		intel_batchbuffer_free(in_flight_bbs[loop]);
+	intel_batchbuffer_free(ts1_bb);
+	intel_batchbuffer_free(ts2_bb);
+	intel_batchbuffer_free(ts3_bb);
+	drm_intel_bo_unreference(delay_bo);
+	drm_intel_bo_unreference(delay2_bo);
+	drm_intel_bo_unreference(ts1_bo);
+	drm_intel_bo_unreference(ts2_bo);
+	drm_intel_bo_unreference(ts3_bo);
+	for(loop = 0; loop < NBR_PRIO_STARVE_FDs; loop++) {
+		drm_intel_bufmgr_destroy(bufmgr[loop]);
+		close(fd[loop]);
+	}
+	free(in_flight_bbs);
+}
+
+
 /* Dependency test.
  * write=0, Submit batch buffers with read dependencies to all rings. Delay one
  * with a long executing batch buffer. Check the others are not held up.
@@ -482,6 +610,12 @@  igt_main
 			run_test_dependency(in_flight, loop, true);
 		}
 
+	for (loop = 0; loop < NBR_RINGS; loop++)
+		igt_subtest_f("%s-priority-starve", rings[loop].name) {
+			gem_require_ring(fd, rings[loop].id);
+			run_test_priority_starve(in_flight, rings[loop].id);
+		}
+
 	igt_fixture {
 		close(fd);
 	}