@@ -1423,6 +1423,214 @@ test_perf_ts_mmio(void)
igt_waitchildren();
}
+struct ts_ctxid_sample {
+ uint64_t ctx_id;
+ uint64_t ts;
+};
+
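+/*
+ * Each sample read from a non-OA perf stream opened with SAMPLE_CTX_ID and
+ * SAMPLE_TS consists of a 64-bit context ID followed by a 64-bit timestamp.
+ * Reports before @match_index must not carry @hw_ctx_id, while every report
+ * from @match_index onwards must.
+ */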
+static void
+verify_ctxid_nonoa(uint8_t *perf_reports, int num_reports, size_t report_size,
+ uint64_t hw_ctx_id, int match_index)
+{
+ struct ts_ctxid_sample *sample;
+
+ for (int i = 0; i < num_reports; i++) {
+ size_t offset = i * report_size;
+
+ sample = (struct ts_ctxid_sample *) (perf_reports + offset);
+
+		igt_debug("read report: ctx_id = %"PRIu64", timestamp = %"PRIu64"\n",
+			  sample->ctx_id, sample->ts);
+
+ if (i < match_index)
+ igt_assert(sample->ctx_id != hw_ctx_id);
+ else
+ igt_assert(sample->ctx_id == hw_ctx_id);
+ }
+}
+
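+/* Local copies in case the installed headers predate these execbuf flags. */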
+#define LOCAL_I915_EXEC_NO_RELOC (1<<11)
+#define LOCAL_I915_EXEC_HANDLE_LUT (1<<12)
+
+/* Logic taken from store_ring in gem_sync */
+static void
+test_concurrent_streams(void)
+{
+ const int gen = intel_gen(intel_get_drm_devid(drm_fd));
+ unsigned int engines[16];
+ int num_children = 1;
+ int timeout = 5;
+ int num_engines = 0;
+ const struct intel_execution_engine *e;
+ uint64_t render_hw_ctx_id;
+ uint64_t blt_hw_ctx_id;
+ uint64_t render_properties[] = {
+ /* CS parameters */
+ DRM_I915_PERF_PROP_ENGINE, I915_EXEC_RENDER,
+ DRM_I915_PERF_PROP_SAMPLE_CTX_ID, true,
+ DRM_I915_PERF_PROP_SAMPLE_TS, true,
+ };
+ struct drm_i915_perf_open_param render_param = {
+ .flags = I915_PERF_FLAG_FD_CLOEXEC,
+ .num_properties = sizeof(render_properties) / 16,
+ .properties_ptr = to_user_pointer(render_properties),
+ };
+ uint64_t blt_properties[] = {
+ /* CS parameters */
+ DRM_I915_PERF_PROP_ENGINE, I915_EXEC_BLT,
+ DRM_I915_PERF_PROP_SAMPLE_CTX_ID, true,
+ DRM_I915_PERF_PROP_SAMPLE_TS, true,
+ };
+ struct drm_i915_perf_open_param blt_param = {
+ .flags = I915_PERF_FLAG_FD_CLOEXEC,
+ .num_properties = sizeof(blt_properties) / 16,
+ .properties_ptr = to_user_pointer(blt_properties),
+ };
+ int num_reports = 4;
+ int render_prop_size = ARRAY_SIZE(render_properties);
+ int render_report_size = get_perf_report_size_nonoa(render_properties,
+ render_prop_size);
+ int render_total_size = num_reports * render_report_size;
+ int render_stream_fd;
+ int blt_prop_size = ARRAY_SIZE(blt_properties);
+ int blt_report_size = get_perf_report_size_nonoa(blt_properties,
+ blt_prop_size);
+ int blt_total_size = num_reports * blt_report_size;
+ int blt_stream_fd;
+ bool valid_data = false;
+
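+	/*
+	 * Enumerate the rings we can actually submit to, skipping the default
+	 * placeholder and any BSD variant the hardware does not expose.
+	 */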
+ for (e = intel_execution_engines; e->name; e++) {
+ if (e->exec_id == 0)
+ continue;
+
+ if (!gem_has_ring(drm_fd, e->exec_id | e->flags))
+ continue;
+
+ if (e->exec_id == I915_EXEC_BSD) {
+ int is_bsd2 = e->flags != 0;
+
+ if (gem_has_bsd2(drm_fd) != is_bsd2)
+ continue;
+ }
+
+ engines[num_engines++] = e->exec_id | e->flags;
+ if (num_engines == ARRAY_SIZE(engines))
+ break;
+ }
+
+ /* should be default, but just to be sure... */
+ write_u64_file("/proc/sys/dev/i915/perf_stream_paranoid", 1);
+
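+	/* Fork one child per usable engine. */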
+ num_children *= num_engines;
+
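+	/*
+	 * Missed-interrupt detection brackets the workload and is asserted
+	 * again once all children have completed.
+	 */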
+ intel_detect_and_clear_missed_interrupts(drm_fd);
+ igt_fork(child, num_children) {
+ uint8_t *render_perf_reports = NULL;
+ uint8_t *blt_perf_reports = NULL;
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 object[1];
+ struct drm_i915_gem_execbuffer2 execbuf;
+ drm_intel_bufmgr *bufmgr;
+ drm_intel_context *context0;
+ uint32_t ctx_id = 0xffffffff; /* invalid id */
+ int ret;
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(object);
+ execbuf.flags = engines[child % num_engines];
+ execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;
+ execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;
+ if (gen < 6)
+ execbuf.flags |= I915_EXEC_SECURE;
+
+ memset(object, 0, sizeof(object));
+ object[0].handle = gem_create(drm_fd, 4096);
+ gem_write(drm_fd, object[0].handle, 0, &bbe, sizeof(bbe));
+
+ bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
+ drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+
+ context0 = drm_intel_gem_context_create(bufmgr);
+ igt_assert(context0);
+
+ ret = drm_intel_gem_context_get_id(context0, &ctx_id);
+ igt_assert_eq(ret, 0);
+ igt_assert_neq(ctx_id, 0xffffffff);
+
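+		/*
+		 * The child driving the render ring opens an i915-perf stream
+		 * on the render engine and records the HW ID of its context so
+		 * the samples can be checked against it later; the blt child
+		 * does the same for the blt ring below.
+		 */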
+ if ((execbuf.flags & I915_EXEC_RING_MASK) == I915_EXEC_RENDER) {
+ render_hw_ctx_id = context_get_hw_ctx_id(drm_fd,
+ ctx_id);
+			igt_debug("render user_handle = %u hw_id = %"PRIu64"\n",
+				  ctx_id, render_hw_ctx_id);
+
+ render_perf_reports = malloc(render_total_size);
+ igt_assert(render_perf_reports);
+
+ igt_debug("opening render i915-perf stream\n");
+ render_stream_fd = __perf_open(drm_fd, &render_param);
+ }
+
+ if ((execbuf.flags & I915_EXEC_RING_MASK) == I915_EXEC_BLT) {
+ blt_hw_ctx_id = context_get_hw_ctx_id(drm_fd, ctx_id);
+			igt_debug("blt user_handle = %u hw_id = %"PRIu64"\n",
+				  ctx_id, blt_hw_ctx_id);
+
+ blt_perf_reports = malloc(blt_total_size);
+ igt_assert(blt_perf_reports);
+
+ igt_debug("opening blt i915-perf stream\n");
+ blt_stream_fd = __perf_open(drm_fd, &blt_param);
+ }
+
+ execbuf.buffer_count = 1;
+ /* Do a submission with default context */
+ gem_execbuf(drm_fd, &execbuf);
+
+ i915_execbuffer2_set_context_id(execbuf, ctx_id);
+ /* Now submit with user created context */
+ gem_execbuf(drm_fd, &execbuf);
+
+ gem_close(drm_fd, object[0].handle);
+
+ drm_intel_gem_context_destroy(context0);
+ drm_intel_bufmgr_destroy(bufmgr);
+
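+		/*
+		 * Read back the samples and check that the recorded HW context
+		 * ID shows up from report index 2 onwards, which is expected to
+		 * correspond to the submission on the user-created context.
+		 */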
+ if ((execbuf.flags & I915_EXEC_RING_MASK) == I915_EXEC_RENDER) {
+ valid_data = read_perf_reports(render_stream_fd,
+ render_perf_reports,
+ num_reports,
+ render_report_size,
+ false);
+ igt_assert(valid_data);
+ close(render_stream_fd);
+
+			igt_debug("Verify render ctx id: %"PRIu64"\n",
+				  render_hw_ctx_id);
+ verify_ctxid_nonoa(render_perf_reports, num_reports,
+ render_report_size, render_hw_ctx_id,
+ 2);
+ free(render_perf_reports);
+ }
+
+ if ((execbuf.flags & I915_EXEC_RING_MASK) == I915_EXEC_BLT) {
+ valid_data = read_perf_reports(blt_stream_fd,
+ blt_perf_reports,
+ num_reports,
+ blt_report_size,
+ false);
+ igt_assert(valid_data);
+ close(blt_stream_fd);
+
+			igt_debug("Verify blt ctx id: %"PRIu64"\n", blt_hw_ctx_id);
+ verify_ctxid_nonoa(blt_perf_reports, num_reports,
+ blt_report_size, blt_hw_ctx_id, 2);
+ free(blt_perf_reports);
+ }
+ }
+	igt_waitchildren_timeout(timeout + 10, NULL);
+ igt_assert_eq(intel_detect_and_clear_missed_interrupts(drm_fd), 0);
+}
+
igt_main
{
igt_skip_on_simulation();
@@ -1462,6 +1670,9 @@ igt_main
test_perf_oa_mmio();
}
+ igt_subtest("concurrent-streams")
+ test_concurrent_streams();
+
igt_fixture {
close(drm_fd);
}
Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
---
 tests/intel_perf_dapc.c | 211 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 211 insertions(+)