@@ -309,10 +309,10 @@ static void test_fence_await(int fd, const intel_ctx_t *ctx,
 {
 	const struct intel_execution_engine2 *e2;
 	uint32_t scratch = gem_create(fd, 4096);
-	igt_spin_t *spin;
+	igt_spin_t *spin, *invalid_spin;
 	uint32_t *out;
 	uint64_t scratch_offset, ahnd = get_reloc_ahnd(fd, ctx->id);
-	int i;
+	int out_fence, i;
 
 	scratch_offset = get_offset(ahnd, scratch, 4096, 0);
 
@@ -325,10 +325,25 @@ static void test_fence_await(int fd, const intel_ctx_t *ctx,
 			    .ctx = ctx,
 			    .engine = e->flags,
 			    .flags = IGT_SPIN_FENCE_OUT |
-				     IGT_SPIN_POLL_RUN |
-				     spin_hang(flags));
+				     IGT_SPIN_POLL_RUN);
 	igt_assert(spin->out_fence != -1);
 
+	if (flags & HANG) {
+		invalid_spin = igt_spin_new(fd,
+					    .ahnd = ahnd,
+					    .ctx = ctx,
+					    .engine = e->flags,
+					    .fence = spin->out_fence,
+					    .flags = IGT_SPIN_FENCE_IN |
+						     IGT_SPIN_FENCE_OUT |
+						     IGT_SPIN_POLL_RUN |
+						     spin_hang(flags));
+		igt_assert(invalid_spin->out_fence != -1);
+		out_fence = invalid_spin->out_fence;
+	} else {
+		out_fence = spin->out_fence;
+	}
+
 	i = 0;
 	for_each_ctx_engine(fd, ctx, e2) {
 		if (!gem_class_can_store_dword(fd, e2->class))
@@ -337,12 +352,12 @@ static void test_fence_await(int fd, const intel_ctx_t *ctx,
 
 		i++;
 		if (flags & NONBLOCK) {
-			igt_store_word(fd, ahnd, ctx, e2, spin->out_fence,
+			igt_store_word(fd, ahnd, ctx, e2, out_fence,
 				       scratch, scratch_offset, i, i);
 		} else {
 			igt_fork(child, 1) {
 				ahnd = get_reloc_ahnd(fd, ctx->id);
-				igt_store_word(fd, ahnd, ctx, e2, spin->out_fence,
+				igt_store_word(fd, ahnd, ctx, e2, out_fence,
 					       scratch, scratch_offset, i, i);
 				put_ahnd(ahnd);
 			}
@@ -350,6 +365,16 @@ static void test_fence_await(int fd, const intel_ctx_t *ctx,
 	}
 
 	igt_spin_busywait_until_started(spin);
+	if (flags & HANG) {
+		igt_assert(fence_busy(spin->out_fence));
+		igt_fail_on(igt_spin_has_started(invalid_spin));
+		igt_assert(fence_busy(out_fence));
+		for (int n = 0; n <= i; n++)
+			igt_assert_eq_u32(out[n], 0);
+
+		igt_spin_end(spin);
+		igt_spin_busywait_until_started(invalid_spin);
+	}
 
 	/* Long, but not too long to anger preemption disable checks */
 	usleep(50 * 1000); /* 50 ms, typical preempt reset is 150+ms */
@@ -365,7 +390,7 @@ static void test_fence_await(int fd, const intel_ctx_t *ctx,
 		igt_waitchildren();
 
 	gem_set_domain(fd, scratch, I915_GEM_DOMAIN_GTT, 0);
-	igt_assert(!fence_busy(spin->out_fence));
+	igt_assert(!fence_busy(out_fence));
 	if ((flags & HANG) == 0) {
 		do
 			igt_assert_eq_u32(out[i], i);
@@ -373,6 +398,8 @@ static void test_fence_await(int fd, const intel_ctx_t *ctx,
 	}
 
 	munmap(out, 4096);
+	if (flags & HANG)
+		igt_spin_free(fd, invalid_spin);
 	igt_spin_free(fd, spin);
 	gem_close(fd, scratch);
 	put_offset(ahnd, scratch);
Commit c8f6aaf32d83 ("tests/gem_exec_fence: Check stored values only
for valid workloads") resolved an issue, observed in *await-hang
scenarios, where the fence exposed by an invalid spin batch could be
signaled asynchronously, racing with pending checks on dependent test
batches still expected to be waiting for that fence. However, it
resolved the race by disabling those checks, which weakened the
scenarios.

This change takes an alternative approach: it makes the invalid spin
batch dependent on another fence, so the test has full control over
the moment when that batch starts, triggers a GPU hang, and has its
fence signaled. With that in place, the test can check synchronously
that execution of dependent test batches is still blocked on the not
yet signaled fence of the not yet completed spin batch, as its
non-hanging counterpart scenarios do.

v2: preserve checking that the pipeline runs ahead of the hang (Chris)

Signed-off-by: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
---
 tests/i915/gem_exec_fence.c | 41 ++++++++++++++++++++++++++++++-------
 1 file changed, 34 insertions(+), 7 deletions(-)
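
For quick reference, the gating pattern from the hunks above, condensed
into one place. This is a minimal sketch, not part of the patch: the
helper name chain_hang_behind_spin and its parameter list are invented
for illustration, and the snippet assumes it lives inside
tests/i915/gem_exec_fence.c so that the file-local spin_hang() helper
and the IGT spinner API from igt.h are in scope.

/*
 * Hypothetical helper (for illustration only): a valid spin batch
 * exports a fence that gates an invalid (hanging) batch via
 * IGT_SPIN_FENCE_IN, so the test decides exactly when the hang is
 * allowed to start executing.
 */
static void chain_hang_behind_spin(int fd, uint64_t ahnd,
				   const intel_ctx_t *ctx,
				   unsigned int engine, unsigned int flags)
{
	igt_spin_t *spin, *invalid_spin;

	/* Valid batch: starts immediately, exports an out fence. */
	spin = igt_spin_new(fd,
			    .ahnd = ahnd,
			    .ctx = ctx,
			    .engine = engine,
			    .flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_POLL_RUN);

	/* Invalid batch: execution blocked until spin's fence signals. */
	invalid_spin = igt_spin_new(fd,
				    .ahnd = ahnd,
				    .ctx = ctx,
				    .engine = engine,
				    .fence = spin->out_fence,
				    .flags = IGT_SPIN_FENCE_IN |
					     IGT_SPIN_FENCE_OUT |
					     IGT_SPIN_POLL_RUN |
					     spin_hang(flags));

	igt_spin_busywait_until_started(spin);
	/* While spin still runs, the hang must not have started yet. */
	igt_assert(!igt_spin_has_started(invalid_spin));

	/* Let the valid batch complete; only now may the hang begin. */
	igt_spin_end(spin);
	igt_spin_busywait_until_started(invalid_spin);

	igt_spin_free(fd, invalid_spin);
	igt_spin_free(fd, spin);
}

The point of the design is that nothing queued behind invalid_spin can
make progress, and its out fence cannot signal, until the test
explicitly ends the valid spinner, which turns the previously
asynchronous hang into a synchronously controlled event.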