@@ -260,6 +260,8 @@ emit_recursive_batch(igt_spin_t *spin,
obj[SCRATCH].flags = EXEC_OBJECT_PINNED;
obj[BATCH].flags = EXEC_OBJECT_PINNED;
+ spin->cmd_precondition = *spin->batch;
+
return fence_fd;
}
@@ -366,6 +368,24 @@ void igt_spin_set_timeout(igt_spin_t *spin, int64_t ns)
spin->timer = timer;
}
+/**
+ * igt_spin_reset:
+ * @spin: spin state from igt_spin_new()
+ *
+ * Reset the state of @spin, allowing it to be reused.
+ */
+void igt_spin_reset(igt_spin_t *spin)
+{
+ if (!spin)
+ return;
+
+ if (igt_spin_has_poll(spin))
+ spin->poll[SPIN_POLL_START_IDX] = 0;
+
+ *spin->batch = spin->cmd_precondition;
+ __sync_synchronize();
+}
+
/**
* igt_spin_end:
* @spin: spin state from igt_spin_new()
@@ -37,6 +37,7 @@ typedef struct igt_spin {
timer_t timer;
struct igt_list link;
uint32_t *batch;
+ uint32_t cmd_precondition;
int out_fence;
struct drm_i915_gem_exec_object2 obj[2];
struct drm_i915_gem_execbuffer2 execbuf;
@@ -68,6 +69,7 @@ igt_spin_factory(int fd, const struct igt_spin_factory *opts);
igt_spin_factory(fd, &((struct igt_spin_factory){__VA_ARGS__}))
void igt_spin_set_timeout(igt_spin_t *spin, int64_t ns);
+void igt_spin_reset(igt_spin_t *spin);
void igt_spin_end(igt_spin_t *spin);
void igt_spin_free(int fd, igt_spin_t *spin);
@@ -73,19 +73,17 @@ poll_ring(int fd, unsigned ring, const char *name)
unsigned long cycles;
igt_spin_t *spin[2];
uint64_t elapsed;
- uint32_t cmd;
gem_require_ring(fd, ring);
igt_require(gem_can_store_dword(fd, ring));
spin[0] = __igt_spin_factory(fd, &opts);
igt_assert(igt_spin_has_poll(spin[0]));
- cmd = *spin[0]->batch;
spin[1] = __igt_spin_factory(fd, &opts);
igt_assert(igt_spin_has_poll(spin[1]));
- igt_assert(cmd == *spin[1]->batch);
+ igt_assert(*spin[0]->batch == *spin[1]->batch);
igt_spin_end(spin[0]);
igt_spin_busywait_until_started(spin[1]);
@@ -96,8 +94,8 @@ poll_ring(int fd, unsigned ring, const char *name)
while ((elapsed = igt_nsec_elapsed(&tv)) < 2ull << 30) {
const unsigned int idx = cycles++ & 1;
- *spin[idx]->batch = cmd;
- spin[idx]->poll[SPIN_POLL_START_IDX] = 0;
+ igt_spin_reset(spin[idx]);
+
gem_execbuf(fd, &spin[idx]->execbuf);
igt_spin_end(spin[!idx]);
@@ -414,15 +412,6 @@ static void latency_from_ring(int fd,
}
}
-static void __rearm_spin(igt_spin_t *spin)
-{
- const uint32_t mi_arb_chk = 0x5 << 23;
-
- *spin->batch = mi_arb_chk;
- spin->poll[SPIN_POLL_START_IDX] = 0;
- __sync_synchronize();
-}
-
static void
__submit_spin(int fd, igt_spin_t *spin, unsigned int flags)
{
@@ -557,7 +546,7 @@ rthog_latency_on_ring(int fd, unsigned int engine, const char *name, unsigned in
if (nengine > 1)
usleep(10*nengine);
- __rearm_spin(spin);
+ igt_spin_reset(spin);
igt_nsec_elapsed(&ts);
__submit_spin(fd, spin, engine);
@@ -209,7 +209,6 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
struct drm_i915_gem_execbuffer2 execbuf;
double end, this, elapsed, now, baseline;
unsigned long cycles;
- uint32_t cmd;
igt_spin_t *spin;
memset(&object, 0, sizeof(object));
@@ -226,7 +225,6 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FAST));
igt_assert(igt_spin_has_poll(spin));
- cmd = *spin->batch;
gem_execbuf(fd, &execbuf);
@@ -238,8 +236,8 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
elapsed = 0;
cycles = 0;
do {
- *spin->batch = cmd;
- spin->poll[SPIN_POLL_START_IDX] = 0;
+ igt_spin_reset(spin);
+
gem_execbuf(fd, &spin->execbuf);
igt_spin_busywait_until_started(spin);
@@ -262,8 +260,8 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
elapsed = 0;
cycles = 0;
do {
- *spin->batch = cmd;
- spin->poll[SPIN_POLL_START_IDX] = 0;
+ igt_spin_reset(spin);
+
gem_execbuf(fd, &spin->execbuf);
igt_spin_busywait_until_started(spin);
@@ -321,17 +319,14 @@ static void active_ring(int fd, unsigned ring, int timeout)
double start, end, elapsed;
unsigned long cycles;
igt_spin_t *spin[2];
- uint32_t cmd;
spin[0] = __igt_spin_new(fd,
.engine = ring,
.flags = IGT_SPIN_FAST);
- cmd = *spin[0]->batch;
spin[1] = __igt_spin_new(fd,
.engine = ring,
.flags = IGT_SPIN_FAST);
- igt_assert(*spin[1]->batch == cmd);
start = gettime();
end = start + timeout;
@@ -343,7 +338,8 @@ static void active_ring(int fd, unsigned ring, int timeout)
igt_spin_end(s);
gem_sync(fd, s->handle);
- *s->batch = cmd;
+ igt_spin_reset(s);
+
gem_execbuf(fd, &s->execbuf);
}
cycles += 1024;
@@ -393,7 +389,6 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
double end, this, elapsed, now, baseline;
unsigned long cycles;
igt_spin_t *spin[2];
- uint32_t cmd;
memset(&object, 0, sizeof(object));
object.handle = gem_create(fd, 4096);
@@ -409,7 +404,6 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FAST));
igt_assert(igt_spin_has_poll(spin[0]));
- cmd = *spin[0]->batch;
spin[1] = __igt_spin_new(fd,
.engine = execbuf.flags,
@@ -423,8 +417,8 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
gem_sync(fd, object.handle);
for (int warmup = 0; warmup <= 1; warmup++) {
- *spin[0]->batch = cmd;
- spin[0]->poll[SPIN_POLL_START_IDX] = 0;
+ igt_spin_reset(spin[0]);
+
gem_execbuf(fd, &spin[0]->execbuf);
end = gettime() + timeout/10.;
@@ -433,8 +427,8 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
do {
igt_spin_busywait_until_started(spin[0]);
- *spin[1]->batch = cmd;
- spin[1]->poll[SPIN_POLL_START_IDX] = 0;
+ igt_spin_reset(spin[1]);
+
gem_execbuf(fd, &spin[1]->execbuf);
this = gettime();
@@ -454,8 +448,8 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
names[child % num_engines] ? " b" : "B",
cycles, elapsed*1e6/cycles);
- *spin[0]->batch = cmd;
- spin[0]->poll[SPIN_POLL_START_IDX] = 0;
+ igt_spin_reset(spin[0]);
+
gem_execbuf(fd, &spin[0]->execbuf);
end = gettime() + timeout;
@@ -467,8 +461,8 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
for (int n = 0; n < wlen; n++)
gem_execbuf(fd, &execbuf);
- *spin[1]->batch = cmd;
- spin[1]->poll[SPIN_POLL_START_IDX] = 0;
+ igt_spin_reset(spin[1]);
+
gem_execbuf(fd, &spin[1]->execbuf);
this = gettime();
@@ -1501,14 +1501,6 @@ test_enable_race(int gem_fd, const struct intel_execution_engine2 *e)
gem_quiescent_gpu(gem_fd);
}
-static void __rearm_spin(igt_spin_t *spin)
-{
- const uint32_t mi_arb_chk = 0x5 << 23;
-
- *spin->batch = mi_arb_chk;
- __sync_synchronize();
-}
-
#define __assert_within(x, ref, tol_up, tol_down) \
igt_assert_f((double)(x) <= ((double)(ref) + (tol_up)) && \
(double)(x) >= ((double)(ref) - (tol_down)), \
@@ -1596,7 +1588,7 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
nanosleep(&_ts, NULL);
/* Restart the spinbatch. */
- __rearm_spin(spin);
+ igt_spin_reset(spin);
__submit_spin(gem_fd, spin, e, 0);
/* PWM busy sleep. */
Libify resetting a spin for reuse.

v2: also use it in perf_pmu
v3: s/cmd_spin/cmd_precondition

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 lib/igt_dummyload.c           | 20 ++++++++++++++++++++
 lib/igt_dummyload.h           |  2 ++
 tests/i915/gem_exec_latency.c | 19 ++++---------------
 tests/i915/gem_sync.c         | 34 ++++++++++++++--------------------
 tests/perf_pmu.c              | 10 +---------
 5 files changed, 41 insertions(+), 44 deletions(-)
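
For reference, a minimal usage sketch of the new helper (not part of the patch), mirroring the rearm pattern the converted tests follow. The fd and engine variables are placeholders, and the spinner is assumed to be created with IGT_SPIN_POLL_RUN so that igt_spin_has_poll() holds and igt_spin_reset() also clears the poll slot:

	igt_spin_t *spin;

	/* Create a fast, pollable spinner on the chosen engine. */
	spin = __igt_spin_new(fd,
			      .engine = engine,
			      .flags = IGT_SPIN_POLL_RUN | IGT_SPIN_FAST);

	igt_spin_busywait_until_started(spin);	/* spinner is running */
	igt_spin_end(spin);			/* ask the batch to terminate */
	gem_sync(fd, spin->handle);		/* wait for it to retire */

	/* Rearm: restore the saved batch preamble and clear the poll slot... */
	igt_spin_reset(spin);

	/* ...then the very same execbuf can simply be resubmitted. */
	gem_execbuf(fd, &spin->execbuf);
	igt_spin_busywait_until_started(spin);

	igt_spin_end(spin);
	igt_spin_free(fd, spin);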