--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -62,6 +62,7 @@
#define MI_ARB_CHK (0x5 << 23)
static const int BATCH_SIZE = 4096;
+static const int POLL_SIZE = 4096;
static const int LOOP_START_OFFSET = 64;
static IGT_LIST(spin_list);
@@ -71,16 +72,19 @@ static int
emit_recursive_batch(igt_spin_t *spin,
int fd, const struct igt_spin_factory *opts)
{
-#define SCRATCH 0
-#define BATCH 1
const int gen = intel_gen(intel_get_drm_devid(fd));
- struct drm_i915_gem_relocation_entry relocs[2], *r;
+ struct drm_i915_gem_exec_object2 * const batch =
+ &spin->_obj[SPIN_OBJ_BATCH];
+ struct drm_i915_gem_exec_object2 * const poll =
+ &spin->_obj[SPIN_OBJ_POLL];
+ struct drm_i915_gem_exec_object2 * const dep =
+ &spin->_obj[SPIN_OBJ_DEP];
+ struct drm_i915_gem_relocation_entry relocs[3], *r;
struct drm_i915_gem_execbuffer2 *execbuf;
- struct drm_i915_gem_exec_object2 *obj;
unsigned int engines[16];
unsigned int nengine;
int fence_fd = -1;
- uint32_t *cs, *batch;
+ uint32_t *cs, *batch_start;
int i;
nengine = 0;
@@ -101,65 +105,49 @@ emit_recursive_batch(igt_spin_t *spin,
memset(&spin->execbuf, 0, sizeof(spin->execbuf));
execbuf = &spin->execbuf;
- memset(spin->obj, 0, sizeof(spin->obj));
- obj = spin->obj;
+ memset(spin->_obj, 0, sizeof(spin->_obj));
memset(relocs, 0, sizeof(relocs));
- obj[BATCH].handle = gem_create(fd, BATCH_SIZE);
- batch = __gem_mmap__wc(fd, obj[BATCH].handle,
- 0, BATCH_SIZE, PROT_WRITE);
- if (!batch)
- batch = gem_mmap__gtt(fd, obj[BATCH].handle,
- BATCH_SIZE, PROT_WRITE);
+ batch->handle = gem_create(fd, BATCH_SIZE);
+ spin->handle = batch->handle;
- gem_set_domain(fd, obj[BATCH].handle,
+ batch_start = __gem_mmap__wc(fd, batch->handle,
+ 0, BATCH_SIZE, PROT_WRITE);
+ if (!batch_start)
+ batch_start = gem_mmap__gtt(fd, batch->handle,
+ BATCH_SIZE, PROT_WRITE);
+ gem_set_domain(fd, batch->handle,
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
execbuf->buffer_count++;
- cs = batch;
+ cs = batch_start;
- if (opts->dependency) {
- igt_assert(!(opts->flags & IGT_SPIN_POLL_RUN));
-
- r = &relocs[obj[BATCH].relocation_count++];
-
- /* dummy write to dependency */
- obj[SCRATCH].handle = opts->dependency;
- r->presumed_offset = 0;
- r->target_handle = obj[SCRATCH].handle;
- r->offset = sizeof(uint32_t) * 1020;
- r->delta = 0;
- r->read_domains = I915_GEM_DOMAIN_RENDER;
- r->write_domain = I915_GEM_DOMAIN_RENDER;
-
- execbuf->buffer_count++;
- } else if (opts->flags & IGT_SPIN_POLL_RUN) {
- r = &relocs[obj[BATCH].relocation_count++];
+ poll->handle = gem_create(fd, POLL_SIZE);
+ spin->poll_handle = poll->handle;
+ execbuf->buffer_count++;
- igt_assert(!opts->dependency);
+ if (opts->flags & IGT_SPIN_POLL_RUN) {
+ r = &relocs[batch->relocation_count++];
if (gen == 4 || gen == 5) {
execbuf->flags |= I915_EXEC_SECURE;
igt_require(__igt_device_set_master(fd) == 0);
}
- spin->poll_handle = gem_create(fd, 4096);
- obj[SCRATCH].handle = spin->poll_handle;
-
- if (__gem_set_caching(fd, spin->poll_handle,
+ if (__gem_set_caching(fd, poll->handle,
I915_CACHING_CACHED) == 0)
- spin->poll = gem_mmap__cpu(fd, spin->poll_handle,
- 0, 4096,
+ spin->poll = gem_mmap__cpu(fd, poll->handle,
+ 0, POLL_SIZE,
PROT_READ | PROT_WRITE);
else
- spin->poll = gem_mmap__wc(fd, spin->poll_handle,
- 0, 4096,
+ spin->poll = gem_mmap__wc(fd, poll->handle,
+ 0, POLL_SIZE,
PROT_READ | PROT_WRITE);
igt_assert_eq(spin->poll[SPIN_POLL_START_IDX], 0);
/* batch is first */
- r->presumed_offset = 4096;
- r->target_handle = obj[SCRATCH].handle;
+ r->presumed_offset = BATCH_SIZE;
+ r->target_handle = poll->handle;
r->offset = sizeof(uint32_t) * 1;
r->delta = sizeof(uint32_t) * SPIN_POLL_START_IDX;
@@ -178,14 +166,25 @@ emit_recursive_batch(igt_spin_t *spin,
}
*cs++ = 1;
+ }
+
+ if (opts->dependency) {
+ r = &relocs[batch->relocation_count++];
+
+ /* dummy write to dependency */
+ dep->handle = opts->dependency;
+ r->presumed_offset = BATCH_SIZE + POLL_SIZE;
+ r->target_handle = dep->handle;
+ r->offset = sizeof(uint32_t) * 1020;
+ r->delta = 0;
+ r->read_domains = I915_GEM_DOMAIN_RENDER;
+ r->write_domain = I915_GEM_DOMAIN_RENDER;
execbuf->buffer_count++;
}
- spin->handle = obj[BATCH].handle;
-
- igt_assert_lt(cs - batch, LOOP_START_OFFSET / sizeof(*cs));
- spin->condition = batch + LOOP_START_OFFSET / sizeof(*cs);
+ igt_assert_lt(cs - batch_start, LOOP_START_OFFSET / sizeof(*cs));
+ spin->condition = batch_start + LOOP_START_OFFSET / sizeof(*cs);
cs = spin->condition;
/* Allow ourselves to be preempted */
@@ -207,9 +206,9 @@ emit_recursive_batch(igt_spin_t *spin,
cs += 1000;
/* recurse */
- r = &relocs[obj[BATCH].relocation_count++];
- r->target_handle = obj[BATCH].handle;
- r->offset = (cs + 1 - batch) * sizeof(*cs);
+ r = &relocs[batch->relocation_count++];
+ r->target_handle = batch->handle;
+ r->offset = (cs + 1 - batch_start) * sizeof(*cs);
r->read_domains = I915_GEM_DOMAIN_COMMAND;
r->delta = LOOP_START_OFFSET;
if (gen >= 8) {
@@ -226,10 +225,10 @@ emit_recursive_batch(igt_spin_t *spin,
*cs = r->delta;
cs++;
}
- obj[BATCH].relocs_ptr = to_user_pointer(relocs);
+ batch->relocs_ptr = to_user_pointer(relocs);
- execbuf->buffers_ptr = to_user_pointer(obj +
- (2 - execbuf->buffer_count));
+ execbuf->buffers_ptr = to_user_pointer(spin->_obj);
+ execbuf->flags |= I915_EXEC_BATCH_FIRST;
execbuf->rsvd1 = opts->ctx;
if (opts->flags & IGT_SPIN_FENCE_OUT)
@@ -258,15 +257,13 @@ emit_recursive_batch(igt_spin_t *spin,
}
}
- igt_assert_lt(cs - batch, BATCH_SIZE / sizeof(*cs));
-
- /* Make it easier for callers to resubmit. */
+ igt_assert_lt(cs - batch_start, BATCH_SIZE / sizeof(*cs));
- obj[BATCH].relocation_count = 0;
- obj[BATCH].relocs_ptr = 0;
-
- obj[SCRATCH].flags = EXEC_OBJECT_PINNED;
- obj[BATCH].flags = EXEC_OBJECT_PINNED;
+ for (i = 0; i < execbuf->buffer_count; i++) {
+ spin->_obj[i].relocation_count = 0;
+ spin->_obj[i].relocs_ptr = 0;
+ spin->_obj[i].flags = EXEC_OBJECT_PINNED;
+ }
spin->cmd_precondition = *spin->condition;
@@ -433,11 +430,10 @@ void igt_spin_free(int fd, igt_spin_t *spin)
gem_munmap((void *)((unsigned long)spin->condition & (~4095UL)),
BATCH_SIZE);
- if (spin->poll) {
+ if (spin->poll)
gem_munmap(spin->poll, 4096);
- gem_close(fd, spin->poll_handle);
- }
+ gem_close(fd, spin->poll_handle);
gem_close(fd, spin->handle);
if (spin->out_fence >= 0)
--- a/lib/igt_dummyload.h
+++ b/lib/igt_dummyload.h
@@ -34,6 +34,7 @@
typedef struct igt_spin {
unsigned int handle;
+
timer_t timer;
struct igt_list link;
@@ -41,8 +42,13 @@ typedef struct igt_spin {
uint32_t cmd_precondition;
int out_fence;
- struct drm_i915_gem_exec_object2 obj[2];
+
+ struct drm_i915_gem_exec_object2 _obj[3];
+#define SPIN_OBJ_BATCH 0
+#define SPIN_OBJ_POLL 1
+#define SPIN_OBJ_DEP 2
struct drm_i915_gem_execbuffer2 execbuf;
+
uint32_t poll_handle;
uint32_t *poll;
#define SPIN_POLL_START_IDX 0
--- a/tests/i915/gem_concurrent_all.c
+++ b/tests/i915/gem_concurrent_all.c
@@ -957,7 +957,8 @@ static igt_hang_t all_hang(void)
if (engine == I915_EXEC_RENDER)
continue;
- eb.flags = engine;
+ eb.flags &= ~(I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK);
+ eb.flags |= engine;
__gem_execbuf(fd, &eb);
}
--- a/tests/i915/gem_exec_schedule.c
+++ b/tests/i915/gem_exec_schedule.c
@@ -223,11 +223,11 @@ static void independent(int fd, unsigned int engine)
if (spin == NULL) {
spin = __igt_spin_new(fd, .engine = other);
} else {
- struct drm_i915_gem_execbuffer2 eb = {
- .buffer_count = 1,
- .buffers_ptr = to_user_pointer(&spin->obj[1]),
- .flags = other,
- };
+ struct drm_i915_gem_execbuffer2 eb = spin->execbuf;
+
+ eb.flags &= ~(I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK);
+ eb.flags |= other;
+
gem_execbuf(fd, &eb);
}
@@ -619,8 +619,8 @@ static igt_spin_t *__noise(int fd, uint32_t ctx, int prio, igt_spin_t *spin)
.engine = other);
} else {
struct drm_i915_gem_execbuffer2 eb = {
- .buffer_count = 1,
- .buffers_ptr = to_user_pointer(&spin->obj[1]),
+ .buffer_count = spin->execbuf.buffer_count,
+ .buffers_ptr = to_user_pointer(&spin->_obj[SPIN_OBJ_BATCH]),
.rsvd1 = ctx,
- .flags = other,
+ .flags = other | I915_EXEC_BATCH_FIRST,
};
--- a/tests/i915/gem_softpin.c
+++ b/tests/i915/gem_softpin.c
@@ -360,7 +360,7 @@ static void test_evict_hang(int fd)
execbuf.buffer_count = 1;
hang = igt_hang_ctx(fd, 0, 0, 0);
- expected = hang.spin->obj[1].offset;
+ expected = hang.spin->_obj[SPIN_OBJ_BATCH].offset;
/* Replace the hung batch with ourselves, forcing an eviction */
object.offset = expected;
--- a/tests/i915/gem_spin_batch.c
+++ b/tests/i915/gem_spin_batch.c
@@ -77,28 +77,28 @@ static void spin_resubmit(int fd, unsigned int engine, unsigned int flags)
igt_spin_t *spin = __igt_spin_new(fd, .ctx = ctx0, .engine = engine);
unsigned int other;
- struct drm_i915_gem_execbuffer2 eb = {
- .buffer_count = 1,
- .buffers_ptr = to_user_pointer(&spin->obj[1]),
- .rsvd1 = ctx1,
- };
+ struct drm_i915_gem_execbuffer2 eb = spin->execbuf;
+
+ eb.rsvd1 = ctx1;
if (flags & RESUBMIT_ALL_ENGINES) {
for_each_physical_engine(fd, other) {
if (other == engine)
continue;
- eb.flags = other;
+ eb.flags &= ~(I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK);
+ eb.flags |= other;
gem_execbuf(fd, &eb);
}
} else {
- eb.flags = engine;
+ eb.flags &= ~(I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK);
+ eb.flags |= engine;
gem_execbuf(fd, &eb);
}
igt_spin_end(spin);
- gem_sync(fd, spin->obj[1].handle);
+ gem_sync(fd, spin->handle);
igt_spin_free(fd, spin);
--- a/tests/i915/i915_hangman.c
+++ b/tests/i915/i915_hangman.c
@@ -209,7 +209,7 @@ static void test_error_state_capture(unsigned ring_id,
clear_error_state();
hang = igt_hang_ctx(device, 0, ring_id, HANG_ALLOW_CAPTURE);
- offset = hang.spin->obj[1].offset;
+ offset = hang.spin->_obj[SPIN_OBJ_BATCH].offset;
batch = gem_mmap__cpu(device, hang.spin->handle, 0, 4096, PROT_READ);
gem_set_domain(device, hang.spin->handle, I915_GEM_DOMAIN_CPU, 0);
To simplify emitting the recursive batch, make batch always the first
object on the execbuf list.

v2: set handles early, poll_ptr indecency (Chris)
v3: allow dep with poll

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 lib/igt_dummyload.c             | 124 ++++++++++++++++----------------
 lib/igt_dummyload.h             |   8 ++-
 tests/i915/gem_concurrent_all.c |   3 +-
 tests/i915/gem_exec_schedule.c  |  14 ++--
 tests/i915/gem_softpin.c        |   2 +-
 tests/i915/gem_spin_batch.c     |  16 ++---
 tests/i915/i915_hangman.c       |   2 +-
 7 files changed, 86 insertions(+), 83 deletions(-)
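For reference, a minimal sketch of the resubmit pattern the new layout enables:
because the batch is now _obj[SPIN_OBJ_BATCH] and spin->execbuf already carries
I915_EXEC_BATCH_FIRST, a caller can reuse the stored execbuf verbatim and only
swap the engine selector, as the updated independent()/spin_resubmit() hunks do.
This is not part of the patch; the helper resubmit_on_engine() and the standalone
test below are illustrative only, while gem_execbuf(), igt_spin_new(),
igt_spin_end(), gem_sync() and igt_spin_free() are existing IGT calls.

#include "igt.h"

/*
 * Illustrative helper (not in the patch): resubmit an already-emitted
 * spinner batch on a different engine by reusing the execbuf stored by
 * emit_recursive_batch() in spin->execbuf.  The batch object is first in
 * spin->_obj[] and I915_EXEC_BATCH_FIRST is already set, so only the ring
 * selector bits need to change.
 */
static void resubmit_on_engine(int fd, igt_spin_t *spin, unsigned int engine)
{
	struct drm_i915_gem_execbuffer2 eb = spin->execbuf;

	/* Replace the engine selection, keep BATCH_FIRST and the buffer list. */
	eb.flags &= ~(I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK);
	eb.flags |= engine;

	gem_execbuf(fd, &eb);
}

igt_simple_main
{
	int fd = drm_open_driver(DRIVER_INTEL);
	igt_spin_t *spin;
	unsigned int other;

	igt_require_gem(fd);

	/* Start a spinner on the render engine. */
	spin = igt_spin_new(fd, .engine = I915_EXEC_RENDER);

	/* Queue the same spinning batch on every other physical engine. */
	for_each_physical_engine(fd, other) {
		if (other == I915_EXEC_RENDER)
			continue;
		resubmit_on_engine(fd, spin, other);
	}

	/* Stop the spinner and wait for all submissions of its batch. */
	igt_spin_end(spin);
	gem_sync(fd, spin->handle);
	igt_spin_free(fd, spin);

	close(fd);
}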