diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -62,6 +62,7 @@
#define MI_ARB_CHK (0x5 << 23)
static const int BATCH_SIZE = 4096;
+static const int POLL_SIZE = 4096;
static IGT_LIST(spin_list);
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
@@ -69,16 +70,15 @@ static int
emit_recursive_batch(igt_spin_t *spin,
int fd, const struct igt_spin_factory *opts)
{
-#define SCRATCH 0
-#define BATCH 1
const int gen = intel_gen(intel_get_drm_devid(fd));
+ struct drm_i915_gem_exec_object2 * const batch = &spin->_obj[0];
+ struct drm_i915_gem_exec_object2 * const poll = &spin->_obj[1];
struct drm_i915_gem_relocation_entry relocs[2], *r;
struct drm_i915_gem_execbuffer2 *execbuf;
- struct drm_i915_gem_exec_object2 *obj;
unsigned int engines[16];
unsigned int nengine;
int fence_fd = -1;
- uint32_t *batch, *batch_start;
+ uint32_t *cs, *cs_start;
int i;
nengine = 0;
@@ -99,30 +99,31 @@ emit_recursive_batch(igt_spin_t *spin,
memset(&spin->execbuf, 0, sizeof(spin->execbuf));
execbuf = &spin->execbuf;
- memset(spin->obj, 0, sizeof(spin->obj));
- obj = spin->obj;
+ memset(spin->_obj, 0, sizeof(spin->_obj));
memset(relocs, 0, sizeof(relocs));
- obj[BATCH].handle = gem_create(fd, BATCH_SIZE);
- batch = __gem_mmap__wc(fd, obj[BATCH].handle,
- 0, BATCH_SIZE, PROT_WRITE);
- if (!batch)
- batch = gem_mmap__gtt(fd, obj[BATCH].handle,
- BATCH_SIZE, PROT_WRITE);
- gem_set_domain(fd, obj[BATCH].handle,
- I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ batch->handle = gem_create(fd, BATCH_SIZE);
+ spin->handle = batch->handle;
+
+ cs = __gem_mmap__wc(fd, batch->handle,
+ 0, BATCH_SIZE, PROT_WRITE);
+ if (!cs)
+ cs = gem_mmap__gtt(fd, batch->handle,
+ BATCH_SIZE, PROT_WRITE);
+ gem_set_domain(fd, batch->handle,
+ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
execbuf->buffer_count++;
- batch_start = batch;
+ cs_start = cs;
if (opts->dependency) {
igt_assert(!(opts->flags & IGT_SPIN_POLL_RUN));
- r = &relocs[obj[BATCH].relocation_count++];
+ r = &relocs[batch->relocation_count++];
/* dummy write to dependency */
- obj[SCRATCH].handle = opts->dependency;
+ poll->handle = opts->dependency;
r->presumed_offset = 0;
- r->target_handle = obj[SCRATCH].handle;
+ r->target_handle = poll->handle;
r->offset = sizeof(uint32_t) * 1020;
r->delta = 0;
r->read_domains = I915_GEM_DOMAIN_RENDER;
@@ -130,7 +131,7 @@ emit_recursive_batch(igt_spin_t *spin,
execbuf->buffer_count++;
} else if (opts->flags & IGT_SPIN_POLL_RUN) {
- r = &relocs[obj[BATCH].relocation_count++];
+ r = &relocs[batch->relocation_count++];
igt_assert(!opts->dependency);
@@ -139,52 +140,51 @@ emit_recursive_batch(igt_spin_t *spin,
igt_require(__igt_device_set_master(fd) == 0);
}
- spin->poll_handle = gem_create(fd, 4096);
- obj[SCRATCH].handle = spin->poll_handle;
+ poll->handle = gem_create(fd, POLL_SIZE);
+ spin->poll_handle = poll->handle;
- if (__gem_set_caching(fd, spin->poll_handle,
+ if (__gem_set_caching(fd, poll->handle,
I915_CACHING_CACHED) == 0)
- spin->poll = gem_mmap__cpu(fd, spin->poll_handle,
- 0, 4096,
+ spin->poll = gem_mmap__cpu(fd, poll->handle,
+ 0, POLL_SIZE,
PROT_READ | PROT_WRITE);
else
- spin->poll = gem_mmap__wc(fd, spin->poll_handle,
- 0, 4096,
+ spin->poll = gem_mmap__wc(fd, poll->handle,
+ 0, POLL_SIZE,
PROT_READ | PROT_WRITE);
igt_assert_eq(spin->poll[SPIN_POLL_START_IDX], 0);
/* batch is first */
- r->presumed_offset = 4096;
- r->target_handle = obj[SCRATCH].handle;
+ r->presumed_offset = BATCH_SIZE;
+ r->target_handle = poll->handle;
r->offset = sizeof(uint32_t) * 1;
r->delta = sizeof(uint32_t) * SPIN_POLL_START_IDX;
- *batch++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+ *cs++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
if (gen >= 8) {
- *batch++ = r->presumed_offset + r->delta;
- *batch++ = 0;
+ *cs++ = r->presumed_offset + r->delta;
+ *cs++ = 0;
} else if (gen >= 4) {
- *batch++ = 0;
- *batch++ = r->presumed_offset + r->delta;
+ *cs++ = 0;
+ *cs++ = r->presumed_offset + r->delta;
r->offset += sizeof(uint32_t);
} else {
- batch[-1]--;
- *batch++ = r->presumed_offset + r->delta;
+ cs[-1]--;
+ *cs++ = r->presumed_offset + r->delta;
}
- *batch++ = 1;
+ *cs++ = 1;
execbuf->buffer_count++;
}
- spin->batch = batch = batch_start + 64 / sizeof(*batch);
- spin->handle = obj[BATCH].handle;
+ spin->cs = cs = cs_start + 64 / sizeof(*cs);
/* Allow ourselves to be preempted */
if (!(opts->flags & IGT_SPIN_NO_PREEMPTION))
- *batch++ = MI_ARB_CHK;
+ *cs++ = MI_ARB_CHK;
/* Pad with a few nops so that we do not completely hog the system.
*
@@ -198,32 +198,33 @@ emit_recursive_batch(igt_spin_t *spin,
* trouble. See https://bugs.freedesktop.org/show_bug.cgi?id=102262
*/
if (!(opts->flags & IGT_SPIN_FAST))
- batch += 1000;
+ cs += 1000;
/* recurse */
- r = &relocs[obj[BATCH].relocation_count++];
- r->target_handle = obj[BATCH].handle;
- r->offset = (batch + 1 - batch_start) * sizeof(*batch);
+ r = &relocs[batch->relocation_count++];
+ r->presumed_offset = 0;
+ r->target_handle = batch->handle;
+ r->offset = (cs + 1 - cs_start) * sizeof(*cs);
r->read_domains = I915_GEM_DOMAIN_COMMAND;
r->delta = 64;
if (gen >= 8) {
- *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
- *batch++ = r->delta;
- *batch++ = 0;
+ *cs++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ *cs++ = r->presumed_offset + r->delta;
+ *cs++ = 0;
} else if (gen >= 6) {
- *batch++ = MI_BATCH_BUFFER_START | 1 << 8;
- *batch++ = r->delta;
+ *cs++ = MI_BATCH_BUFFER_START | 1 << 8;
+ *cs++ = r->presumed_offset + r->delta;
} else {
- *batch++ = MI_BATCH_BUFFER_START | 2 << 6;
+ *cs++ = MI_BATCH_BUFFER_START | 2 << 6;
if (gen < 4)
r->delta |= 1;
- *batch = r->delta;
- batch++;
+ *cs = r->presumed_offset + r->delta;
+ cs++;
}
- obj[BATCH].relocs_ptr = to_user_pointer(relocs);
+ batch->relocs_ptr = to_user_pointer(relocs);
- execbuf->buffers_ptr = to_user_pointer(obj +
- (2 - execbuf->buffer_count));
+ execbuf->buffers_ptr = to_user_pointer(spin->_obj);
+ execbuf->flags |= I915_EXEC_BATCH_FIRST;
execbuf->rsvd1 = opts->ctx;
if (opts->flags & IGT_SPIN_FENCE_OUT)
@@ -252,15 +253,13 @@ emit_recursive_batch(igt_spin_t *spin,
}
}
- /* Make it easier for callers to resubmit. */
-
- obj[BATCH].relocation_count = 0;
- obj[BATCH].relocs_ptr = 0;
-
- obj[SCRATCH].flags = EXEC_OBJECT_PINNED;
- obj[BATCH].flags = EXEC_OBJECT_PINNED;
+ for (i = 0; i < execbuf->buffer_count; i++) {
+ spin->_obj[i].relocation_count = 0;
+ spin->_obj[i].relocs_ptr = 0;
+ spin->_obj[i].flags = EXEC_OBJECT_PINNED;
+ }
- spin->cmd_spin = *spin->batch;
+ spin->cmd_spin = *spin->cs;
return fence_fd;
}
@@ -382,7 +381,7 @@ void igt_spin_reset(igt_spin_t *spin)
if (igt_spin_has_poll(spin))
spin->poll[SPIN_POLL_START_IDX] = 0;
- *spin->batch = spin->cmd_spin;
+ *spin->cs = spin->cmd_spin;
__sync_synchronize();
}
@@ -397,7 +396,7 @@ void igt_spin_end(igt_spin_t *spin)
if (!spin)
return;
- *spin->batch = MI_BATCH_BUFFER_END;
+ *spin->cs = MI_BATCH_BUFFER_END;
__sync_synchronize();
}
@@ -422,7 +421,7 @@ void igt_spin_free(int fd, igt_spin_t *spin)
timer_delete(spin->timer);
igt_spin_end(spin);
- gem_munmap((void *)((unsigned long)spin->batch & (~4095UL)),
+ gem_munmap((void *)((unsigned long)spin->cs & (~4095UL)),
BATCH_SIZE);
if (spin->poll) {
diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h
--- a/lib/igt_dummyload.h
+++ b/lib/igt_dummyload.h
@@ -33,14 +33,20 @@
#include "i915_drm.h"
typedef struct igt_spin {
- unsigned int handle;
+ uint32_t handle;
+
timer_t timer;
struct igt_list link;
- uint32_t *batch;
+
+ uint32_t *cs;
uint32_t cmd_spin;
int out_fence;
- struct drm_i915_gem_exec_object2 obj[2];
+
+ struct drm_i915_gem_exec_object2 _obj[2];
+#define SPIN_BATCH_IDX 0
+
struct drm_i915_gem_execbuffer2 execbuf;
+
uint32_t poll_handle;
uint32_t *poll;
#define SPIN_POLL_START_IDX 0
diff --git a/tests/i915/gem_concurrent_all.c b/tests/i915/gem_concurrent_all.c
--- a/tests/i915/gem_concurrent_all.c
+++ b/tests/i915/gem_concurrent_all.c
@@ -957,7 +957,8 @@ static igt_hang_t all_hang(void)
if (engine == I915_EXEC_RENDER)
continue;
- eb.flags = engine;
+ eb.flags &= ~(I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK);
+ eb.flags |= engine;
__gem_execbuf(fd, &eb);
}
diff --git a/tests/i915/gem_exec_latency.c b/tests/i915/gem_exec_latency.c
--- a/tests/i915/gem_exec_latency.c
+++ b/tests/i915/gem_exec_latency.c
@@ -83,7 +83,7 @@ poll_ring(int fd, unsigned ring, const char *name)
spin[1] = __igt_spin_factory(fd, &opts);
igt_assert(igt_spin_has_poll(spin[1]));
- igt_assert(*spin[0]->batch == *spin[1]->batch);
+ igt_assert(*spin[0]->cs == *spin[1]->cs);
igt_spin_end(spin[0]);
igt_spin_busywait_until_started(spin[1]);
diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
--- a/tests/i915/gem_exec_schedule.c
+++ b/tests/i915/gem_exec_schedule.c
@@ -223,11 +223,11 @@ static void independent(int fd, unsigned int engine)
if (spin == NULL) {
spin = __igt_spin_new(fd, .engine = other);
} else {
- struct drm_i915_gem_execbuffer2 eb = {
- .buffer_count = 1,
- .buffers_ptr = to_user_pointer(&spin->obj[1]),
- .flags = other,
- };
+ struct drm_i915_gem_execbuffer2 eb = spin->execbuf;
+
+ eb.flags &= ~(I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK);
+ eb.flags |= other;
+
gem_execbuf(fd, &eb);
}
@@ -619,8 +619,8 @@ static igt_spin_t *__noise(int fd, uint32_t ctx, int prio, igt_spin_t *spin)
.engine = other);
} else {
struct drm_i915_gem_execbuffer2 eb = {
- .buffer_count = 1,
- .buffers_ptr = to_user_pointer(&spin->obj[1]),
+ .buffer_count = spin->execbuf.buffer_count,
+ .buffers_ptr = to_user_pointer(&spin->_obj[SPIN_BATCH_IDX]),
.rsvd1 = ctx,
.flags = other,
};
diff --git a/tests/i915/gem_softpin.c b/tests/i915/gem_softpin.c
--- a/tests/i915/gem_softpin.c
+++ b/tests/i915/gem_softpin.c
@@ -360,7 +360,7 @@ static void test_evict_hang(int fd)
execbuf.buffer_count = 1;
hang = igt_hang_ctx(fd, 0, 0, 0);
- expected = hang.spin->obj[1].offset;
+ expected = hang.spin->_obj[SPIN_BATCH_IDX].offset;
/* Replace the hung batch with ourselves, forcing an eviction */
object.offset = expected;
diff --git a/tests/i915/gem_spin_batch.c b/tests/i915/gem_spin_batch.c
--- a/tests/i915/gem_spin_batch.c
+++ b/tests/i915/gem_spin_batch.c
@@ -77,28 +77,28 @@ static void spin_resubmit(int fd, unsigned int engine, unsigned int flags)
igt_spin_t *spin = __igt_spin_new(fd, .ctx = ctx0, .engine = engine);
unsigned int other;
- struct drm_i915_gem_execbuffer2 eb = {
- .buffer_count = 1,
- .buffers_ptr = to_user_pointer(&spin->obj[1]),
- .rsvd1 = ctx1,
- };
+ struct drm_i915_gem_execbuffer2 eb = spin->execbuf;
+
+ eb.rsvd1 = ctx1;
if (flags & RESUBMIT_ALL_ENGINES) {
for_each_physical_engine(fd, other) {
if (other == engine)
continue;
- eb.flags = other;
+ eb.flags &= ~(I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK);
+ eb.flags |= other;
gem_execbuf(fd, &eb);
}
} else {
- eb.flags = engine;
+ eb.flags &= ~(I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK);
+ eb.flags |= engine;
gem_execbuf(fd, &eb);
}
igt_spin_end(spin);
- gem_sync(fd, spin->obj[1].handle);
+ gem_sync(fd, spin->handle);
igt_spin_free(fd, spin);
diff --git a/tests/i915/i915_hangman.c b/tests/i915/i915_hangman.c
--- a/tests/i915/i915_hangman.c
+++ b/tests/i915/i915_hangman.c
@@ -209,7 +209,7 @@ static void test_error_state_capture(unsigned ring_id,
clear_error_state();
hang = igt_hang_ctx(device, 0, ring_id, HANG_ALLOW_CAPTURE);
- offset = hang.spin->obj[1].offset;
+ offset = hang.spin->_obj[SPIN_BATCH_IDX].offset;
batch = gem_mmap__cpu(device, hang.spin->handle, 0, 4096, PROT_READ);
gem_set_domain(device, hang.spin->handle, I915_GEM_DOMAIN_CPU, 0);
To simplify emitting the recursive batch, make batch always the first
object on the execbuf list.

v2: set handles early, poll_ptr indecency (Chris)

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 lib/igt_dummyload.c             | 129 ++++++++++++++++----------------
 lib/igt_dummyload.h             |  12 ++-
 tests/i915/gem_concurrent_all.c |   3 +-
 tests/i915/gem_exec_latency.c   |   2 +-
 tests/i915/gem_exec_schedule.c  |  14 ++--
 tests/i915/gem_softpin.c        |   2 +-
 tests/i915/gem_spin_batch.c     |  16 ++--
 tests/i915/i915_hangman.c       |   2 +-
 8 files changed, 93 insertions(+), 87 deletions(-)
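
For reference, the pattern the test-side hunks converge on can be summarised
with a short sketch. This is illustrative only, assuming the igt_spin_t layout
introduced above and the usual IGT includes; resubmit_on_engine() is a
hypothetical helper name, not something added by this series. It shows why
copying spin->execbuf is now enough to resubmit a spinner on another engine:
the batch is always _obj[0], I915_EXEC_BATCH_FIRST is already set, and
buffers_ptr still points at the spinner's own object array, so only the ring
selection bits need to change.

#include "igt.h"

/*
 * Hypothetical helper (illustration only): resubmit an already-running
 * spinner on a different engine by reusing its own execbuf.
 */
static void resubmit_on_engine(int fd, igt_spin_t *spin, unsigned int engine)
{
	struct drm_i915_gem_execbuffer2 eb = spin->execbuf;

	/* keep ctx, buffers_ptr and I915_EXEC_BATCH_FIRST; swap only the engine */
	eb.flags &= ~(I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK);
	eb.flags |= engine;

	gem_execbuf(fd, &eb);
}

Something like resubmit_on_engine(fd, spin, I915_EXEC_BLT) mirrors what
spin_resubmit() and independent() do after this patch, while __noise() keeps a
hand-rolled execbuf but reuses spin->_obj and its buffer_count for the same
reason.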