@@ -121,8 +121,8 @@ void emit_recursive_batch(igt_spin_t *spin,
spin->batch = batch;
spin->handle = obj[BATCH].handle;
- /* Allow ourselves to be preempted */
- *batch++ = MI_ARB_CHK;
+ if (opts.preemptible)
+ *batch++ = MI_ARB_CHK; /* Allow ourselves to be preempted */
/* Pad with a few nops so that we do not completely hog the system.
*
@@ -169,7 +169,7 @@ void emit_recursive_batch(igt_spin_t *spin,
gem_execbuf(fd, &execbuf);
}
- spin->spinning_offset = obj->offset;
+ spin->gtt_offset = obj[BATCH].offset;
}
igt_spin_t *
@@ -35,13 +35,14 @@ typedef struct igt_spin {
timer_t timer;
struct igt_list link;
uint32_t *batch;
- uint64_t spinning_offset;
+ uint64_t gtt_offset;
} igt_spin_t;
typedef struct igt_spin_opt {
uint32_t ctx;
unsigned engine;
uint32_t dep;
+ bool preemptible;
} igt_spin_opt_t;
void emit_recursive_batch(igt_spin_t *spin, int fd, igt_spin_opt_t opts);
@@ -295,10 +295,11 @@ igt_hang_t igt_hang_ctx(int fd, igt_hang_opt_t opts)
emit_recursive_batch(&spin, fd, (igt_spin_opt_t){
.ctx = opts.ctx,
- .engine = opts.ring});
+ .engine = opts.ring,
+ .preemptible = false});
if (opts.offset)
- *opts.offset = spin.spinning_offset;
+ *opts.offset = spin.gtt_offset;
return (igt_hang_t){ spin.handle, opts.ctx, ban, opts.flags };
}
@@ -34,7 +34,7 @@ IGT_TEST_DESCRIPTION("Inject missed interrupts and make sure they are caught");
static void trigger_missed_interrupt(int fd, unsigned ring)
{
igt_spin_t *spin = __igt_spin_batch_new(fd,
- (igt_spin_opt_t){.engine = ring});
+ (igt_spin_opt_t){.engine = ring, .preemptible = true});
igt_fork(child, 1) {
/* We are now a low priority child on the *same* CPU as the
@@ -115,7 +115,8 @@ static void semaphore(int fd, unsigned ring, uint32_t flags)
handle[BUSY] = gem_create(fd, 4096);
spin = igt_spin_batch_new(fd, (igt_spin_opt_t){
.engine = ring,
- .dep = handle[BUSY]});
+ .dep = handle[BUSY],
+ .preemptible = true});
/* Queue a batch after the busy, it should block and remain "busy" */
igt_assert(exec_noop(fd, handle, ring | flags, false));
@@ -462,7 +463,8 @@ static void close_race(int fd)
for (i = 0; i < nhandles; i++) {
spin[i] = igt_spin_batch_new(fd, (igt_spin_opt_t){
- .engine =engines[rand() % nengine]});
+			.engine = engines[rand() % nengine],
+ .preemptible = true});
handles[i] = spin[i]->handle;
}
@@ -470,7 +472,8 @@ static void close_race(int fd)
for (i = 0; i < nhandles; i++) {
igt_spin_batch_free(fd, spin[i]);
spin[i] = igt_spin_batch_new(fd, (igt_spin_opt_t){
- .engine = engines[rand() % nengine]});
+ .engine = engines[rand() % nengine],
+ .preemptible = true});
handles[i] = spin[i]->handle;
__sync_synchronize();
}
@@ -512,8 +515,9 @@ static bool has_semaphores(int fd)
static bool has_extended_busy_ioctl(int fd)
{
- igt_spin_t *spin = igt_spin_batch_new(fd,
- (igt_spin_opt_t){.engine = I915_EXEC_RENDER});
+ igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){
+ .engine = I915_EXEC_RENDER,
+ .preemptible = true});
uint32_t read, write;
__gem_busy(fd, spin->handle, &read, &write);
@@ -524,7 +528,9 @@ static bool has_extended_busy_ioctl(int fd)
static void basic(int fd, unsigned ring, unsigned flags)
{
- igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){.engine = ring});
+ igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){
+ .engine = ring,
+ .preemptible = true});
struct timespec tv;
int timeout;
bool busy;
@@ -440,7 +440,8 @@ static void test_parallel(int fd, unsigned int master)
*/
spin = igt_spin_batch_new(fd, (igt_spin_opt_t){
.engine = master,
- .dep = c.handle});
+ .dep = c.handle,
+ .preemptible = true});
resubmit(fd, spin->handle, master, 16);
/* Now queue the master request and its secondaries */
@@ -963,7 +964,8 @@ static void test_syncobj_unused_fence(int fd)
struct local_gem_exec_fence fence = {
.handle = syncobj_create(fd),
};
- igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){/* All 0s */});
+ igt_spin_t *spin = igt_spin_batch_new(fd,
+ (igt_spin_opt_t){.preemptible = true});
/* sanity check our syncobj_to_sync_file interface */
igt_assert_eq(__syncobj_to_sync_file(fd, 0), -ENOENT);
@@ -1055,7 +1057,8 @@ static void test_syncobj_signal(int fd)
struct local_gem_exec_fence fence = {
.handle = syncobj_create(fd),
};
- igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){/* All 0s */});
+ igt_spin_t *spin = igt_spin_batch_new(fd,
+ (igt_spin_opt_t){.preemptible = true});
/* Check that the syncobj is signaled only when our request/fence is */
@@ -1105,7 +1108,7 @@ static void test_syncobj_wait(int fd)
gem_quiescent_gpu(fd);
- spin = igt_spin_batch_new(fd, (igt_spin_opt_t){/* All 0s */});
+ spin = igt_spin_batch_new(fd, (igt_spin_opt_t){.preemptible = true});
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
@@ -1175,7 +1178,8 @@ static void test_syncobj_export(int fd)
.handle = syncobj_create(fd),
};
int export[2];
- igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){/* All 0s */});
+ igt_spin_t *spin = igt_spin_batch_new(fd,
+ (igt_spin_opt_t){.preemptible = true});
/* Check that if we export the syncobj prior to use it picks up
* the later fence. This allows a syncobj to establish a channel
@@ -1233,7 +1237,8 @@ static void test_syncobj_repeat(int fd)
struct drm_i915_gem_execbuffer2 execbuf;
struct local_gem_exec_fence *fence;
int export;
- igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){/* All 0s */});
+ igt_spin_t *spin = igt_spin_batch_new(fd,
+ (igt_spin_opt_t){.preemptible = true});
/* Check that we can wait on the same fence multiple times */
fence = calloc(nfences, sizeof(*fence));
@@ -1288,7 +1293,8 @@ static void test_syncobj_import(int fd)
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
- igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){/* All 0s */});
+ igt_spin_t *spin = igt_spin_batch_new(fd,
+ (igt_spin_opt_t){.preemptible = true});
uint32_t sync = syncobj_create(fd);
int fence;
@@ -346,7 +346,8 @@ static void latency_from_ring(int fd,
if (flags & PREEMPT)
spin = igt_spin_batch_new(fd, (igt_spin_opt_t){
.ctx = ctx[0],
- .engine = ring});
+ .engine = ring,
+ .preemptible = true});
if (flags & CORK) {
plug(fd, &c);
@@ -622,7 +622,8 @@ static void preempt(int fd, uint32_t handle,
igt_spin_t *spin =
__igt_spin_batch_new(fd, (igt_spin_opt_t){
.ctx = ctx[0],
- .engine = ring_id});
+ .engine = ring_id,
+ .preemptible = true});
for (int loop = 0; loop < 1024; loop++)
gem_execbuf(fd, &execbuf);
@@ -390,7 +390,8 @@ static void basic_reloc(int fd, unsigned before, unsigned after, unsigned flags)
if (flags & ACTIVE) {
spin = igt_spin_batch_new(fd, (igt_spin_opt_t){
.engine = I915_EXEC_DEFAULT,
- .dep = obj.handle});
+ .dep = obj.handle,
+ .preemptible = true});
if (!(flags & HANG))
igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
igt_assert(gem_bo_busy(fd, obj.handle));
@@ -458,7 +459,8 @@ static void basic_reloc(int fd, unsigned before, unsigned after, unsigned flags)
if (flags & ACTIVE) {
spin = igt_spin_batch_new(fd, (igt_spin_opt_t){
.engine = I915_EXEC_DEFAULT,
- .dep = obj.handle});
+ .dep = obj.handle,
+ .preemptible = true});
if (!(flags & HANG))
igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
igt_assert(gem_bo_busy(fd, obj.handle));
@@ -585,7 +587,9 @@ static void basic_range(int fd, unsigned flags)
execbuf.buffer_count = n + 1;
if (flags & ACTIVE) {
- spin = igt_spin_batch_new(fd, (igt_spin_opt_t){.dep = obj[n].handle});
+ spin = igt_spin_batch_new(fd, (igt_spin_opt_t){
+ .dep = obj[n].handle,
+ .preemptible = true});
if (!(flags & HANG))
igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
igt_assert(gem_bo_busy(fd, obj[n].handle));
@@ -149,7 +149,8 @@ static void unplug_show_queue(int fd, struct cork *c, unsigned int engine)
uint32_t ctx = create_highest_priority(fd);
spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){
.ctx = ctx,
- .engine = engine});
+ .engine = engine,
+ .preemptible = true});
gem_context_destroy(fd, ctx);
}
@@ -380,7 +381,8 @@ static void preempt(int fd, unsigned ring, unsigned flags)
}
spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){
.ctx = ctx[LO],
- .engine = ring});
+ .engine = ring,
+ .preemptible = true});
igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
store_dword(fd, ctx[HI], ring, result, 0, n + 1, 0, I915_GEM_DOMAIN_RENDER);
@@ -431,7 +433,8 @@ static void preempt_other(int fd, unsigned ring)
for_each_engine(fd, other) {
spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){
.ctx = ctx[NOISE],
- .engine = other});
+ .engine = other,
+ .preemptible = true});
store_dword(fd, ctx[LO], other,
result, (n + 1)*sizeof(uint32_t), n + 1,
0, I915_GEM_DOMAIN_RENDER);
@@ -486,7 +489,8 @@ static void preempt_self(int fd, unsigned ring)
for_each_engine(fd, other) {
spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){
.ctx = ctx[NOISE],
- .engine = other});
+ .engine = other,
+ .preemptible = true});
store_dword(fd, ctx[HI], other,
result, (n + 1)*sizeof(uint32_t), n + 1,
0, I915_GEM_DOMAIN_RENDER);
@@ -201,7 +201,9 @@ static void run_test(int fd, unsigned engine, unsigned flags)
}
if (flags & HANG)
- spin = igt_spin_batch_new(fd, (igt_spin_opt_t){.engine = engine});
+ spin = igt_spin_batch_new(fd, (igt_spin_opt_t){
+ .engine = engine,
+ .preemptible = true});
switch (mode(flags)) {
case NOSLEEP:
@@ -311,10 +311,13 @@ static void reclaim(unsigned engine, int timeout)
} while (!*shared);
}
- spin = igt_spin_batch_new(fd, (igt_spin_opt_t){.engine = engine});
+ spin = igt_spin_batch_new(fd, (igt_spin_opt_t){
+ .engine = engine,
+ .preemptible = true});
igt_until_timeout(timeout) {
igt_spin_t *next = __igt_spin_batch_new(fd, (igt_spin_opt_t){
- .engine = engine});
+ .engine = engine,
+ .preemptible = true});
igt_spin_batch_set_timeout(spin, timeout_100ms);
gem_sync(fd, spin->handle);
@@ -44,7 +44,8 @@ static void spin(int fd, unsigned int engine, unsigned int timeout_sec)
spin = igt_spin_batch_new(fd, (igt_spin_opt_t){.engine = engine});
while ((elapsed = igt_nsec_elapsed(&tv)) >> 30 < timeout_sec) {
igt_spin_t *next = __igt_spin_batch_new(fd, (igt_spin_opt_t){
- .engine = engine});
+ .engine = engine,
+ .preemptible = true});
igt_spin_batch_set_timeout(spin,
timeout_100ms - igt_nsec_elapsed(&itv));
@@ -112,7 +112,8 @@ static void basic(int fd, unsigned engine, unsigned flags)
struct cork cork = plug(fd, flags);
igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){
.engine = engine,
- .dep = cork.handle});
+ .dep = cork.handle,
+ .preemptible = true});
struct drm_i915_gem_wait wait = {
flags & WRITE ? cork.handle : spin->handle
};
@@ -91,8 +91,10 @@ static void flip_to_fb(igt_display_t *dpy, int pipe,
struct timespec tv = { 1, 0 };
struct drm_event_vblank ev;
- igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd,
- (igt_spin_opt_t){.engine = ring, .dep = fb->gem_handle});
+ igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd, (igt_spin_opt_t){
+ .engine = ring,
+ .dep = fb->gem_handle,
+ .preemptible = true});
if (modeset) {
/*
@@ -210,7 +212,8 @@ static void test_atomic_commit_hang(igt_display_t *dpy, igt_plane_t *primary,
igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd,
(igt_spin_opt_t){
.engine = ring,
- .dep = busy_fb->gem_handle});
+ .dep = busy_fb->gem_handle,
+ .preemptible = true});
struct pollfd pfd = { .fd = dpy->drm_fd, .events = POLLIN };
unsigned flags = 0;
struct drm_event_vblank ev;
@@ -299,7 +302,8 @@ static void test_pageflip_modeset_hang(igt_display_t *dpy,
t = igt_spin_batch_new(dpy->drm_fd, (igt_spin_opt_t){
.engine = ring,
- .dep = fb.gem_handle});
+ .dep = fb.gem_handle,
+ .preemptible = true});
do_or_die(drmModePageFlip(dpy->drm_fd, dpy->pipes[pipe].crtc_id, fb.fb_id, DRM_MODE_PAGE_FLIP_EVENT, &fb));
@@ -533,7 +533,9 @@ static void basic_flip_cursor(igt_display_t *display,
spin = NULL;
if (flags & BASIC_BUSY)
spin = igt_spin_batch_new(display->drm_fd,
- (igt_spin_opt_t){.dep = fb_info.gem_handle});
+ (igt_spin_opt_t){
+ .dep = fb_info.gem_handle,
+ .preemptible = true});
/* Start with a synchronous query to align with the vblank */
vblank_start = get_vblank(display->drm_fd, pipe, DRM_VBLANK_NEXTONMISS);
@@ -1300,7 +1302,8 @@ static void flip_vs_cursor_busy_crc(igt_display_t *display, bool atomic)
static const int max_crcs = 8;
spin = igt_spin_batch_new(display->drm_fd, (igt_spin_opt_t){
- .dep = fb_info[1].gem_handle});
+ .dep = fb_info[1].gem_handle,
+ .preemptible = true});
vblank_start = get_vblank(display->drm_fd, pipe, DRM_VBLANK_NEXTONMISS);
@@ -141,8 +141,9 @@ single(int gem_fd, const struct intel_execution_engine2 *e, bool busy)
fd = open_pmu(I915_PMU_ENGINE_BUSY(e->class, e->instance));
if (busy) {
- spin = igt_spin_batch_new(gem_fd,
- (igt_spin_opt_t){.engine = e2ring(gem_fd, e)});
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){
+ .engine = e2ring(gem_fd, e),
+ .preemptible = true});
igt_spin_batch_set_timeout(spin, batch_duration_ns);
} else {
usleep(batch_duration_ns / 1000);
@@ -204,8 +205,9 @@ busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
igt_assert_eq(i, num_engines);
- spin = igt_spin_batch_new(gem_fd,
- (igt_spin_opt_t){.engine = e2ring(gem_fd, e)});
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){
+ .engine = e2ring(gem_fd, e),
+ .preemptible = true});
igt_spin_batch_set_timeout(spin, batch_duration_ns);
gem_sync(gem_fd, spin->handle);
@@ -251,7 +253,9 @@ most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
idle_idx = i;
} else {
spin[i] = igt_spin_batch_new(gem_fd,
- (igt_spin_opt_t){.engine = e2ring(gem_fd, e_)});
+ (igt_spin_opt_t){
+ .engine = e2ring(gem_fd, e_),
+ .preemptible = true});
igt_spin_batch_set_timeout(spin[i], batch_duration_ns);
}
@@ -299,8 +303,9 @@ all_busy_check_all(int gem_fd, const unsigned int num_engines)
fd[i] = open_group(I915_PMU_ENGINE_BUSY(e->class, e->instance),
fd[0]);
- spin[i] = igt_spin_batch_new(gem_fd,
- (igt_spin_opt_t){.engine = e2ring(gem_fd, e)});
+ spin[i] = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){
+ .engine = e2ring(gem_fd, e),
+ .preemptible = true});
igt_spin_batch_set_timeout(spin[i], batch_duration_ns);
i++;
@@ -331,8 +336,9 @@ no_sema(int gem_fd, const struct intel_execution_engine2 *e, bool busy)
open_group(I915_PMU_ENGINE_WAIT(e->class, e->instance), fd);
if (busy) {
- spin = igt_spin_batch_new(gem_fd,
- (igt_spin_opt_t){.engine = e2ring(gem_fd, e)});
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){
+ .engine = e2ring(gem_fd, e),
+ .preemptible = true});
igt_spin_batch_set_timeout(spin, batch_duration_ns);
} else {
usleep(batch_duration_ns / 1000);
@@ -651,8 +657,9 @@ multi_client(int gem_fd, const struct intel_execution_engine2 *e)
*/
fd[1] = open_pmu(config);
- spin = igt_spin_batch_new(gem_fd,
- (igt_spin_opt_t){.engine = e2ring(gem_fd, e)});
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){
+ .engine = e2ring(gem_fd, e),
+ .preemptible = true});
igt_spin_batch_set_timeout(spin, 2 * batch_duration_ns);
slept = measured_usleep(batch_duration_ns / 1000);
@@ -757,8 +764,9 @@ static void cpu_hotplug(int gem_fd)
fd = perf_i915_open(I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0));
igt_assert(fd >= 0);
- spin = igt_spin_batch_new(gem_fd,
- (igt_spin_opt_t){.engine = I915_EXEC_RENDER});
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){
+ .engine = I915_EXEC_RENDER,
+ .preemptible = true});
igt_nsec_elapsed(&start);
@@ -871,7 +879,7 @@ test_interrupts(int gem_fd)
gem_quiescent_gpu(gem_fd);
fd = open_pmu(I915_PMU_INTERRUPTS);
- spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){/* All 0s */});
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){.preemptible = true});
obj.handle = gem_create(gem_fd, sz);
gem_write(gem_fd, obj.handle, sz - sizeof(bbe), &bbe, sizeof(bbe));
@@ -953,8 +961,9 @@ test_frequency(int gem_fd)
pmu_read_multi(fd, 2, start);
- spin = igt_spin_batch_new(gem_fd,
- (igt_spin_opt_t){.engine = I915_EXEC_RENDER});
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){
+ .engine = I915_EXEC_RENDER,
+ .preemptible = true});
igt_spin_batch_set_timeout(spin, duration_ns);
gem_sync(gem_fd, spin->handle);
@@ -979,8 +988,9 @@ test_frequency(int gem_fd)
pmu_read_multi(fd, 2, start);
- spin = igt_spin_batch_new(gem_fd,
- (igt_spin_opt_t){.engine = I915_EXEC_RENDER});
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){
+ .engine = I915_EXEC_RENDER,
+ .preemptible = true});
igt_spin_batch_set_timeout(spin, duration_ns);
gem_sync(gem_fd, spin->handle);
@@ -588,7 +588,10 @@ static void boost_freq(int fd, int *boost_freqs)
engine = I915_EXEC_RENDER;
if (intel_gen(lh.devid) >= 6)
engine = I915_EXEC_BLT;
- load = igt_spin_batch_new(fd, (igt_spin_opt_t){.engine = engine});
+ load = igt_spin_batch_new(fd, (igt_spin_opt_t){
+ .engine = engine,
+ .preemptible = true});
+
/* Waiting will grant us a boost to maximum */
gem_wait(fd, load->handle, &timeout);
This patch adds a parameter that allows the spinning batch to be made
pre-emptible by conditionally adding an arbitration point to the
spinning loop.

From RFC:
- Implicitly initialize struct members to zero. (Chris)

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Antonio Argenziano <antonio.argenziano@intel.com>
---
 lib/igt_dummyload.c       |  6 +++---
 lib/igt_dummyload.h       |  3 ++-
 lib/igt_gt.c              |  5 +++--
 tests/drv_missed_irq.c    |  2 +-
 tests/gem_busy.c          | 18 ++++++++++++------
 tests/gem_exec_fence.c    | 20 +++++++++++++-------
 tests/gem_exec_latency.c  |  3 ++-
 tests/gem_exec_nop.c      |  3 ++-
 tests/gem_exec_reloc.c    | 10 +++++++---
 tests/gem_exec_schedule.c | 12 ++++++++----
 tests/gem_exec_suspend.c  |  4 +++-
 tests/gem_shrink.c        |  7 +++++--
 tests/gem_spin_batch.c    |  3 ++-
 tests/gem_wait.c          |  3 ++-
 tests/kms_busy.c          | 12 ++++++++----
 tests/kms_cursor_legacy.c |  7 +++++--
 tests/perf_pmu.c          | 46 ++++++++++++++++++++++++++++------------------
 tests/pm_rps.c            |  5 ++++-
 18 files changed, 110 insertions(+), 59 deletions(-)
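For reviewers, a minimal usage sketch (not part of the patch itself;
the engine choice and surrounding calls are illustrative) of how a
test opts in to the new behaviour once this series is applied:

	/* Sketch only: spin on the render ring, but ask the spinner to
	 * emit MI_ARB_CHK each loop so the scheduler may preempt it.
	 */
	igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){
		.engine = I915_EXEC_RENDER,
		.preemptible = true,
	});

	igt_assert(gem_bo_busy(fd, spin->handle)); /* spinner is running */
	/* ... exercise preemption against the spinner ... */
	igt_spin_batch_free(fd, spin); /* terminate the spin loop */

Tests that rely on the batch never yielding (e.g. igt_hang_ctx() above)
leave .preemptible unset, or set it to false explicitly to document the
intent, and get the old non-arbitrating loop.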