@@ -172,14 +172,14 @@ static void emit_recursive_batch(igt_spin_t *spin,
}
igt_spin_t *
-__igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep)
+__igt_spin_batch_new(int fd, igt_spin_opt_t opts)
{
igt_spin_t *spin;
spin = calloc(1, sizeof(struct igt_spin));
igt_assert(spin);
- emit_recursive_batch(spin, fd, ctx, engine, dep);
+ emit_recursive_batch(spin, fd, opts.ctx, opts.engine, opts.dep);
igt_assert(gem_bo_busy(fd, spin->handle));
igt_list_add(&spin->link, &spin_list);
@@ -190,10 +190,13 @@ __igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep)
/**
* igt_spin_batch_new:
* @fd: open i915 drm file descriptor
- * @engine: Ring to execute batch OR'd with execbuf flags. If value is less
- * than 0, execute on all available rings.
- * @dep: handle to a buffer object dependency. If greater than 0, add a
- * relocation entry to this buffer within the batch.
+ * @opts: structure containing the parameters used by the call:
+ * - ctx: Context that will be used to submit the spinning batch. Provide
+ * 0 to use the default context.
+ * - engine: Ring to execute batch OR'd with execbuf flags. If value
+ * is less than 0, execute on all available rings.
+ * - dep: handle to a buffer object dependency. If greater than 0, add a
+ * relocation entry to this buffer within the batch.
*
* Start a recursive batch on a ring. Immediately returns a #igt_spin_t that
* contains the batch's handle that can be waited upon. The returned structure
@@ -203,11 +206,11 @@ __igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep)
* Structure with helper internal state for igt_spin_batch_free().
*/
igt_spin_t *
-igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep)
+igt_spin_batch_new(int fd, igt_spin_opt_t opts)
{
igt_require_gem(fd);
- return __igt_spin_batch_new(fd, ctx, engine, dep);
+ return __igt_spin_batch_new(fd, opts);
}
static void notify(union sigval arg)
@@ -37,14 +37,14 @@ typedef struct igt_spin {
uint32_t *batch;
} igt_spin_t;
-igt_spin_t *__igt_spin_batch_new(int fd,
- uint32_t ctx,
- unsigned engine,
- uint32_t dep);
-igt_spin_t *igt_spin_batch_new(int fd,
- uint32_t ctx,
- unsigned engine,
- uint32_t dep);
+typedef struct igt_spin_opt {
+ uint32_t ctx;
+ unsigned engine;
+ uint32_t dep;
+} igt_spin_opt_t;
+
+igt_spin_t *__igt_spin_batch_new(int fd, igt_spin_opt_t opts);
+igt_spin_t *igt_spin_batch_new(int fd, igt_spin_opt_t opts);
void igt_spin_batch_set_timeout(igt_spin_t *spin, int64_t ns);
void igt_spin_batch_end(igt_spin_t *spin);
void igt_spin_batch_free(int fd, igt_spin_t *spin);
@@ -33,7 +33,7 @@ IGT_TEST_DESCRIPTION("Inject missed interrupts and make sure they are caught");
static void trigger_missed_interrupt(int fd, unsigned ring)
{
- igt_spin_t *spin = __igt_spin_batch_new(fd, 0, ring, 0);
+ igt_spin_t *spin = __igt_spin_batch_new(fd, (igt_spin_opt_t){0, ring, 0});
igt_fork(child, 1) {
/* We are now a low priority child on the *same* CPU as the
@@ -113,7 +113,7 @@ static void semaphore(int fd, unsigned ring, uint32_t flags)
/* Create a long running batch which we can use to hog the GPU */
handle[BUSY] = gem_create(fd, 4096);
- spin = igt_spin_batch_new(fd, 0, ring, handle[BUSY]);
+ spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, ring, handle[BUSY]});
/* Queue a batch after the busy, it should block and remain "busy" */
igt_assert(exec_noop(fd, handle, ring | flags, false));
@@ -459,17 +459,17 @@ static void close_race(int fd)
igt_assert(sched_setscheduler(getpid(), SCHED_RR, &rt) == 0);
for (i = 0; i < nhandles; i++) {
- spin[i] = igt_spin_batch_new(fd, 0,
- engines[rand() % nengine], 0);
+ spin[i] = igt_spin_batch_new(fd, (igt_spin_opt_t){0,
+ engines[rand() % nengine], 0});
handles[i] = spin[i]->handle;
}
igt_until_timeout(20) {
for (i = 0; i < nhandles; i++) {
igt_spin_batch_free(fd, spin[i]);
- spin[i] = igt_spin_batch_new(fd, 0,
+ spin[i] = igt_spin_batch_new(fd, (igt_spin_opt_t){0,
engines[rand() % nengine],
- 0);
+ 0});
handles[i] = spin[i]->handle;
__sync_synchronize();
}
@@ -511,7 +511,7 @@ static bool has_semaphores(int fd)
static bool has_extended_busy_ioctl(int fd)
{
- igt_spin_t *spin = igt_spin_batch_new(fd, 0, I915_EXEC_RENDER, 0);
+ igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, I915_EXEC_RENDER, 0});
uint32_t read, write;
__gem_busy(fd, spin->handle, &read, &write);
@@ -522,7 +522,7 @@ static bool has_extended_busy_ioctl(int fd)
static void basic(int fd, unsigned ring, unsigned flags)
{
- igt_spin_t *spin = igt_spin_batch_new(fd, 0, ring, 0);
+ igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, ring, 0});
struct timespec tv;
int timeout;
bool busy;
@@ -438,7 +438,7 @@ static void test_parallel(int fd, unsigned int master)
/* Fill the queue with many requests so that the next one has to
* wait before it can be executed by the hardware.
*/
- spin = igt_spin_batch_new(fd, 0, master, c.handle);
+ spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, master, c.handle});
resubmit(fd, spin->handle, master, 16);
/* Now queue the master request and its secondaries */
@@ -961,7 +961,7 @@ static void test_syncobj_unused_fence(int fd)
struct local_gem_exec_fence fence = {
.handle = syncobj_create(fd),
};
- igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+ igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0});
/* sanity check our syncobj_to_sync_file interface */
igt_assert_eq(__syncobj_to_sync_file(fd, 0), -ENOENT);
@@ -1053,7 +1053,7 @@ static void test_syncobj_signal(int fd)
struct local_gem_exec_fence fence = {
.handle = syncobj_create(fd),
};
- igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+ igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0});
/* Check that the syncobj is signaled only when our request/fence is */
@@ -1103,7 +1103,7 @@ static void test_syncobj_wait(int fd)
gem_quiescent_gpu(fd);
- spin = igt_spin_batch_new(fd, 0, 0, 0);
+ spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0});
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
@@ -1173,7 +1173,7 @@ static void test_syncobj_export(int fd)
.handle = syncobj_create(fd),
};
int export[2];
- igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+ igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0});
/* Check that if we export the syncobj prior to use it picks up
* the later fence. This allows a syncobj to establish a channel
@@ -1231,7 +1231,7 @@ static void test_syncobj_repeat(int fd)
struct drm_i915_gem_execbuffer2 execbuf;
struct local_gem_exec_fence *fence;
int export;
- igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+ igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0});
/* Check that we can wait on the same fence multiple times */
fence = calloc(nfences, sizeof(*fence));
@@ -1286,7 +1286,7 @@ static void test_syncobj_import(int fd)
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
- igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+ igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0});
uint32_t sync = syncobj_create(fd);
int fence;
@@ -344,7 +344,7 @@ static void latency_from_ring(int fd,
I915_GEM_DOMAIN_GTT);
if (flags & PREEMPT)
- spin = igt_spin_batch_new(fd, ctx[0], ring, 0);
+ spin = igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[0], ring, 0});
if (flags & CORK) {
plug(fd, &c);
@@ -620,7 +620,7 @@ static void preempt(int fd, uint32_t handle,
clock_gettime(CLOCK_MONOTONIC, &start);
do {
igt_spin_t *spin =
- __igt_spin_batch_new(fd, ctx[0], ring_id, 0);
+ __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[0], ring_id, 0});
for (int loop = 0; loop < 1024; loop++)
gem_execbuf(fd, &execbuf);
@@ -388,7 +388,7 @@ static void basic_reloc(int fd, unsigned before, unsigned after, unsigned flags)
}
if (flags & ACTIVE) {
- spin = igt_spin_batch_new(fd, 0, I915_EXEC_DEFAULT, obj.handle);
+ spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, I915_EXEC_DEFAULT, obj.handle});
if (!(flags & HANG))
igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
igt_assert(gem_bo_busy(fd, obj.handle));
@@ -454,7 +454,7 @@ static void basic_reloc(int fd, unsigned before, unsigned after, unsigned flags)
}
if (flags & ACTIVE) {
- spin = igt_spin_batch_new(fd, 0, I915_EXEC_DEFAULT, obj.handle);
+ spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, I915_EXEC_DEFAULT, obj.handle});
if (!(flags & HANG))
igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
igt_assert(gem_bo_busy(fd, obj.handle));
@@ -581,7 +581,7 @@ static void basic_range(int fd, unsigned flags)
execbuf.buffer_count = n + 1;
if (flags & ACTIVE) {
- spin = igt_spin_batch_new(fd, 0, 0, obj[n].handle);
+ spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, obj[n].handle});
if (!(flags & HANG))
igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
igt_assert(gem_bo_busy(fd, obj[n].handle));
@@ -147,7 +147,7 @@ static void unplug_show_queue(int fd, struct cork *c, unsigned int engine)
for (int n = 0; n < ARRAY_SIZE(spin); n++) {
uint32_t ctx = create_highest_priority(fd);
- spin[n] = __igt_spin_batch_new(fd, ctx, engine, 0);
+ spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx, engine, 0});
gem_context_destroy(fd, ctx);
}
@@ -376,7 +376,7 @@ static void preempt(int fd, unsigned ring, unsigned flags)
ctx[LO] = gem_context_create(fd);
gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
}
- spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0);
+ spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[LO], ring, 0});
igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
store_dword(fd, ctx[HI], ring, result, 0, n + 1, 0, I915_GEM_DOMAIN_RENDER);
@@ -425,7 +425,7 @@ static void preempt_other(int fd, unsigned ring)
n = 0;
for_each_engine(fd, other) {
- spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
+ spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[NOISE], other, 0});
store_dword(fd, ctx[LO], other,
result, (n + 1)*sizeof(uint32_t), n + 1,
0, I915_GEM_DOMAIN_RENDER);
@@ -478,7 +478,7 @@ static void preempt_self(int fd, unsigned ring)
n = 0;
gem_context_set_priority(fd, ctx[HI], MIN_PRIO);
for_each_engine(fd, other) {
- spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
+ spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[NOISE], other, 0});
store_dword(fd, ctx[HI], other,
result, (n + 1)*sizeof(uint32_t), n + 1,
0, I915_GEM_DOMAIN_RENDER);
@@ -201,7 +201,7 @@ static void run_test(int fd, unsigned engine, unsigned flags)
}
if (flags & HANG)
- spin = igt_spin_batch_new(fd, 0, engine, 0);
+ spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 0});
switch (mode(flags)) {
case NOSLEEP:
@@ -311,9 +311,9 @@ static void reclaim(unsigned engine, int timeout)
} while (!*shared);
}
- spin = igt_spin_batch_new(fd, 0, engine, 0);
+ spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 0});
igt_until_timeout(timeout) {
- igt_spin_t *next = __igt_spin_batch_new(fd, 0, engine, 0);
+ igt_spin_t *next = __igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 0});
igt_spin_batch_set_timeout(spin, timeout_100ms);
gem_sync(fd, spin->handle);
@@ -41,9 +41,9 @@ static void spin(int fd, unsigned int engine, unsigned int timeout_sec)
struct timespec itv = { };
uint64_t elapsed;
- spin = igt_spin_batch_new(fd, 0, engine, 0);
+ spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 0});
while ((elapsed = igt_nsec_elapsed(&tv)) >> 30 < timeout_sec) {
- igt_spin_t *next = __igt_spin_batch_new(fd, 0, engine, 0);
+ igt_spin_t *next = __igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 0});
igt_spin_batch_set_timeout(spin,
timeout_100ms - igt_nsec_elapsed(&itv));
@@ -756,10 +756,10 @@ preempt(int fd, unsigned ring, int num_children, int timeout)
cycles = 0;
do {
igt_spin_t *spin =
- __igt_spin_batch_new(fd,
+ __igt_spin_batch_new(fd, (igt_spin_opt_t){
ctx[0],
execbuf.flags,
- 0);
+ 0});
do {
gem_execbuf(fd, &execbuf);
@@ -110,7 +110,7 @@ static void unplug(struct cork *c)
static void basic(int fd, unsigned engine, unsigned flags)
{
struct cork cork = plug(fd, flags);
- igt_spin_t *spin = igt_spin_batch_new(fd, 0, engine, cork.handle);
+ igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, cork.handle});
struct drm_i915_gem_wait wait = {
flags & WRITE ? cork.handle : spin->handle
};
@@ -92,7 +92,7 @@ static void flip_to_fb(igt_display_t *dpy, int pipe,
struct drm_event_vblank ev;
igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd,
- 0, ring, fb->gem_handle);
+ (igt_spin_opt_t){0, ring, fb->gem_handle});
if (modeset) {
/*
@@ -208,7 +208,7 @@ static void test_atomic_commit_hang(igt_display_t *dpy, igt_plane_t *primary,
struct igt_fb *busy_fb, unsigned ring)
{
igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd,
- 0, ring, busy_fb->gem_handle);
+ (igt_spin_opt_t){0, ring, busy_fb->gem_handle});
struct pollfd pfd = { .fd = dpy->drm_fd, .events = POLLIN };
unsigned flags = 0;
struct drm_event_vblank ev;
@@ -295,7 +295,7 @@ static void test_pageflip_modeset_hang(igt_display_t *dpy,
igt_display_commit2(dpy, dpy->is_atomic ? COMMIT_ATOMIC : COMMIT_LEGACY);
- t = igt_spin_batch_new(dpy->drm_fd, 0, ring, fb.gem_handle);
+ t = igt_spin_batch_new(dpy->drm_fd, (igt_spin_opt_t){0, ring, fb.gem_handle});
do_or_die(drmModePageFlip(dpy->drm_fd, dpy->pipes[pipe].crtc_id, fb.fb_id, DRM_MODE_PAGE_FLIP_EVENT, &fb));
@@ -532,7 +532,7 @@ static void basic_flip_cursor(igt_display_t *display,
spin = NULL;
if (flags & BASIC_BUSY)
- spin = igt_spin_batch_new(display->drm_fd, 0, 0, fb_info.gem_handle);
+ spin = igt_spin_batch_new(display->drm_fd, (igt_spin_opt_t){0, 0, fb_info.gem_handle});
/* Start with a synchronous query to align with the vblank */
vblank_start = get_vblank(display->drm_fd, pipe, DRM_VBLANK_NEXTONMISS);
@@ -1298,8 +1298,8 @@ static void flip_vs_cursor_busy_crc(igt_display_t *display, bool atomic)
int ncrcs;
static const int max_crcs = 8;
- spin = igt_spin_batch_new(display->drm_fd, 0, 0,
- fb_info[1].gem_handle);
+ spin = igt_spin_batch_new(display->drm_fd, (igt_spin_opt_t){0, 0,
+ fb_info[1].gem_handle});
vblank_start = get_vblank(display->drm_fd, pipe, DRM_VBLANK_NEXTONMISS);
@@ -692,15 +692,15 @@ static unsigned int run_test_step(struct test_output *o)
o->current_fb_id = !o->current_fb_id;
if (o->flags & TEST_WITH_DUMMY_BCS) {
- spin_bcs = igt_spin_batch_new(drm_fd, 0, I915_EXEC_BLT,
- o->fb_info[o->current_fb_id].gem_handle);
+ spin_bcs = igt_spin_batch_new(drm_fd, (igt_spin_opt_t){0, I915_EXEC_BLT,
+ o->fb_info[o->current_fb_id].gem_handle});
igt_spin_batch_set_timeout(spin_bcs,
NSEC_PER_SEC);
}
if (o->flags & TEST_WITH_DUMMY_RCS) {
- spin_rcs = igt_spin_batch_new(drm_fd, 0, I915_EXEC_RENDER,
- o->fb_info[o->current_fb_id].gem_handle);
+ spin_rcs = igt_spin_batch_new(drm_fd, (igt_spin_opt_t){0, I915_EXEC_RENDER,
+ o->fb_info[o->current_fb_id].gem_handle});
igt_spin_batch_set_timeout(spin_rcs,
NSEC_PER_SEC);
}
@@ -141,7 +141,7 @@ single(int gem_fd, const struct intel_execution_engine2 *e, bool busy)
fd = open_pmu(I915_PMU_ENGINE_BUSY(e->class, e->instance));
if (busy) {
- spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, e2ring(gem_fd, e), 0});
igt_spin_batch_set_timeout(spin, batch_duration_ns);
} else {
usleep(batch_duration_ns / 1000);
@@ -203,7 +203,7 @@ busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
igt_assert_eq(i, num_engines);
- spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, e2ring(gem_fd, e), 0});
igt_spin_batch_set_timeout(spin, batch_duration_ns);
gem_sync(gem_fd, spin->handle);
@@ -248,8 +248,8 @@ most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
if (e == e_) {
idle_idx = i;
} else {
- spin[i] = igt_spin_batch_new(gem_fd, 0,
- e2ring(gem_fd, e_), 0);
+ spin[i] = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0,
+ e2ring(gem_fd, e_), 0});
igt_spin_batch_set_timeout(spin[i], batch_duration_ns);
}
@@ -297,7 +297,7 @@ all_busy_check_all(int gem_fd, const unsigned int num_engines)
fd[i] = open_group(I915_PMU_ENGINE_BUSY(e->class, e->instance),
fd[0]);
- spin[i] = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
+ spin[i] = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, e2ring(gem_fd, e), 0});
igt_spin_batch_set_timeout(spin[i], batch_duration_ns);
i++;
@@ -328,7 +328,7 @@ no_sema(int gem_fd, const struct intel_execution_engine2 *e, bool busy)
open_group(I915_PMU_ENGINE_WAIT(e->class, e->instance), fd);
if (busy) {
- spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, e2ring(gem_fd, e), 0});
igt_spin_batch_set_timeout(spin, batch_duration_ns);
} else {
usleep(batch_duration_ns / 1000);
@@ -647,7 +647,7 @@ multi_client(int gem_fd, const struct intel_execution_engine2 *e)
*/
fd[1] = open_pmu(config);
- spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, e2ring(gem_fd, e), 0});
igt_spin_batch_set_timeout(spin, 2 * batch_duration_ns);
slept = measured_usleep(batch_duration_ns / 1000);
@@ -752,7 +752,7 @@ static void cpu_hotplug(int gem_fd)
fd = perf_i915_open(I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0));
igt_assert(fd >= 0);
- spin = igt_spin_batch_new(gem_fd, 0, I915_EXEC_RENDER, 0);
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, I915_EXEC_RENDER, 0});
igt_nsec_elapsed(&start);
@@ -865,7 +865,7 @@ test_interrupts(int gem_fd)
gem_quiescent_gpu(gem_fd);
fd = open_pmu(I915_PMU_INTERRUPTS);
- spin = igt_spin_batch_new(gem_fd, 0, 0, 0);
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, 0, 0});
obj.handle = gem_create(gem_fd, sz);
gem_write(gem_fd, obj.handle, sz - sizeof(bbe), &bbe, sizeof(bbe));
@@ -947,7 +947,7 @@ test_frequency(int gem_fd)
pmu_read_multi(fd, 2, start);
- spin = igt_spin_batch_new(gem_fd, 0, I915_EXEC_RENDER, 0);
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, I915_EXEC_RENDER, 0});
igt_spin_batch_set_timeout(spin, duration_ns);
gem_sync(gem_fd, spin->handle);
@@ -972,7 +972,7 @@ test_frequency(int gem_fd)
pmu_read_multi(fd, 2, start);
- spin = igt_spin_batch_new(gem_fd, 0, I915_EXEC_RENDER, 0);
+ spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, I915_EXEC_RENDER, 0});
igt_spin_batch_set_timeout(spin, duration_ns);
gem_sync(gem_fd, spin->handle);
@@ -588,7 +588,7 @@ static void boost_freq(int fd, int *boost_freqs)
engine = I915_EXEC_RENDER;
if (intel_gen(lh.devid) >= 6)
engine = I915_EXEC_BLT;
- load = igt_spin_batch_new(fd, 0, engine, 0);
+ load = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 0});
/* Waiting will grant us a boost to maximum */
gem_wait(fd, load->handle, &timeout);
The intent of this patch is to clean up the interface of the spinning
batch workload by grouping the parameters used to spawn it into a
struct.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Antonio Argenziano <antonio.argenziano@intel.com>
---
 lib/igt_dummyload.c       | 19 +++++++++++--------
 lib/igt_dummyload.h       | 16 ++++++++--------
 tests/drv_missed_irq.c    |  2 +-
 tests/gem_busy.c          | 14 +++++++-------
 tests/gem_exec_fence.c    | 14 +++++++-------
 tests/gem_exec_latency.c  |  2 +-
 tests/gem_exec_nop.c      |  2 +-
 tests/gem_exec_reloc.c    |  6 +++---
 tests/gem_exec_schedule.c |  8 ++++----
 tests/gem_exec_suspend.c  |  2 +-
 tests/gem_shrink.c        |  4 ++--
 tests/gem_spin_batch.c    |  4 ++--
 tests/gem_sync.c          |  4 ++--
 tests/gem_wait.c          |  2 +-
 tests/kms_busy.c          |  6 +++---
 tests/kms_cursor_legacy.c |  6 +++---
 tests/kms_flip.c          |  8 ++++----
 tests/perf_pmu.c          | 22 +++++++++++-----------
 tests/pm_rps.c            |  2 +-
 19 files changed, 73 insertions(+), 70 deletions(-)
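As an illustration of the new calling convention (a sketch only, not part
of the patch; spin, fd, ctx and dep_handle are hypothetical locals and the
usual igt_dummyload.h declarations are assumed):

	/* Old interface: positional arguments. */
	spin = igt_spin_batch_new(fd, ctx, I915_EXEC_RENDER, dep_handle);

	/* New interface: parameters grouped into igt_spin_opt_t. The
	 * callers converted in this patch pass a positional compound
	 * literal, e.g. (igt_spin_opt_t){0, ring, 0}; designated
	 * initializers work just as well and leave omitted fields
	 * zeroed (default context, I915_EXEC_DEFAULT ring, no
	 * dependency).
	 */
	spin = igt_spin_batch_new(fd, (igt_spin_opt_t){
		.ctx = ctx,
		.engine = I915_EXEC_RENDER,
		.dep = dep_handle,
	});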