author		Chris Wilson <chris@chris-wilson.co.uk>	2018-06-25 13:27:36 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2018-07-06 14:29:35 +0100
commit		4b57f85b31f01701584190dd2f0adbc9563562be (patch)
tree		80902143e54703a7688c4e383703ca876340cb30
parent		f4a60b943391519aa95bcd6aa2a4a7f9ed34d084 (diff)
lib: Convert spin batch constructor to a factory
In order to make adding more options easier, expose the full set of
options to the caller.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Petri Latvala <petri.latvala@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
-rw-r--r--	lib/igt_dummyload.c            | 147
-rw-r--r--	lib/igt_dummyload.h            |  42
-rw-r--r--	tests/drv_missed_irq.c         |   2
-rw-r--r--	tests/gem_busy.c               |  17
-rw-r--r--	tests/gem_ctx_isolation.c      |  26
-rw-r--r--	tests/gem_eio.c                |  13
-rw-r--r--	tests/gem_exec_fence.c         |  16
-rw-r--r--	tests/gem_exec_latency.c       |  18
-rw-r--r--	tests/gem_exec_nop.c           |   4
-rw-r--r--	tests/gem_exec_reloc.c         |  10
-rw-r--r--	tests/gem_exec_schedule.c      |  27
-rw-r--r--	tests/gem_exec_suspend.c       |   2
-rw-r--r--	tests/gem_fenced_exec_thrash.c |   2
-rw-r--r--	tests/gem_shrink.c             |   4
-rw-r--r--	tests/gem_spin_batch.c         |   4
-rw-r--r--	tests/gem_sync.c               |   5
-rw-r--r--	tests/gem_wait.c               |   4
-rw-r--r--	tests/kms_busy.c               |  10
-rw-r--r--	tests/kms_cursor_legacy.c      |   7
-rw-r--r--	tests/perf_pmu.c               |  33
-rw-r--r--	tests/pm_rps.c                 |   9
21 files changed, 189 insertions(+), 213 deletions(-)
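
Before the hunks themselves, a sketch of what the conversion means at a call site. This is illustrative, not part of the patch: the wrapper function and its parameters are hypothetical, but the igt_spin_batch_new()/__igt_spin_batch_new() macros, the .ctx/.engine/.dependency/.flags options and the IGT_SPIN_* flags are exactly those introduced below.

    #include "igt.h"

    /* Hypothetical call sites; fd is an open i915 DRM fd, ring and handle
     * come from the surrounding test. Unnamed options default to zero.
     */
    static void example_callsites(int fd, unsigned int ring, uint32_t handle)
    {
            igt_spin_t *spin;

            /* Old constructor: four positional arguments, always spelled out:
             *     spin = igt_spin_batch_new(fd, 0, ring, 0);
             */

            /* New factory macro: name only the options you need. */
            spin = igt_spin_batch_new(fd, .engine = ring);
            igt_spin_batch_free(fd, spin);

            spin = igt_spin_batch_new(fd, .engine = ring, .dependency = handle);
            igt_spin_batch_free(fd, spin);

            /* The former _fence/_poll constructors become flags. */
            spin = igt_spin_batch_new(fd,
                                      .engine = ring,
                                      .flags = IGT_SPIN_FENCE_OUT);
            igt_spin_batch_free(fd, spin);
    }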
diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
index 3809b4e6..94efdf74 100644
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -75,12 +75,9 @@ fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
 	reloc->write_domain = write_domains;
 }
 
-#define OUT_FENCE (1 << 0)
-#define POLL_RUN (1 << 1)
-
 static int
-emit_recursive_batch(igt_spin_t *spin, int fd, uint32_t ctx, unsigned engine,
-		     uint32_t dep, unsigned int flags)
+emit_recursive_batch(igt_spin_t *spin,
+		     int fd, const struct igt_spin_factory *opts)
 {
 #define SCRATCH 0
 #define BATCH 1
@@ -95,21 +92,18 @@ emit_recursive_batch(igt_spin_t *spin, int fd, uint32_t ctx, unsigned engine,
 	int i;
 
 	nengine = 0;
-	if (engine == ALL_ENGINES) {
-		for_each_engine(fd, engine) {
-			if (engine) {
-				if (flags & POLL_RUN)
-					igt_require(!(flags & POLL_RUN) ||
-						    gem_can_store_dword(fd, engine));
-
-				engines[nengine++] = engine;
-			}
+	if (opts->engine == ALL_ENGINES) {
+		unsigned int engine;
+
+		for_each_physical_engine(fd, engine) {
+			if (opts->flags & IGT_SPIN_POLL_RUN &&
+			    !gem_can_store_dword(fd, engine))
+				continue;
+
+			engines[nengine++] = engine;
 		}
 	} else {
-		gem_require_ring(fd, engine);
-		igt_require(!(flags & POLL_RUN) ||
-			    gem_can_store_dword(fd, engine));
-		engines[nengine++] = engine;
+		engines[nengine++] = opts->engine;
 	}
 	igt_require(nengine);
 
@@ -130,20 +124,20 @@ emit_recursive_batch(igt_spin_t *spin, int fd, uint32_t ctx, unsigned engine,
 	execbuf->buffer_count++;
 	batch_start = batch;
 
-	if (dep) {
-		igt_assert(!(flags & POLL_RUN));
+	if (opts->dependency) {
+		igt_assert(!(opts->flags & IGT_SPIN_POLL_RUN));
 
 		/* dummy write to dependency */
-		obj[SCRATCH].handle = dep;
+		obj[SCRATCH].handle = opts->dependency;
 		fill_reloc(&relocs[obj[BATCH].relocation_count++],
-			   dep, 1020,
+			   opts->dependency, 1020,
 			   I915_GEM_DOMAIN_RENDER,
 			   I915_GEM_DOMAIN_RENDER);
 		execbuf->buffer_count++;
-	} else if (flags & POLL_RUN) {
+	} else if (opts->flags & IGT_SPIN_POLL_RUN) {
 		unsigned int offset;
 
-		igt_assert(!dep);
+		igt_assert(!opts->dependency);
 
 		if (gen == 4 || gen == 5) {
 			execbuf->flags |= I915_EXEC_SECURE;
@@ -231,9 +225,9 @@ emit_recursive_batch(igt_spin_t *spin, int fd, uint32_t ctx, unsigned engine,
 	execbuf->buffers_ptr = to_user_pointer(obj +
 					       (2 - execbuf->buffer_count));
-	execbuf->rsvd1 = ctx;
+	execbuf->rsvd1 = opts->ctx;
 
-	if (flags & OUT_FENCE)
+	if (opts->flags & IGT_SPIN_FENCE_OUT)
 		execbuf->flags |= I915_EXEC_FENCE_OUT;
 
 	for (i = 0; i < nengine; i++) {
@@ -242,7 +236,7 @@ emit_recursive_batch(igt_spin_t *spin, int fd, uint32_t ctx, unsigned engine,
 
 		gem_execbuf_wr(fd, execbuf);
 
-		if (flags & OUT_FENCE) {
+		if (opts->flags & IGT_SPIN_FENCE_OUT) {
 			int _fd = execbuf->rsvd2 >> 32;
 
 			igt_assert(_fd >= 0);
@@ -271,16 +265,14 @@ emit_recursive_batch(igt_spin_t *spin, int fd, uint32_t ctx, unsigned engine,
 }
 
 static igt_spin_t *
-___igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep,
-		      unsigned int flags)
+spin_batch_create(int fd, const struct igt_spin_factory *opts)
 {
 	igt_spin_t *spin;
 
 	spin = calloc(1, sizeof(struct igt_spin));
 	igt_assert(spin);
 
-	spin->out_fence = emit_recursive_batch(spin, fd, ctx, engine, dep,
-					       flags);
+	spin->out_fence = emit_recursive_batch(spin, fd, opts);
 
 	pthread_mutex_lock(&list_lock);
 	igt_list_add(&spin->link, &spin_list);
@@ -290,18 +282,15 @@ ___igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep,
 }
 
 igt_spin_t *
-__igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep)
+__igt_spin_batch_factory(int fd, const struct igt_spin_factory *opts)
 {
-	return ___igt_spin_batch_new(fd, ctx, engine, dep, 0);
+	return spin_batch_create(fd, opts);
 }
 
 /**
- * igt_spin_batch_new:
+ * igt_spin_batch_factory:
  * @fd: open i915 drm file descriptor
- * @engine: Ring to execute batch OR'd with execbuf flags. If value is less
- *          than 0, execute on all available rings.
- * @dep: handle to a buffer object dependency. If greater than 0, add a
- *       relocation entry to this buffer within the batch.
+ * @opts: controlling options such as context, engine, dependencies etc
  *
  * Start a recursive batch on a ring. Immediately returns a #igt_spin_t that
  * contains the batch's handle that can be waited upon. The returned structure
@@ -311,86 +300,26 @@ __igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep)
  * Structure with helper internal state for igt_spin_batch_free().
  */
 igt_spin_t *
-igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep)
+igt_spin_batch_factory(int fd, const struct igt_spin_factory *opts)
 {
 	igt_spin_t *spin;
 
 	igt_require_gem(fd);
 
-	spin = __igt_spin_batch_new(fd, ctx, engine, dep);
-	igt_assert(gem_bo_busy(fd, spin->handle));
-
-	return spin;
-}
-
-igt_spin_t *
-__igt_spin_batch_new_fence(int fd, uint32_t ctx, unsigned engine)
-{
-	return ___igt_spin_batch_new(fd, ctx, engine, 0, OUT_FENCE);
-}
+	if (opts->engine != ALL_ENGINES) {
+		gem_require_ring(fd, opts->engine);
+		if (opts->flags & IGT_SPIN_POLL_RUN)
+			igt_require(gem_can_store_dword(fd, opts->engine));
+	}
 
-/**
- * igt_spin_batch_new_fence:
- * @fd: open i915 drm file descriptor
- * @engine: Ring to execute batch OR'd with execbuf flags. If value is less
- *          than 0, execute on all available rings.
- *
- * Start a recursive batch on a ring. Immediately returns a #igt_spin_t that
- * contains the batch's handle that can be waited upon. The returned structure
- * must be passed to igt_spin_batch_free() for post-processing.
- *
- * igt_spin_t will contain an output fence associtated with this batch.
- *
- * Returns:
- * Structure with helper internal state for igt_spin_batch_free().
- */
-igt_spin_t *
-igt_spin_batch_new_fence(int fd, uint32_t ctx, unsigned engine)
-{
-	igt_spin_t *spin;
+	spin = spin_batch_create(fd, opts);
 
-	igt_require_gem(fd);
-	igt_require(gem_has_exec_fence(fd));
-
-	spin = __igt_spin_batch_new_fence(fd, ctx, engine);
 	igt_assert(gem_bo_busy(fd, spin->handle));
-	igt_assert(poll(&(struct pollfd){spin->out_fence, POLLIN}, 1, 0) == 0);
-
-	return spin;
-}
-
-igt_spin_t *
-__igt_spin_batch_new_poll(int fd, uint32_t ctx, unsigned engine)
-{
-	return ___igt_spin_batch_new(fd, ctx, engine, 0, POLL_RUN);
-}
+	if (opts->flags & IGT_SPIN_FENCE_OUT) {
+		struct pollfd pfd = { spin->out_fence, POLLIN };
 
-/**
- * igt_spin_batch_new_poll:
- * @fd: open i915 drm file descriptor
- * @engine: Ring to execute batch OR'd with execbuf flags. If value is less
- *          than 0, execute on all available rings.
- *
- * Start a recursive batch on a ring. Immediately returns a #igt_spin_t that
- * contains the batch's handle that can be waited upon. The returned structure
- * must be passed to igt_spin_batch_free() for post-processing.
- *
- * igt_spin_t->running will containt a pointer which target will change from
- * zero to one once the spinner actually starts executing on the GPU.
- *
- * Returns:
- * Structure with helper internal state for igt_spin_batch_free().
- */
-igt_spin_t *
-igt_spin_batch_new_poll(int fd, uint32_t ctx, unsigned engine)
-{
-	igt_spin_t *spin;
-
-	igt_require_gem(fd);
-	igt_require(gem_mmap__has_wc(fd));
-
-	spin = __igt_spin_batch_new_poll(fd, ctx, engine);
-	igt_assert(gem_bo_busy(fd, spin->handle));
+		igt_assert(poll(&pfd, 1, 0) == 0);
+	}
 
 	return spin;
 }
diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h
index c6ccc293..c794f254 100644
--- a/lib/igt_dummyload.h
+++ b/lib/igt_dummyload.h
@@ -43,29 +43,25 @@ typedef struct igt_spin {
 	bool *running;
 } igt_spin_t;
 
-igt_spin_t *__igt_spin_batch_new(int fd,
-				 uint32_t ctx,
-				 unsigned engine,
-				 uint32_t dep);
-igt_spin_t *igt_spin_batch_new(int fd,
-			       uint32_t ctx,
-			       unsigned engine,
-			       uint32_t dep);
-
-igt_spin_t *__igt_spin_batch_new_fence(int fd,
-				       uint32_t ctx,
-				       unsigned engine);
-
-igt_spin_t *igt_spin_batch_new_fence(int fd,
-				     uint32_t ctx,
-				     unsigned engine);
-
-igt_spin_t *__igt_spin_batch_new_poll(int fd,
-				      uint32_t ctx,
-				      unsigned engine);
-igt_spin_t *igt_spin_batch_new_poll(int fd,
-				    uint32_t ctx,
-				    unsigned engine);
+struct igt_spin_factory {
+	uint32_t ctx;
+	uint32_t dependency;
+	unsigned int engine;
+	unsigned int flags;
+};
+
+#define IGT_SPIN_FENCE_OUT (1 << 0)
+#define IGT_SPIN_POLL_RUN (1 << 1)
+
+igt_spin_t *
+__igt_spin_batch_factory(int fd, const struct igt_spin_factory *opts);
+igt_spin_t *
+igt_spin_batch_factory(int fd, const struct igt_spin_factory *opts);
+
+#define __igt_spin_batch_new(fd, ...) \
+	__igt_spin_batch_factory(fd, &((struct igt_spin_factory){__VA_ARGS__}))
+#define igt_spin_batch_new(fd, ...) \
+	igt_spin_batch_factory(fd, &((struct igt_spin_factory){__VA_ARGS__}))
 
 void igt_spin_batch_set_timeout(igt_spin_t *spin, int64_t ns);
 void igt_spin_batch_end(igt_spin_t *spin);
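
The header hunk above is where the ergonomics come from: igt_spin_batch_new() is now a variadic macro wrapping a C99 compound literal with designated initializers, which is how C gets named, optional arguments. A self-contained sketch of the same pattern with generic names (nothing below is from the patch):

    #include <stdio.h>

    struct options {
            unsigned int engine;
            unsigned int flags;
    };

    static void run(int fd, const struct options *opts)
    {
            printf("fd=%d engine=%u flags=%u\n", fd, opts->engine, opts->flags);
    }

    /* Named, optional arguments: members not mentioned are zero-initialized. */
    #define run_new(fd, ...) run(fd, &((struct options){ __VA_ARGS__ }))

    int main(void)
    {
            run_new(1, .engine = 2);              /* one named option */
            run_new(1, .engine = 2, .flags = 4);  /* several, any order */
            run_new(1);  /* empty option list needs GNU C or C23 ({ } literal) */
            return 0;
    }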
diff --git a/tests/drv_missed_irq.c b/tests/drv_missed_irq.c
index 791ee51f..78690c36 100644
--- a/tests/drv_missed_irq.c
+++ b/tests/drv_missed_irq.c
@@ -33,7 +33,7 @@ IGT_TEST_DESCRIPTION("Inject missed interrupts and make sure they are caught");
 
 static void trigger_missed_interrupt(int fd, unsigned ring)
 {
-	igt_spin_t *spin = __igt_spin_batch_new(fd, 0, ring, 0);
+	igt_spin_t *spin = __igt_spin_batch_new(fd, .engine = ring);
 	uint32_t go;
 	int link[2];
 
diff --git a/tests/gem_busy.c b/tests/gem_busy.c
index f564651b..76b44a5d 100644
--- a/tests/gem_busy.c
+++ b/tests/gem_busy.c
@@ -114,7 +114,9 @@ static void semaphore(int fd, unsigned ring, uint32_t flags)
 
 	/* Create a long running batch which we can use to hog the GPU */
 	handle[BUSY] = gem_create(fd, 4096);
-	spin = igt_spin_batch_new(fd, 0, ring, handle[BUSY]);
+	spin = igt_spin_batch_new(fd,
+				  .engine = ring,
+				  .dependency = handle[BUSY]);
 
 	/* Queue a batch after the busy, it should block and remain "busy" */
 	igt_assert(exec_noop(fd, handle, ring | flags, false));
@@ -363,17 +365,16 @@ static void close_race(int fd)
 	igt_assert(sched_setscheduler(getpid(), SCHED_RR, &rt) == 0);
 
 	for (i = 0; i < nhandles; i++) {
-		spin[i] = __igt_spin_batch_new(fd, 0,
-					       engines[rand() % nengine], 0);
+		spin[i] = __igt_spin_batch_new(fd,
+					       .engine = engines[rand() % nengine]);
 		handles[i] = spin[i]->handle;
 	}
 
 	igt_until_timeout(20) {
 		for (i = 0; i < nhandles; i++) {
 			igt_spin_batch_free(fd, spin[i]);
-			spin[i] = __igt_spin_batch_new(fd, 0,
-						       engines[rand() % nengine],
-						       0);
+			spin[i] = __igt_spin_batch_new(fd,
						       .engine = engines[rand() % nengine]);
 			handles[i] = spin[i]->handle;
 			__sync_synchronize();
 		}
@@ -415,7 +416,7 @@ static bool has_semaphores(int fd)
 
 static bool has_extended_busy_ioctl(int fd)
 {
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, I915_EXEC_RENDER, 0);
+	igt_spin_t *spin = igt_spin_batch_new(fd, .engine = I915_EXEC_RENDER);
 	uint32_t read, write;
 
 	__gem_busy(fd, spin->handle, &read, &write);
@@ -426,7 +427,7 @@ static bool has_extended_busy_ioctl(int fd)
 
 static void basic(int fd, unsigned ring, unsigned flags)
 {
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, ring, 0);
+	igt_spin_t *spin = igt_spin_batch_new(fd, .engine = ring);
 	struct timespec tv;
 	int timeout;
 	bool busy;
diff --git a/tests/gem_ctx_isolation.c b/tests/gem_ctx_isolation.c
index fe7d3490..2e19e8c0 100644
--- a/tests/gem_ctx_isolation.c
+++ b/tests/gem_ctx_isolation.c
@@ -502,7 +502,7 @@ static void isolation(int fd,
 	ctx[0] = gem_context_create(fd);
 	regs[0] = read_regs(fd, ctx[0], e, flags);
 
-	spin = igt_spin_batch_new(fd, ctx[0], engine, 0);
+	spin = igt_spin_batch_new(fd, .ctx = ctx[0], .engine = engine);
 
 	if (flags & DIRTY1) {
 		igt_debug("%s[%d]: Setting all registers of ctx 0 to 0x%08x\n",
@@ -557,8 +557,11 @@ static void isolation(int fd,
 
 static void inject_reset_context(int fd, unsigned int engine)
 {
+	struct igt_spin_factory opts = {
+		.ctx = gem_context_create(fd),
+		.engine = engine,
+	};
 	igt_spin_t *spin;
-	uint32_t ctx;
 
 	/*
 	 * Force a context switch before triggering the reset, or else
@@ -566,19 +569,20 @@ static void inject_reset_context(int fd, unsigned int engine)
 	 * HW for screwing up if the context was already broken.
 	 */
 
-	ctx = gem_context_create(fd);
-	if (gem_can_store_dword(fd, engine)) {
-		spin = __igt_spin_batch_new_poll(fd, ctx, engine);
+	if (gem_can_store_dword(fd, engine))
+		opts.flags |= IGT_SPIN_POLL_RUN;
+
+	spin = __igt_spin_batch_factory(fd, &opts);
+
+	if (spin->running)
 		igt_spin_busywait_until_running(spin);
-	} else {
-		spin = __igt_spin_batch_new(fd, ctx, engine, 0);
+	else
 		usleep(1000); /* better than nothing */
-	}
 
 	igt_force_gpu_reset(fd);
 
 	igt_spin_batch_free(fd, spin);
-	gem_context_destroy(fd, ctx);
+	gem_context_destroy(fd, opts.ctx);
 }
 
 static void preservation(int fd,
@@ -604,7 +608,7 @@ static void preservation(int fd,
 	gem_quiescent_gpu(fd);
 
 	ctx[num_values] = gem_context_create(fd);
-	spin = igt_spin_batch_new(fd, ctx[num_values], engine, 0);
+	spin = igt_spin_batch_new(fd, .ctx = ctx[num_values], .engine = engine);
 	regs[num_values][0] = read_regs(fd, ctx[num_values], e, flags);
 	for (int v = 0; v < num_values; v++) {
 		ctx[v] = gem_context_create(fd);
@@ -644,7 +648,7 @@ static void preservation(int fd,
 		break;
 	}
 
-	spin = igt_spin_batch_new(fd, ctx[num_values], engine, 0);
+	spin = igt_spin_batch_new(fd, .ctx = ctx[num_values], .engine = engine);
 	for (int v = 0; v < num_values; v++)
 		regs[v][1] = read_regs(fd, ctx[v], e, flags);
 	regs[num_values][1] = read_regs(fd, ctx[num_values], e, flags);
diff --git a/tests/gem_eio.c b/tests/gem_eio.c
index 5faf7502..0ec1aaec 100644
--- a/tests/gem_eio.c
+++ b/tests/gem_eio.c
@@ -157,10 +157,15 @@ static int __gem_wait(int fd, uint32_t handle, int64_t timeout)
 
 static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
 {
-	if (gem_can_store_dword(fd, flags))
-		return __igt_spin_batch_new_poll(fd, ctx, flags);
-	else
-		return __igt_spin_batch_new(fd, ctx, flags, 0);
+	struct igt_spin_factory opts = {
+		.ctx = ctx,
+		.engine = flags,
+	};
+
+	if (gem_can_store_dword(fd, opts.engine))
+		opts.flags |= IGT_SPIN_POLL_RUN;
+
+	return __igt_spin_batch_factory(fd, &opts);
 }
 
 static void __spin_wait(int fd, igt_spin_t *spin)
diff --git a/tests/gem_exec_fence.c b/tests/gem_exec_fence.c
index eb93308d..ba46595d 100644
--- a/tests/gem_exec_fence.c
+++ b/tests/gem_exec_fence.c
@@ -468,7 +468,7 @@ static void test_parallel(int fd, unsigned int master)
 	/* Fill the queue with many requests so that the next one has to
 	 * wait before it can be executed by the hardware.
 	 */
-	spin = igt_spin_batch_new(fd, 0, master, plug);
+	spin = igt_spin_batch_new(fd, .engine = master, .dependency = plug);
 	resubmit(fd, spin->handle, master, 16);
 
 	/* Now queue the master request and its secondaries */
@@ -651,7 +651,7 @@ static void test_keep_in_fence(int fd, unsigned int engine, unsigned int flags)
 	igt_spin_t *spin;
 	int fence;
 
-	spin = igt_spin_batch_new(fd, 0, engine, 0);
+	spin = igt_spin_batch_new(fd, .engine = engine);
 	gem_execbuf_wr(fd, &execbuf);
 	fence = upper_32_bits(execbuf.rsvd2);
 
@@ -1070,7 +1070,7 @@ static void test_syncobj_unused_fence(int fd)
 	struct local_gem_exec_fence fence = {
 		.handle = syncobj_create(fd),
 	};
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+	igt_spin_t *spin = igt_spin_batch_new(fd);
 
 	/* sanity check our syncobj_to_sync_file interface */
 	igt_assert_eq(__syncobj_to_sync_file(fd, 0), -ENOENT);
@@ -1162,7 +1162,7 @@ static void test_syncobj_signal(int fd)
 	struct local_gem_exec_fence fence = {
 		.handle = syncobj_create(fd),
 	};
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+	igt_spin_t *spin = igt_spin_batch_new(fd);
 
 	/* Check that the syncobj is signaled only when our request/fence is */
 
@@ -1212,7 +1212,7 @@ static void test_syncobj_wait(int fd)
 
 	gem_quiescent_gpu(fd);
 
-	spin = igt_spin_batch_new(fd, 0, 0, 0);
+	spin = igt_spin_batch_new(fd);
 
 	memset(&execbuf, 0, sizeof(execbuf));
 	execbuf.buffers_ptr = to_user_pointer(&obj);
@@ -1282,7 +1282,7 @@ static void test_syncobj_export(int fd)
 		.handle = syncobj_create(fd),
 	};
 	int export[2];
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+	igt_spin_t *spin = igt_spin_batch_new(fd);
 
 	/* Check that if we export the syncobj prior to use it picks up
 	 * the later fence. This allows a syncobj to establish a channel
@@ -1340,7 +1340,7 @@ static void test_syncobj_repeat(int fd)
 	struct drm_i915_gem_execbuffer2 execbuf;
 	struct local_gem_exec_fence *fence;
 	int export;
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+	igt_spin_t *spin = igt_spin_batch_new(fd);
 
 	/* Check that we can wait on the same fence multiple times */
 	fence = calloc(nfences, sizeof(*fence));
@@ -1395,7 +1395,7 @@ static void test_syncobj_import(int fd)
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	struct drm_i915_gem_exec_object2 obj;
 	struct drm_i915_gem_execbuffer2 execbuf;
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+	igt_spin_t *spin = igt_spin_batch_new(fd);
 	uint32_t sync = syncobj_create(fd);
 	int fence;
 
diff --git a/tests/gem_exec_latency.c b/tests/gem_exec_latency.c
index ea2e4c68..75811f32 100644
--- a/tests/gem_exec_latency.c
+++ b/tests/gem_exec_latency.c
@@ -63,6 +63,10 @@ static unsigned int ring_size;
 static void
 poll_ring(int fd, unsigned ring, const char *name)
 {
+	const struct igt_spin_factory opts = {
+		.engine = ring,
+		.flags = IGT_SPIN_POLL_RUN,
+	};
 	struct timespec tv = {};
 	unsigned long cycles;
 	igt_spin_t *spin[2];
@@ -72,11 +76,11 @@ poll_ring(int fd, unsigned ring, const char *name)
 	gem_require_ring(fd, ring);
 	igt_require(gem_can_store_dword(fd, ring));
 
-	spin[0] = __igt_spin_batch_new_poll(fd, 0, ring);
+	spin[0] = __igt_spin_batch_factory(fd, &opts);
 	igt_assert(spin[0]->running);
 	cmd = *spin[0]->batch;
 
-	spin[1] = __igt_spin_batch_new_poll(fd, 0, ring);
+	spin[1] = __igt_spin_batch_factory(fd, &opts);
 	igt_assert(spin[1]->running);
 	igt_assert(cmd == *spin[1]->batch);
 
@@ -312,7 +316,9 @@ static void latency_from_ring(int fd,
 				       I915_GEM_DOMAIN_GTT);
 
 		if (flags & PREEMPT)
-			spin = __igt_spin_batch_new(fd, ctx[0], ring, 0);
+			spin = __igt_spin_batch_new(fd,
+						    .ctx = ctx[0],
+						    .engine = ring);
 
 		if (flags & CORK) {
 			obj[0].handle = igt_cork_plug(&c, fd);
@@ -456,6 +462,10 @@ rthog_latency_on_ring(int fd, unsigned int engine, const char *name, unsigned in
 	};
 #define NPASS ARRAY_SIZE(passname)
 #define MMAP_SZ (64 << 10)
+	const struct igt_spin_factory opts = {
+		.engine = engine,
+		.flags = IGT_SPIN_POLL_RUN,
+	};
 	struct rt_pkt *results;
 	unsigned int engines[16];
 	const char *names[16];
@@ -513,7 +523,7 @@ rthog_latency_on_ring(int fd, unsigned int engine, const char *name, unsigned in
 
 			usleep(250);
 
-			spin = __igt_spin_batch_new_poll(fd, 0, engine);
+			spin = __igt_spin_batch_factory(fd, &opts);
 			if (!spin) {
 				igt_warn("Failed to create spinner! (%s)\n",
 					 passname[pass]);
diff --git a/tests/gem_exec_nop.c b/tests/gem_exec_nop.c
index 0523b1c0..74d27522 100644
--- a/tests/gem_exec_nop.c
+++ b/tests/gem_exec_nop.c
@@ -709,7 +709,9 @@ static void preempt(int fd, uint32_t handle,
 	clock_gettime(CLOCK_MONOTONIC, &start);
 	do {
 		igt_spin_t *spin =
-			__igt_spin_batch_new(fd, ctx[0], ring_id, 0);
+			__igt_spin_batch_new(fd,
+					     .ctx = ctx[0],
+					     .engine = ring_id);
 
 		for (int loop = 0; loop < 1024; loop++)
 			gem_execbuf(fd, &execbuf);
diff --git a/tests/gem_exec_reloc.c b/tests/gem_exec_reloc.c
index 91c6691a..837f60a6 100644
--- a/tests/gem_exec_reloc.c
+++ b/tests/gem_exec_reloc.c
@@ -388,7 +388,9 @@ static void basic_reloc(int fd, unsigned before, unsigned after, unsigned flags)
 	}
 
 	if (flags & ACTIVE) {
-		spin = igt_spin_batch_new(fd, 0, I915_EXEC_DEFAULT, obj.handle);
+		spin = igt_spin_batch_new(fd,
+					  .engine = I915_EXEC_DEFAULT,
+					  .dependency = obj.handle);
 		if (!(flags & HANG))
 			igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
 		igt_assert(gem_bo_busy(fd, obj.handle));
@@ -454,7 +456,9 @@ static void basic_reloc(int fd, unsigned before, unsigned after, unsigned flags)
 	}
 
 	if (flags & ACTIVE) {
-		spin = igt_spin_batch_new(fd, 0, I915_EXEC_DEFAULT, obj.handle);
+		spin = igt_spin_batch_new(fd,
+					  .engine = I915_EXEC_DEFAULT,
+					  .dependency = obj.handle);
 		if (!(flags & HANG))
 			igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
 		igt_assert(gem_bo_busy(fd, obj.handle));
@@ -581,7 +585,7 @@ static void basic_range(int fd, unsigned flags)
 	execbuf.buffer_count = n + 1;
 
 	if (flags & ACTIVE) {
-		spin = igt_spin_batch_new(fd, 0, 0, obj[n].handle);
+		spin = igt_spin_batch_new(fd, .dependency = obj[n].handle);
 		if (!(flags & HANG))
 			igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
 		igt_assert(gem_bo_busy(fd, obj[n].handle));
diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
index 1f43147f..35a44ab1 100644
--- a/tests/gem_exec_schedule.c
+++ b/tests/gem_exec_schedule.c
@@ -132,9 +132,12 @@ static void unplug_show_queue(int fd, struct igt_cork *c, unsigned int engine)
 	igt_spin_t *spin[MAX_ELSP_QLEN];
 
 	for (int n = 0; n < ARRAY_SIZE(spin); n++) {
-		uint32_t ctx = create_highest_priority(fd);
-		spin[n] = __igt_spin_batch_new(fd, ctx, engine, 0);
-		gem_context_destroy(fd, ctx);
+		const struct igt_spin_factory opts = {
+			.ctx = create_highest_priority(fd),
+			.engine = engine,
+		};
+		spin[n] = __igt_spin_batch_factory(fd, &opts);
+		gem_context_destroy(fd, opts.ctx);
 	}
 
 	igt_cork_unplug(c); /* batches will now be queued on the engine */
@@ -196,7 +199,7 @@ static void independent(int fd, unsigned int engine)
 			continue;
 
 		if (spin == NULL) {
-			spin = __igt_spin_batch_new(fd, 0, other, 0);
+			spin = __igt_spin_batch_new(fd, .engine = other);
 		} else {
 			struct drm_i915_gem_exec_object2 obj = {
 				.handle = spin->handle,
@@ -428,7 +431,9 @@ static void preempt(int fd, unsigned ring, unsigned flags)
 			ctx[LO] = gem_context_create(fd);
 			gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
 		}
-		spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0);
+		spin[n] = __igt_spin_batch_new(fd,
+					       .ctx = ctx[LO],
+					       .engine = ring);
 		igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
 
 		store_dword(fd, ctx[HI], ring, result, 0, n + 1, 0, I915_GEM_DOMAIN_RENDER);
@@ -462,7 +467,9 @@ static igt_spin_t *__noise(int fd, uint32_t ctx, int prio, igt_spin_t *spin)
 
 	for_each_physical_engine(fd, other) {
 		if (spin == NULL) {
-			spin = __igt_spin_batch_new(fd, ctx, other, 0);
+			spin = __igt_spin_batch_new(fd,
+						    .ctx = ctx,
+						    .engine = other);
 		} else {
 			struct drm_i915_gem_exec_object2 obj = {
 				.handle = spin->handle,
@@ -672,7 +679,9 @@ static void preempt_self(int fd, unsigned ring)
 	n = 0;
 	gem_context_set_priority(fd, ctx[HI], MIN_PRIO);
 	for_each_physical_engine(fd, other) {
-		spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
+		spin[n] = __igt_spin_batch_new(fd,
+					       .ctx = ctx[NOISE],
+					       .engine = other);
 		store_dword(fd, ctx[HI], other,
 			    result, (n + 1)*sizeof(uint32_t), n + 1,
 			    0, I915_GEM_DOMAIN_RENDER);
@@ -714,7 +723,9 @@ static void preemptive_hang(int fd, unsigned ring)
 		ctx[LO] = gem_context_create(fd);
 		gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
 
-		spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0);
+		spin[n] = __igt_spin_batch_new(fd,
+					       .ctx = ctx[LO],
+					       .engine = ring);
 
 		gem_context_destroy(fd, ctx[LO]);
 	}
diff --git a/tests/gem_exec_suspend.c b/tests/gem_exec_suspend.c
index db2bca26..43c52d10 100644
--- a/tests/gem_exec_suspend.c
+++ b/tests/gem_exec_suspend.c
@@ -189,7 +189,7 @@ static void run_test(int fd, unsigned engine, unsigned flags)
 	}
 
 	if (flags & HANG)
-		spin = igt_spin_batch_new(fd, 0, engine, 0);
+		spin = igt_spin_batch_new(fd, .engine = engine);
 
 	switch (mode(flags)) {
 	case NOSLEEP:
diff --git a/tests/gem_fenced_exec_thrash.c b/tests/gem_fenced_exec_thrash.c
index 385790ad..7248d310 100644
--- a/tests/gem_fenced_exec_thrash.c
+++ b/tests/gem_fenced_exec_thrash.c
@@ -132,7 +132,7 @@ static void run_test(int fd, int num_fences, int expected_errno,
 		igt_spin_t *spin = NULL;
 
 		if (flags & BUSY_LOAD)
-			spin = __igt_spin_batch_new(fd, 0, 0, 0);
+			spin = __igt_spin_batch_new(fd);
 
 		igt_while_interruptible(flags & INTERRUPTIBLE) {
 			igt_assert_eq(__gem_execbuf(fd, &execbuf[i]),
diff --git a/tests/gem_shrink.c b/tests/gem_shrink.c
index 3d33453a..929e0426 100644
--- a/tests/gem_shrink.c
+++ b/tests/gem_shrink.c
@@ -346,9 +346,9 @@ static void reclaim(unsigned engine, int timeout)
 		} while (!*shared);
 	}
 
-	spin = igt_spin_batch_new(fd, 0, engine, 0);
+	spin = igt_spin_batch_new(fd, .engine = engine);
 	igt_until_timeout(timeout) {
-		igt_spin_t *next = __igt_spin_batch_new(fd, 0, engine, 0);
+		igt_spin_t *next = __igt_spin_batch_new(fd, .engine = engine);
 
 		igt_spin_batch_set_timeout(spin, timeout_100ms);
 		gem_sync(fd, spin->handle);
diff --git a/tests/gem_spin_batch.c b/tests/gem_spin_batch.c
index cffeb6d7..52410010 100644
--- a/tests/gem_spin_batch.c
+++ b/tests/gem_spin_batch.c
@@ -41,9 +41,9 @@ static void spin(int fd, unsigned int engine, unsigned int timeout_sec)
 	struct timespec itv = { };
 	uint64_t elapsed;
 
-	spin = __igt_spin_batch_new(fd, 0, engine, 0);
+	spin = __igt_spin_batch_new(fd, .engine = engine);
 	while ((elapsed = igt_nsec_elapsed(&tv)) >> 30 < timeout_sec) {
-		igt_spin_t *next = __igt_spin_batch_new(fd, 0, engine, 0);
+		igt_spin_t *next = __igt_spin_batch_new(fd, .engine = engine);
 
 		igt_spin_batch_set_timeout(spin,
 					   timeout_100ms - igt_nsec_elapsed(&itv));
diff --git a/tests/gem_sync.c b/tests/gem_sync.c
index 1e2e089a..2fcb9aa0 100644
--- a/tests/gem_sync.c
+++ b/tests/gem_sync.c
@@ -715,9 +715,8 @@ preempt(int fd, unsigned ring, int num_children, int timeout)
 		do {
 			igt_spin_t *spin =
 				__igt_spin_batch_new(fd,
-						     ctx[0],
-						     execbuf.flags,
-						     0);
+						     .ctx = ctx[0],
+						     .engine = execbuf.flags);
 
 			do {
 				gem_execbuf(fd, &execbuf);
diff --git a/tests/gem_wait.c b/tests/gem_wait.c
index 61d8a405..7914c936 100644
--- a/tests/gem_wait.c
+++ b/tests/gem_wait.c
@@ -74,7 +74,9 @@ static void basic(int fd, unsigned engine, unsigned flags)
 	IGT_CORK_HANDLE(cork);
 	uint32_t plug =
 		flags & (WRITE | AWAIT) ? igt_cork_plug(&cork, fd) : 0;
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, engine, plug);
+	igt_spin_t *spin = igt_spin_batch_new(fd,
+					      .engine = engine,
+					      .dependency = plug);
 	struct drm_i915_gem_wait wait = {
 		flags & WRITE ? plug : spin->handle
 	};
diff --git a/tests/kms_busy.c b/tests/kms_busy.c
index 4a4e0e15..abf39828 100644
--- a/tests/kms_busy.c
+++ b/tests/kms_busy.c
@@ -84,7 +84,8 @@ static void flip_to_fb(igt_display_t *dpy, int pipe,
 	struct drm_event_vblank ev;
 
 	igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd,
-					   0, ring, fb->gem_handle);
+					   .engine = ring,
+					   .dependency = fb->gem_handle);
 
 	if (modeset) {
 		/*
@@ -200,7 +201,8 @@ static void test_atomic_commit_hang(igt_display_t *dpy, igt_plane_t *primary,
 				    struct igt_fb *busy_fb, unsigned ring)
 {
 	igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd,
-					   0, ring, busy_fb->gem_handle);
+					   .engine = ring,
+					   .dependency = busy_fb->gem_handle);
 	struct pollfd pfd = { .fd = dpy->drm_fd, .events = POLLIN };
 	unsigned flags = 0;
 	struct drm_event_vblank ev;
@@ -287,7 +289,9 @@ static void test_pageflip_modeset_hang(igt_display_t *dpy,
 
 	igt_display_commit2(dpy, dpy->is_atomic ? COMMIT_ATOMIC : COMMIT_LEGACY);
 
-	t = igt_spin_batch_new(dpy->drm_fd, 0, ring, fb.gem_handle);
+	t = igt_spin_batch_new(dpy->drm_fd,
+			       .engine = ring,
+			       .dependency = fb.gem_handle);
 
 	do_or_die(drmModePageFlip(dpy->drm_fd, dpy->pipes[pipe].crtc_id, fb.fb_id,
 				  DRM_MODE_PAGE_FLIP_EVENT, &fb));
diff --git a/tests/kms_cursor_legacy.c b/tests/kms_cursor_legacy.c
index d0a28b3c..85340d43 100644
--- a/tests/kms_cursor_legacy.c
+++ b/tests/kms_cursor_legacy.c
@@ -532,7 +532,8 @@ static void basic_flip_cursor(igt_display_t *display,
 
 	spin = NULL;
 	if (flags & BASIC_BUSY)
-		spin = igt_spin_batch_new(display->drm_fd, 0, 0, fb_info.gem_handle);
+		spin = igt_spin_batch_new(display->drm_fd,
+					  .dependency = fb_info.gem_handle);
 
 	/* Start with a synchronous query to align with the vblank */
 	vblank_start = get_vblank(display->drm_fd, pipe, DRM_VBLANK_NEXTONMISS);
@@ -1323,8 +1324,8 @@ static void flip_vs_cursor_busy_crc(igt_display_t *display, bool atomic)
 	for (int i = 1; i >= 0; i--) {
 		igt_spin_t *spin;
 
-		spin = igt_spin_batch_new(display->drm_fd, 0, 0,
-					  fb_info[1].gem_handle);
+		spin = igt_spin_batch_new(display->drm_fd,
+					  .dependency = fb_info[1].gem_handle);
 
 		vblank_start = get_vblank(display->drm_fd, pipe, DRM_VBLANK_NEXTONMISS);
 
diff --git a/tests/perf_pmu.c b/tests/perf_pmu.c
index 4570f926..a1d36ac4 100644
--- a/tests/perf_pmu.c
+++ b/tests/perf_pmu.c
@@ -172,10 +172,15 @@ static unsigned int e2ring(int gem_fd, const struct intel_execution_engine2 *e)
 
 static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
 {
+	struct igt_spin_factory opts = {
+		.ctx = ctx,
+		.engine = flags,
+	};
+
 	if (gem_can_store_dword(fd, flags))
-		return __igt_spin_batch_new_poll(fd, ctx, flags);
-	else
-		return __igt_spin_batch_new(fd, ctx, flags, 0);
+		opts.flags |= IGT_SPIN_POLL_RUN;
+
+	return __igt_spin_batch_factory(fd, &opts);
 }
 
 static unsigned long __spin_wait(int fd, igt_spin_t *spin)
@@ -356,7 +361,9 @@ busy_double_start(int gem_fd, const struct intel_execution_engine2 *e)
 	 */
 	spin[0] = __spin_sync(gem_fd, 0, e2ring(gem_fd, e));
 	usleep(500e3);
-	spin[1] = __igt_spin_batch_new(gem_fd, ctx, e2ring(gem_fd, e), 0);
+	spin[1] = __igt_spin_batch_new(gem_fd,
+				       .ctx = ctx,
+				       .engine = e2ring(gem_fd, e));
 
 	/*
 	 * Open PMU as fast as possible after the second spin batch in attempt
@@ -1045,8 +1052,8 @@ static void cpu_hotplug(int gem_fd)
 	 * Create two spinners so test can ensure shorter gaps in engine
 	 * busyness as it is terminating one and re-starting the other.
 	 */
-	spin[0] = igt_spin_batch_new(gem_fd, 0, I915_EXEC_RENDER, 0);
-	spin[1] = __igt_spin_batch_new(gem_fd, 0, I915_EXEC_RENDER, 0);
+	spin[0] = igt_spin_batch_new(gem_fd, .engine = I915_EXEC_RENDER);
+	spin[1] = __igt_spin_batch_new(gem_fd, .engine = I915_EXEC_RENDER);
 
 	val = __pmu_read_single(fd, &ts[0]);
 
@@ -1129,8 +1136,8 @@ static void cpu_hotplug(int gem_fd)
 			break;
 
 		igt_spin_batch_free(gem_fd, spin[cur]);
-		spin[cur] = __igt_spin_batch_new(gem_fd, 0, I915_EXEC_RENDER,
-						 0);
+		spin[cur] = __igt_spin_batch_new(gem_fd,
						 .engine = I915_EXEC_RENDER);
 
 		cur ^= 1;
 	}
@@ -1167,8 +1174,9 @@ test_interrupts(int gem_fd)
 
 	/* Queue spinning batches. */
 	for (int i = 0; i < target; i++) {
-		spin[i] = __igt_spin_batch_new_fence(gem_fd,
-						     0, I915_EXEC_RENDER);
+		spin[i] = __igt_spin_batch_new(gem_fd,
+					       .engine = I915_EXEC_RENDER,
+					       .flags = IGT_SPIN_FENCE_OUT);
 		if (i == 0) {
 			fence_fd = spin[i]->out_fence;
 		} else {
@@ -1229,7 +1237,8 @@ test_interrupts_sync(int gem_fd)
 
 	/* Queue spinning batches. */
 	for (int i = 0; i < target; i++)
-		spin[i] = __igt_spin_batch_new_fence(gem_fd, 0, 0);
+		spin[i] = __igt_spin_batch_new(gem_fd,
+					       .flags = IGT_SPIN_FENCE_OUT);
 
 	/* Wait for idle state. */
 	idle = pmu_read_single(fd);
@@ -1550,7 +1559,7 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
 	igt_spin_t *spin;
 
 	/* Allocate our spin batch and idle it. */
-	spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
+	spin = igt_spin_batch_new(gem_fd, .engine = e2ring(gem_fd, e));
 	igt_spin_batch_end(spin);
 	gem_sync(gem_fd, spin->handle);
 
diff --git a/tests/pm_rps.c b/tests/pm_rps.c
index 006d084b..202132b1 100644
--- a/tests/pm_rps.c
+++ b/tests/pm_rps.c
@@ -235,9 +235,9 @@ static void load_helper_run(enum load load)
 
 	igt_debug("Applying %s load...\n", lh.load ? "high" : "low");
 
-	spin[0] = __igt_spin_batch_new(drm_fd, 0, 0, 0);
+	spin[0] = __igt_spin_batch_new(drm_fd);
 	if (lh.load == HIGH)
-		spin[1] = __igt_spin_batch_new(drm_fd, 0, 0, 0);
+		spin[1] = __igt_spin_batch_new(drm_fd);
 	while (!lh.exit) {
 		handle = spin[0]->handle;
 		igt_spin_batch_end(spin[0]);
@@ -248,8 +248,7 @@ static void load_helper_run(enum load load)
 			usleep(100);
 
 		spin[0] = spin[1];
-		spin[lh.load == HIGH] =
-			__igt_spin_batch_new(drm_fd, 0, 0, 0);
+		spin[lh.load == HIGH] = __igt_spin_batch_new(drm_fd);
 	}
 
 	handle = spin[0]->handle;
@@ -510,7 +509,7 @@ static void boost_freq(int fd, int *boost_freqs)
 	int64_t timeout = 1;
 	igt_spin_t *load;
 
-	load = igt_spin_batch_new(fd, 0, 0, 0);
+	load = igt_spin_batch_new(fd);
 	resubmit_batch(fd, load->handle, 16);
 
 	/* Waiting will grant us a boost to maximum */
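
One pattern worth calling out from the converted tests (gem_eio.c, perf_pmu.c and gem_ctx_isolation.c above): because the options are now a plain struct, a test can build them up conditionally and make a single factory call. A sketch of that shape, mirroring the __spin_poll() helpers in this patch; the helper name below is illustrative, not from the tree:

    #include "igt.h"

    /* Spin on @engine under @ctx, polling for actual execution only where
     * the engine can write a dword back (mirrors __spin_poll() above).
     */
    static igt_spin_t *
    spin_poll_if_possible(int fd, uint32_t ctx, unsigned int engine)
    {
            struct igt_spin_factory opts = {
                    .ctx = ctx,
                    .engine = engine,
            };

            if (gem_can_store_dword(fd, engine))
                    opts.flags |= IGT_SPIN_POLL_RUN;

            return __igt_spin_batch_factory(fd, &opts);
    }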