summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTvrtko Ursulin <tvrtko.ursulin@intel.com>2018-04-03 12:36:44 +0100
committerTvrtko Ursulin <tvrtko.ursulin@intel.com>2018-04-03 16:39:57 +0100
commitcad5fc06f954546042a432202cbe7e5a20fe1132 (patch)
treeb7864de000943cd596a862062ecf302c1266a099
parent3df3dcf6fca6174c9dad49f37f019ce50aaf2885 (diff)
tests/gem_eio: Add reset and unwedge stress testing
Reset and unwedge stress testing is supposed to trigger wedging or resets at inconvenient times and then re-use the context so either the context or driver tracking might get confused and break. v2: * Renamed for more sensible naming. * Added some comments to explain what the test is doing. (Chris Wilson) Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--tests/gem_eio.c74
1 files changed, 74 insertions, 0 deletions
diff --git a/tests/gem_eio.c b/tests/gem_eio.c
index b7c5047f..9599e73d 100644
--- a/tests/gem_eio.c
+++ b/tests/gem_eio.c
@@ -591,6 +591,74 @@ static void test_inflight_internal(int fd, unsigned int wait)
close(fd);
}
+/*
+ * Verify that we can submit and execute work after unwedging the GPU.
+ *
+ * Runs for 5 seconds: each iteration hangs the GPU with a spinning batch
+ * on a long-lived context (ctx0) with further batches queued behind it,
+ * waits a short while, then recovers (reset, or wedge+reset when
+ * TEST_WEDGE is set) and checks that both the long-lived context and a
+ * freshly created per-iteration context can still execute work.
+ */
+static void test_reset_stress(int fd, unsigned int flags)
+{
+ /* ctx0 survives across all iterations; its tracking must stay sane. */
+ uint32_t ctx0 = gem_context_create(fd);
+
+ igt_until_timeout(5) {
+ struct drm_i915_gem_execbuffer2 execbuf = { };
+ struct drm_i915_gem_exec_object2 obj = { };
+ uint32_t bbe = MI_BATCH_BUFFER_END;
+ igt_spin_t *hang;
+ unsigned int i;
+ uint32_t ctx;
+
+ gem_quiescent_gpu(fd);
+
+ /*
+ * With TEST_WEDGE, disable reset so the hang wedges the GPU;
+ * otherwise leave reset enabled. Skip if unsupported.
+ */
+ igt_require(i915_reset_control(flags & TEST_WEDGE ?
+ false : true));
+
+ /* Fresh context created anew every iteration. */
+ ctx = context_create_safe(fd);
+
+ /*
+ * Start executing a spin batch with some queued batches
+ * against a different context after it.
+ */
+ hang = spin_sync(fd, ctx0, 0);
+
+ /* Trivial batch: just MI_BATCH_BUFFER_END, queued behind the spinner. */
+ obj.handle = gem_create(fd, 4096);
+ gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
+
+ execbuf.buffers_ptr = to_user_pointer(&obj);
+ execbuf.buffer_count = 1;
+ execbuf.rsvd1 = ctx0;
+
+ for (i = 0; i < 10; i++)
+ gem_execbuf(fd, &execbuf);
+
+ /* Wedge after a small delay. */
+ /* 100e3 — presumably microseconds; confirm against __check_wait. */
+ igt_assert_eq(__check_wait(fd, obj.handle, 100e3), 0);
+
+ /* Unwedge by forcing a reset. */
+ igt_assert(i915_reset_control(true));
+ trigger_reset(fd);
+
+ gem_quiescent_gpu(fd);
+
+ /*
+ * Verify that we are able to submit work after unwedging from
+ * both contexts.
+ */
+ execbuf.rsvd1 = ctx;
+ for (i = 0; i < 5; i++)
+ gem_execbuf(fd, &execbuf);
+
+ execbuf.rsvd1 = ctx0;
+ for (i = 0; i < 5; i++)
+ gem_execbuf(fd, &execbuf);
+
+ /* Per-iteration cleanup: spinner, fresh context and batch object. */
+ gem_sync(fd, obj.handle);
+ igt_spin_batch_free(fd, hang);
+ gem_context_destroy(fd, ctx);
+ gem_close(fd, obj.handle);
+ }
+
+ gem_context_destroy(fd, ctx0);
+}
+
static int fd = -1;
static void
@@ -635,6 +703,12 @@ igt_main
igt_subtest("in-flight-suspend")
test_inflight_suspend(fd);
+ igt_subtest("reset-stress")
+ test_reset_stress(fd, 0);
+
+ igt_subtest("unwedge-stress")
+ test_reset_stress(fd, TEST_WEDGE);
+
igt_subtest_group {
const struct {
unsigned int wait;