Diffstat (limited to 'drivers/gpu/drm/i915/selftests/i915_gem_context.c')
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 200
1 file changed, 149 insertions(+), 51 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 7eb58a9d1319..4399ef9ebf15 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -556,7 +556,7 @@ static int igt_ctx_exec(void *arg)
ncontexts++;
}
pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
- ncontexts, RUNTIME_INFO(i915)->num_rings, ndwords);
+ ncontexts, RUNTIME_INFO(i915)->num_engines, ndwords);
dw = 0;
list_for_each_entry(obj, &objects, st_link) {
@@ -923,7 +923,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
unsigned int flags)
{
struct intel_sseu default_sseu = intel_device_default_sseu(i915);
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
struct intel_sseu pg_sseu;
@@ -967,6 +967,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
ret = PTR_ERR(ctx);
goto out_unlock;
}
+ i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
@@ -1125,7 +1126,7 @@ static int igt_ctx_readonly(void *arg)
}
}
pr_info("Submitted %lu dwords (across %u engines)\n",
- ndwords, RUNTIME_INFO(i915)->num_rings);
+ ndwords, RUNTIME_INFO(i915)->num_engines);
dw = 0;
list_for_each_entry(obj, &objects, st_link) {
@@ -1432,7 +1433,7 @@ static int igt_vm_isolation(void *arg)
div64_u64_rem(i915_prandom_u64_state(&prng),
vm_total, &offset);
- offset &= ~sizeof(u32);
+ offset &= -sizeof(u32);
offset += I915_GTT_PAGE_SIZE;
err = write_to_scratch(ctx_a, engine,
@@ -1458,7 +1459,7 @@ static int igt_vm_isolation(void *arg)
count += this;
}
pr_info("Checked %lu scratch offsets across %d engines\n",
- count, RUNTIME_INFO(i915)->num_rings);
+ count, RUNTIME_INFO(i915)->num_engines);
out_rpm:
intel_runtime_pm_put(i915, wakeref);
@@ -1492,63 +1493,56 @@ static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
{
struct intel_engine_cs *engine;
unsigned int tmp;
- int err;
+ int pass;
GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
- for_each_engine_masked(engine, i915, engines, tmp) {
- struct i915_request *rq;
+ for (pass = 0; pass < 4; pass++) { /* Once busy; once idle; repeat */
+ bool from_idle = pass & 1;
+ int err;
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ if (!from_idle) {
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ struct i915_request *rq;
- i915_request_add(rq);
- }
-
- err = i915_gem_switch_to_kernel_context(i915);
- if (err)
- return err;
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (!engine_has_kernel_context_barrier(engine)) {
- pr_err("kernel context not last on engine %s!\n",
- engine->name);
- return -EINVAL;
+ i915_request_add(rq);
+ }
}
- }
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- return err;
+ err = i915_gem_switch_to_kernel_context(i915,
+ i915->gt.active_engines);
+ if (err)
+ return err;
+
+ if (!from_idle) {
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+ }
- GEM_BUG_ON(i915->gt.active_requests);
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (engine->last_retired_context->gem_context != i915->kernel_context) {
- pr_err("engine %s not idling in kernel context!\n",
- engine->name);
+ if (i915->gt.active_requests) {
+ pr_err("%d active requests remain after switching to kernel context, pass %d (%s) on %s engine%s\n",
+ i915->gt.active_requests,
+ pass, from_idle ? "idle" : "busy",
+ __engine_name(i915, engines),
+ is_power_of_2(engines) ? "" : "s");
return -EINVAL;
}
- }
- err = i915_gem_switch_to_kernel_context(i915);
- if (err)
- return err;
+ /* XXX Bonus points for proving we are the kernel context! */
- if (i915->gt.active_requests) {
- pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
- i915->gt.active_requests);
- return -EINVAL;
+ mutex_unlock(&i915->drm.struct_mutex);
+ drain_delayed_work(&i915->gt.idle_work);
+ mutex_lock(&i915->drm.struct_mutex);
}
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (!intel_engine_has_kernel_context(engine)) {
- pr_err("kernel context not last on engine %s!\n",
- engine->name);
- return -EINVAL;
- }
- }
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ return -EIO;
return 0;
}
@@ -1592,8 +1586,6 @@ static int igt_switch_to_kernel_context(void *arg)
out_unlock:
GEM_TRACE_DUMP_ON(err);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
@@ -1602,10 +1594,116 @@ out_unlock:
return err;
}
+static void mock_barrier_task(void *data)
+{
+ unsigned int *counter = data;
+
+ ++*counter;
+}
+
+static int mock_context_barrier(void *arg)
+{
+#undef pr_fmt
+#define pr_fmt(x) "context_barrier_task():" # x
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ unsigned int counter;
+ int err;
+
+ /*
+ * The context barrier provides us with a callback after it emits
+ * a request; useful for retiring old state after loading new.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ctx = mock_context(i915, "mock");
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto unlock;
+ }
+
+ counter = 0;
+ err = context_barrier_task(ctx, 0, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ if (counter == 0) {
+ pr_err("Did not retire immediately with 0 engines\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ counter = 0;
+ err = context_barrier_task(ctx,
+ ALL_ENGINES, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ if (counter == 0) {
+ pr_err("Did not retire immediately for all inactive engines\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(i915, wakeref)
+ rq = i915_request_alloc(i915->engine[RCS0], ctx);
+ if (IS_ERR(rq)) {
+ pr_err("Request allocation failed!\n");
+ goto out;
+ }
+ i915_request_add(rq);
+ GEM_BUG_ON(list_empty(&ctx->active_engines));
+
+ counter = 0;
+ context_barrier_inject_fault = BIT(RCS0);
+ err = context_barrier_task(ctx,
+ ALL_ENGINES, mock_barrier_task, &counter);
+ context_barrier_inject_fault = 0;
+ if (err == -ENXIO)
+ err = 0;
+ else
+ pr_err("Did not hit fault injection!\n");
+ if (counter != 0) {
+ pr_err("Invoked callback on error!\n");
+ err = -EIO;
+ }
+ if (err)
+ goto out;
+
+ counter = 0;
+ err = context_barrier_task(ctx,
+ ALL_ENGINES, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ mock_device_flush(i915);
+ if (counter == 0) {
+ pr_err("Did not retire on each active engines\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ mock_context_close(ctx);
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+#undef pr_fmt
+#define pr_fmt(x) x
+}
+
int i915_gem_context_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_switch_to_kernel_context),
+ SUBTEST(mock_context_barrier),
};
struct drm_i915_private *i915;
int err;
@@ -1631,7 +1729,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(igt_vm_isolation),
};
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(dev_priv))
return 0;
return i915_subtests(tests, dev_priv);