Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
33 files changed, 2901 insertions, 1143 deletions
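Most of the mechanical churn in the hunks below comes from the runtime-PM conversion: intel_runtime_pm_get() now returns an intel_wakeref_t cookie that must be handed back to intel_runtime_pm_put(), and with_intel_runtime_pm() scopes the wakeref over a statement or block. A minimal sketch of the pattern as the converted selftests use it (illustrative only; the helper names are taken from the hunks below, and the wrapper function is hypothetical, not part of the patch):

	static int example_wakeref_usage(struct drm_i915_private *i915)
	{
		intel_wakeref_t wakeref;

		/* Explicit form: the cookie returned by get() is handed back to put(). */
		wakeref = intel_runtime_pm_get(i915);
		/* ... work that requires the device to be awake ... */
		intel_runtime_pm_put(i915, wakeref);

		/* Scoped form, as used by pm_suspend()/pm_hibernate() below. */
		with_intel_runtime_pm(i915, wakeref) {
			/* ... work that requires the device to be awake ... */
		}

		return 0;
	}

Note that call sites which assign an error code inside the scoped block (for example the gpu_fill() conversions in igt_ctx_exec() and igt_ctx_readonly()) also pre-initialise err before the block, as shown in those hunks.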
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 26c065c8d2c0..a9a2fa35876f 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -972,7 +972,6 @@ static int gpu_write(struct i915_vma *vma, { struct i915_request *rq; struct i915_vma *batch; - int flags = 0; int err; GEM_BUG_ON(!intel_engine_can_store_dword(engine)); @@ -981,14 +980,14 @@ static int gpu_write(struct i915_vma *vma, if (err) return err; - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) - return PTR_ERR(rq); - batch = gpu_write_dw(vma, dword * sizeof(u32), value); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto err_request; + if (IS_ERR(batch)) + return PTR_ERR(batch); + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_batch; } err = i915_vma_move_to_active(batch, rq, 0); @@ -996,21 +995,21 @@ static int gpu_write(struct i915_vma *vma, goto err_request; i915_gem_object_set_active_reference(batch->obj); - i915_vma_unpin(batch); - i915_vma_close(batch); - err = engine->emit_bb_start(rq, - batch->node.start, batch->node.size, - flags); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); if (err) goto err_request; - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + err = engine->emit_bb_start(rq, + batch->node.start, batch->node.size, + 0); +err_request: if (err) i915_request_skip(rq, err); - -err_request: i915_request_add(rq); +err_batch: + i915_vma_unpin(batch); + i915_vma_close(batch); return err; } @@ -1450,7 +1449,7 @@ static int igt_ppgtt_pin_update(void *arg) * huge-gtt-pages. */ - if (!HAS_FULL_48BIT_PPGTT(dev_priv)) { + if (!ppgtt || !i915_vm_is_48bit(&ppgtt->vm)) { pr_info("48b PPGTT not supported, skipping\n"); return 0; } @@ -1703,7 +1702,6 @@ int i915_gem_huge_page_mock_selftests(void) }; struct drm_i915_private *dev_priv; struct i915_hw_ppgtt *ppgtt; - struct pci_dev *pdev; int err; dev_priv = mock_gem_device(); @@ -1713,9 +1711,6 @@ int i915_gem_huge_page_mock_selftests(void) /* Pretend to be a device which supports the 48b PPGTT */ mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL; - pdev = dev_priv->drm.pdev; - dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39)); - mutex_lock(&dev_priv->drm.struct_mutex); ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV)); if (IS_ERR(ppgtt)) { @@ -1761,6 +1756,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) }; struct drm_file *file; struct i915_gem_context *ctx; + intel_wakeref_t wakeref; int err; if (!HAS_PPGTT(dev_priv)) { @@ -1776,7 +1772,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) return PTR_ERR(file); mutex_lock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); ctx = live_context(dev_priv, file); if (IS_ERR(ctx)) { @@ -1790,7 +1786,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) err = i915_subtests(tests, ctx); out_unlock: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); mock_file_free(dev_priv, file); diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c new file mode 100644 index 000000000000..337b1f98b923 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -0,0 +1,157 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include 
"../i915_selftest.h" + +#include "igt_flush_test.h" +#include "lib_sw_fence.h" + +struct live_active { + struct i915_active base; + bool retired; +}; + +static void __live_active_retire(struct i915_active *base) +{ + struct live_active *active = container_of(base, typeof(*active), base); + + active->retired = true; +} + +static int __live_active_setup(struct drm_i915_private *i915, + struct live_active *active) +{ + struct intel_engine_cs *engine; + struct i915_sw_fence *submit; + enum intel_engine_id id; + unsigned int count = 0; + int err = 0; + + submit = heap_fence_create(GFP_KERNEL); + if (!submit) + return -ENOMEM; + + i915_active_init(i915, &active->base, __live_active_retire); + active->retired = false; + + if (!i915_active_acquire(&active->base)) { + pr_err("First i915_active_acquire should report being idle\n"); + err = -EINVAL; + goto out; + } + + for_each_engine(engine, i915, id) { + struct i915_request *rq; + + rq = i915_request_alloc(engine, i915->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, + submit, + GFP_KERNEL); + if (err >= 0) + err = i915_active_ref(&active->base, + rq->fence.context, rq); + i915_request_add(rq); + if (err) { + pr_err("Failed to track active ref!\n"); + break; + } + + count++; + } + + i915_active_release(&active->base); + if (active->retired && count) { + pr_err("i915_active retired before submission!\n"); + err = -EINVAL; + } + if (active->base.count != count) { + pr_err("i915_active not tracking all requests, found %d, expected %d\n", + active->base.count, count); + err = -EINVAL; + } + +out: + i915_sw_fence_commit(submit); + heap_fence_put(submit); + + return err; +} + +static int live_active_wait(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct live_active active; + intel_wakeref_t wakeref; + int err; + + /* Check that we get a callback when requests retire upon waiting */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + err = __live_active_setup(i915, &active); + + i915_active_wait(&active.base); + if (!active.retired) { + pr_err("i915_active not retired after waiting!\n"); + err = -EINVAL; + } + + i915_active_fini(&active.base); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + return err; +} + +static int live_active_retire(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct live_active active; + intel_wakeref_t wakeref; + int err; + + /* Check that we get a callback when requests are indirectly retired */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + err = __live_active_setup(i915, &active); + + /* waits for & retires all requests */ + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + if (!active.retired) { + pr_err("i915_active not retired after flushing!\n"); + err = -EINVAL; + } + + i915_active_fini(&active.base); + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + return err; +} + +int i915_active_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_active_wait), + SUBTEST(live_active_retire), + }; + + if (i915_terminally_wedged(&i915->gpu_error)) + return 0; + + return i915_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index d0aa19d17653..e77b7ed449ae 100644 --- 
a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -16,9 +16,10 @@ static int switch_to_context(struct drm_i915_private *i915, { struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = 0; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); for_each_engine(engine, i915, id) { struct i915_request *rq; @@ -32,7 +33,7 @@ static int switch_to_context(struct drm_i915_private *i915, i915_request_add(rq); } - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); return err; } @@ -65,7 +66,9 @@ static void trash_stolen(struct drm_i915_private *i915) static void simulate_hibernate(struct drm_i915_private *i915) { - intel_runtime_pm_get(i915); + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(i915); /* * As a final sting in the tail, invalidate stolen. Under a real S4, @@ -76,7 +79,7 @@ static void simulate_hibernate(struct drm_i915_private *i915) */ trash_stolen(i915); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); } static int pm_prepare(struct drm_i915_private *i915) @@ -93,39 +96,39 @@ static int pm_prepare(struct drm_i915_private *i915) static void pm_suspend(struct drm_i915_private *i915) { - intel_runtime_pm_get(i915); - - i915_gem_suspend_gtt_mappings(i915); - i915_gem_suspend_late(i915); + intel_wakeref_t wakeref; - intel_runtime_pm_put(i915); + with_intel_runtime_pm(i915, wakeref) { + i915_gem_suspend_gtt_mappings(i915); + i915_gem_suspend_late(i915); + } } static void pm_hibernate(struct drm_i915_private *i915) { - intel_runtime_pm_get(i915); + intel_wakeref_t wakeref; - i915_gem_suspend_gtt_mappings(i915); + with_intel_runtime_pm(i915, wakeref) { + i915_gem_suspend_gtt_mappings(i915); - i915_gem_freeze(i915); - i915_gem_freeze_late(i915); - - intel_runtime_pm_put(i915); + i915_gem_freeze(i915); + i915_gem_freeze_late(i915); + } } static void pm_resume(struct drm_i915_private *i915) { + intel_wakeref_t wakeref; + /* * Both suspend and hibernate follow the same wakeup path and assume * that runtime-pm just works. 
*/ - intel_runtime_pm_get(i915); - - intel_engines_sanitize(i915); - i915_gem_sanitize(i915); - i915_gem_resume(i915); - - intel_runtime_pm_put(i915); + with_intel_runtime_pm(i915, wakeref) { + intel_engines_sanitize(i915, false); + i915_gem_sanitize(i915); + i915_gem_resume(i915); + } } static int igt_gem_suspend(void *arg) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index f7392c1ffe75..fd89a5a33c1a 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -279,6 +279,7 @@ static int igt_gem_coherency(void *arg) struct drm_i915_private *i915 = arg; const struct igt_coherency_mode *read, *write, *over; struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; unsigned long count, n; u32 *offsets, *values; int err = 0; @@ -298,7 +299,7 @@ static int igt_gem_coherency(void *arg) values = offsets + ncachelines; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); for (over = igt_coherency_mode; over->name; over++) { if (!over->set) continue; @@ -376,7 +377,7 @@ static int igt_gem_coherency(void *arg) } } unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); kfree(offsets); return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 7d82043aff10..d00d0bb07784 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -24,9 +24,13 @@ #include <linux/prime_numbers.h> +#include "../i915_reset.h" #include "../i915_selftest.h" #include "i915_random.h" #include "igt_flush_test.h" +#include "igt_live_test.h" +#include "igt_reset.h" +#include "igt_spinner.h" #include "mock_drm.h" #include "mock_gem_device.h" @@ -34,84 +38,6 @@ #define DW_PER_PAGE (PAGE_SIZE / sizeof(u32)) -struct live_test { - struct drm_i915_private *i915; - const char *func; - const char *name; - - unsigned int reset_global; - unsigned int reset_engine[I915_NUM_ENGINES]; -}; - -static int begin_live_test(struct live_test *t, - struct drm_i915_private *i915, - const char *func, - const char *name) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err; - - t->i915 = i915; - t->func = func; - t->name = name; - - err = i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (err) { - pr_err("%s(%s): failed to idle before, with err=%d!", - func, name, err); - return err; - } - - i915->gpu_error.missed_irq_rings = 0; - t->reset_global = i915_reset_count(&i915->gpu_error); - - for_each_engine(engine, i915, id) - t->reset_engine[id] = - i915_reset_engine_count(&i915->gpu_error, engine); - - return 0; -} - -static int end_live_test(struct live_test *t) -{ - struct drm_i915_private *i915 = t->i915; - struct intel_engine_cs *engine; - enum intel_engine_id id; - - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - return -EIO; - - if (t->reset_global != i915_reset_count(&i915->gpu_error)) { - pr_err("%s(%s): GPU was reset %d times!\n", - t->func, t->name, - i915_reset_count(&i915->gpu_error) - t->reset_global); - return -EIO; - } - - for_each_engine(engine, i915, id) { - if (t->reset_engine[id] == - i915_reset_engine_count(&i915->gpu_error, engine)) - continue; - - pr_err("%s(%s): engine '%s' was reset %d times!\n", - t->func, t->name, engine->name, - i915_reset_engine_count(&i915->gpu_error, engine) - - 
t->reset_engine[id]); - return -EIO; - } - - if (i915->gpu_error.missed_irq_rings) { - pr_err("%s(%s): Missed interrupts on engines %lx\n", - t->func, t->name, i915->gpu_error.missed_irq_rings); - return -EIO; - } - - return 0; -} - static int live_nop_switch(void *arg) { const unsigned int nctx = 1024; @@ -119,8 +45,9 @@ static int live_nop_switch(void *arg) struct intel_engine_cs *engine; struct i915_gem_context **ctx; enum intel_engine_id id; + intel_wakeref_t wakeref; + struct igt_live_test t; struct drm_file *file; - struct live_test t; unsigned long n; int err = -ENODEV; @@ -140,7 +67,7 @@ static int live_nop_switch(void *arg) return PTR_ERR(file); mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL); if (!ctx) { @@ -184,7 +111,7 @@ static int live_nop_switch(void *arg) pr_info("Populated %d contexts on %s in %lluns\n", nctx, engine->name, ktime_to_ns(times[1] - times[0])); - err = begin_live_test(&t, i915, __func__, engine->name); + err = igt_live_test_begin(&t, i915, __func__, engine->name); if (err) goto out_unlock; @@ -232,7 +159,7 @@ static int live_nop_switch(void *arg) break; } - err = end_live_test(&t); + err = igt_live_test_end(&t); if (err) goto out_unlock; @@ -243,7 +170,7 @@ static int live_nop_switch(void *arg) } out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); mock_file_free(i915, file); return err; @@ -553,10 +480,10 @@ static int igt_ctx_exec(void *arg) struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj = NULL; unsigned long ncontexts, ndwords, dw; + struct igt_live_test t; struct drm_file *file; IGT_TIMEOUT(end_time); LIST_HEAD(objects); - struct live_test t; int err = -ENODEV; /* @@ -574,7 +501,7 @@ static int igt_ctx_exec(void *arg) mutex_lock(&i915->drm.struct_mutex); - err = begin_live_test(&t, i915, __func__, ""); + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; @@ -593,6 +520,8 @@ static int igt_ctx_exec(void *arg) } for_each_engine(engine, i915, id) { + intel_wakeref_t wakeref; + if (!engine->context_size) continue; /* No logical context support in HW */ @@ -607,9 +536,9 @@ static int igt_ctx_exec(void *arg) } } - intel_runtime_pm_get(i915); - err = gpu_fill(obj, ctx, engine, dw); - intel_runtime_pm_put(i915); + err = 0; + with_intel_runtime_pm(i915, wakeref) + err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -627,7 +556,7 @@ static int igt_ctx_exec(void *arg) ncontexts++; } pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n", - ncontexts, INTEL_INFO(i915)->num_rings, ndwords); + ncontexts, RUNTIME_INFO(i915)->num_rings, ndwords); dw = 0; list_for_each_entry(obj, &objects, st_link) { @@ -642,7 +571,7 @@ static int igt_ctx_exec(void *arg) } out_unlock: - if (end_live_test(&t)) + if (igt_live_test_end(&t)) err = -EIO; mutex_unlock(&i915->drm.struct_mutex); @@ -650,6 +579,469 @@ out_unlock: return err; } +static struct i915_vma *rpcs_query_batch(struct i915_vma *vma) +{ + struct drm_i915_gem_object *obj; + u32 *cmd; + int err; + + if (INTEL_GEN(vma->vm->i915) < 8) + return ERR_PTR(-EINVAL); + + obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cmd)) { + err = PTR_ERR(cmd); + goto err; + } + + *cmd++ = MI_STORE_REGISTER_MEM_GEN8; + *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE); + *cmd++ = lower_32_bits(vma->node.start); + *cmd++ = upper_32_bits(vma->node.start); + *cmd = MI_BATCH_BUFFER_END; + + i915_gem_object_unpin_map(obj); + + err = i915_gem_object_set_to_gtt_domain(obj, false); + if (err) + goto err; + + vma = i915_vma_instance(obj, vma->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto err; + + return vma; + +err: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static int +emit_rpcs_query(struct drm_i915_gem_object *obj, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct i915_request **rq_out) +{ + struct i915_request *rq; + struct i915_vma *batch; + struct i915_vma *vma; + int err; + + GEM_BUG_ON(!intel_engine_can_store_dword(engine)); + + vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + err = i915_gem_object_set_to_gtt_domain(obj, false); + if (err) + return err; + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + return err; + + batch = rpcs_query_batch(vma); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto err_vma; + } + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_batch; + } + + err = engine->emit_bb_start(rq, batch->node.start, batch->node.size, 0); + if (err) + goto err_request; + + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto skip_request; + + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto skip_request; + + i915_gem_object_set_active_reference(batch->obj); + i915_vma_unpin(batch); + i915_vma_close(batch); + + i915_vma_unpin(vma); + + *rq_out = i915_request_get(rq); + + i915_request_add(rq); + + return 0; + +skip_request: + i915_request_skip(rq, err); +err_request: + i915_request_add(rq); +err_batch: + i915_vma_unpin(batch); +err_vma: + i915_vma_unpin(vma); + + return err; +} + +#define TEST_IDLE BIT(0) +#define TEST_BUSY BIT(1) +#define TEST_RESET BIT(2) + +static int +__sseu_prepare(struct drm_i915_private *i915, + const char *name, + unsigned int flags, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct igt_spinner **spin_out) +{ + int ret = 0; + + if (flags & (TEST_BUSY | TEST_RESET)) { + struct igt_spinner *spin; + struct i915_request *rq; + + spin = kzalloc(sizeof(*spin), GFP_KERNEL); + if (!spin) { + ret = -ENOMEM; + goto out; + } + + ret = igt_spinner_init(spin, i915); + if (ret) + return 
ret; + + rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + igt_spinner_fini(spin); + kfree(spin); + goto out; + } + + i915_request_add(rq); + + if (!igt_wait_for_spinner(spin, rq)) { + pr_err("%s: Spinner failed to start!\n", name); + igt_spinner_end(spin); + igt_spinner_fini(spin); + kfree(spin); + ret = -ETIMEDOUT; + goto out; + } + + *spin_out = spin; + } + +out: + return ret; +} + +static int +__read_slice_count(struct drm_i915_private *i915, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct drm_i915_gem_object *obj, + struct igt_spinner *spin, + u32 *rpcs) +{ + struct i915_request *rq = NULL; + u32 s_mask, s_shift; + unsigned int cnt; + u32 *buf, val; + long ret; + + ret = emit_rpcs_query(obj, ctx, engine, &rq); + if (ret) + return ret; + + if (spin) + igt_spinner_end(spin); + + ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT); + i915_request_put(rq); + if (ret < 0) + return ret; + + buf = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + return ret; + } + + if (INTEL_GEN(i915) >= 11) { + s_mask = GEN11_RPCS_S_CNT_MASK; + s_shift = GEN11_RPCS_S_CNT_SHIFT; + } else { + s_mask = GEN8_RPCS_S_CNT_MASK; + s_shift = GEN8_RPCS_S_CNT_SHIFT; + } + + val = *buf; + cnt = (val & s_mask) >> s_shift; + *rpcs = val; + + i915_gem_object_unpin_map(obj); + + return cnt; +} + +static int +__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected, + const char *prefix, const char *suffix) +{ + if (slices == expected) + return 0; + + if (slices < 0) { + pr_err("%s: %s read slice count failed with %d%s\n", + name, prefix, slices, suffix); + return slices; + } + + pr_err("%s: %s slice count %d is not %u%s\n", + name, prefix, slices, expected, suffix); + + pr_info("RPCS=0x%x; %u%sx%u%s\n", + rpcs, slices, + (rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "", + (rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT, + (rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : ""); + + return -EINVAL; +} + +static int +__sseu_finish(struct drm_i915_private *i915, + const char *name, + unsigned int flags, + struct i915_gem_context *ctx, + struct i915_gem_context *kctx, + struct intel_engine_cs *engine, + struct drm_i915_gem_object *obj, + unsigned int expected, + struct igt_spinner *spin) +{ + unsigned int slices = + hweight32(intel_device_default_sseu(i915).slice_mask); + u32 rpcs = 0; + int ret = 0; + + if (flags & TEST_RESET) { + ret = i915_reset_engine(engine, "sseu"); + if (ret) + goto out; + } + + ret = __read_slice_count(i915, ctx, engine, obj, + flags & TEST_RESET ? 
NULL : spin, &rpcs); + ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!"); + if (ret) + goto out; + + ret = __read_slice_count(i915, kctx, engine, obj, NULL, &rpcs); + ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!"); + +out: + if (spin) + igt_spinner_end(spin); + + if ((flags & TEST_IDLE) && ret == 0) { + ret = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + if (ret) + return ret; + + ret = __read_slice_count(i915, ctx, engine, obj, NULL, &rpcs); + ret = __check_rpcs(name, rpcs, ret, expected, + "Context", " after idle!"); + } + + return ret; +} + +static int +__sseu_test(struct drm_i915_private *i915, + const char *name, + unsigned int flags, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct drm_i915_gem_object *obj, + struct intel_sseu sseu) +{ + struct igt_spinner *spin = NULL; + struct i915_gem_context *kctx; + int ret; + + kctx = kernel_context(i915); + if (IS_ERR(kctx)) + return PTR_ERR(kctx); + + ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin); + if (ret) + goto out; + + ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu); + if (ret) + goto out; + + ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj, + hweight32(sseu.slice_mask), spin); + +out: + if (spin) { + igt_spinner_end(spin); + igt_spinner_fini(spin); + kfree(spin); + } + + kernel_context_close(kctx); + + return ret; +} + +static int +__igt_ctx_sseu(struct drm_i915_private *i915, + const char *name, + unsigned int flags) +{ + struct intel_sseu default_sseu = intel_device_default_sseu(i915); + struct intel_engine_cs *engine = i915->engine[RCS]; + struct drm_i915_gem_object *obj; + struct i915_gem_context *ctx; + struct intel_sseu pg_sseu; + intel_wakeref_t wakeref; + struct drm_file *file; + int ret; + + if (INTEL_GEN(i915) < 9) + return 0; + + if (!RUNTIME_INFO(i915)->sseu.has_slice_pg) + return 0; + + if (hweight32(default_sseu.slice_mask) < 2) + return 0; + + /* + * Gen11 VME friendly power-gated configuration with half enabled + * sub-slices. + */ + pg_sseu = default_sseu; + pg_sseu.slice_mask = 1; + pg_sseu.subslice_mask = + ~(~0 << (hweight32(default_sseu.subslice_mask) / 2)); + + pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n", + name, flags, hweight32(default_sseu.slice_mask), + hweight32(pg_sseu.slice_mask)); + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + if (flags & TEST_RESET) + igt_global_reset_lock(i915); + + mutex_lock(&i915->drm.struct_mutex); + + ctx = i915_gem_create_context(i915, file->driver_priv); + if (IS_ERR(ctx)) { + ret = PTR_ERR(ctx); + goto out_unlock; + } + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) { + ret = PTR_ERR(obj); + goto out_unlock; + } + + wakeref = intel_runtime_pm_get(i915); + + /* First set the default mask. */ + ret = __sseu_test(i915, name, flags, ctx, engine, obj, default_sseu); + if (ret) + goto out_fail; + + /* Then set a power-gated configuration. */ + ret = __sseu_test(i915, name, flags, ctx, engine, obj, pg_sseu); + if (ret) + goto out_fail; + + /* Back to defaults. */ + ret = __sseu_test(i915, name, flags, ctx, engine, obj, default_sseu); + if (ret) + goto out_fail; + + /* One last power-gated configuration for the road. 
*/ + ret = __sseu_test(i915, name, flags, ctx, engine, obj, pg_sseu); + if (ret) + goto out_fail; + +out_fail: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + ret = -EIO; + + i915_gem_object_put(obj); + + intel_runtime_pm_put(i915, wakeref); + +out_unlock: + mutex_unlock(&i915->drm.struct_mutex); + + if (flags & TEST_RESET) + igt_global_reset_unlock(i915); + + mock_file_free(i915, file); + + if (ret) + pr_err("%s: Failed with %d!\n", name, ret); + + return ret; +} + +static int igt_ctx_sseu(void *arg) +{ + struct { + const char *name; + unsigned int flags; + } *phase, phases[] = { + { .name = "basic", .flags = 0 }, + { .name = "idle", .flags = TEST_IDLE }, + { .name = "busy", .flags = TEST_BUSY }, + { .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET }, + { .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE }, + { .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE }, + }; + unsigned int i; + int ret = 0; + + for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases); + i++, phase++) + ret = __igt_ctx_sseu(arg, phase->name, phase->flags); + + return ret; +} + static int igt_ctx_readonly(void *arg) { struct drm_i915_private *i915 = arg; @@ -657,11 +1049,11 @@ static int igt_ctx_readonly(void *arg) struct i915_gem_context *ctx; struct i915_hw_ppgtt *ppgtt; unsigned long ndwords, dw; + struct igt_live_test t; struct drm_file *file; I915_RND_STATE(prng); IGT_TIMEOUT(end_time); LIST_HEAD(objects); - struct live_test t; int err = -ENODEV; /* @@ -676,7 +1068,7 @@ static int igt_ctx_readonly(void *arg) mutex_lock(&i915->drm.struct_mutex); - err = begin_live_test(&t, i915, __func__, ""); + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; @@ -699,6 +1091,8 @@ static int igt_ctx_readonly(void *arg) unsigned int id; for_each_engine(engine, i915, id) { + intel_wakeref_t wakeref; + if (!intel_engine_can_store_dword(engine)) continue; @@ -713,9 +1107,9 @@ static int igt_ctx_readonly(void *arg) i915_gem_object_set_readonly(obj); } - intel_runtime_pm_get(i915); - err = gpu_fill(obj, ctx, engine, dw); - intel_runtime_pm_put(i915); + err = 0; + with_intel_runtime_pm(i915, wakeref) + err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -732,7 +1126,7 @@ static int igt_ctx_readonly(void *arg) } } pr_info("Submitted %lu dwords (across %u engines)\n", - ndwords, INTEL_INFO(i915)->num_rings); + ndwords, RUNTIME_INFO(i915)->num_rings); dw = 0; list_for_each_entry(obj, &objects, st_link) { @@ -752,7 +1146,7 @@ static int igt_ctx_readonly(void *arg) } out_unlock: - if (end_live_test(&t)) + if (igt_live_test_end(&t)) err = -EIO; mutex_unlock(&i915->drm.struct_mutex); @@ -976,10 +1370,11 @@ static int igt_vm_isolation(void *arg) struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx_a, *ctx_b; struct intel_engine_cs *engine; + intel_wakeref_t wakeref; + struct igt_live_test t; struct drm_file *file; I915_RND_STATE(prng); unsigned long count; - struct live_test t; unsigned int id; u64 vm_total; int err; @@ -998,7 +1393,7 @@ static int igt_vm_isolation(void *arg) mutex_lock(&i915->drm.struct_mutex); - err = begin_live_test(&t, i915, __func__, ""); + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; @@ -1022,7 +1417,7 @@ static int igt_vm_isolation(void *arg) GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total); vm_total -= I915_GTT_PAGE_SIZE; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); count = 0; for_each_engine(engine, i915, id) { @@ -1064,12 +1459,12 @@ static int igt_vm_isolation(void *arg) count += this; } pr_info("Checked %lu scratch offsets across %d engines\n", - count, INTEL_INFO(i915)->num_rings); + count, RUNTIME_INFO(i915)->num_rings); out_rpm: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); out_unlock: - if (end_live_test(&t)) + if (igt_live_test_end(&t)) err = -EIO; mutex_unlock(&i915->drm.struct_mutex); @@ -1165,6 +1560,7 @@ static int igt_switch_to_kernel_context(void *arg) struct intel_engine_cs *engine; struct i915_gem_context *ctx; enum intel_engine_id id; + intel_wakeref_t wakeref; int err; /* @@ -1175,7 +1571,7 @@ static int igt_switch_to_kernel_context(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); ctx = kernel_context(i915); if (IS_ERR(ctx)) { @@ -1200,7 +1596,7 @@ out_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); kernel_context_close(ctx); @@ -1232,6 +1628,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) SUBTEST(live_nop_switch), SUBTEST(igt_ctx_exec), SUBTEST(igt_ctx_readonly), + SUBTEST(igt_ctx_sseu), SUBTEST(igt_vm_isolation), }; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 4365979d8222..32dce7176f63 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -29,11 +29,23 @@ #include "mock_drm.h" #include "mock_gem_device.h" -static int populate_ggtt(struct drm_i915_private *i915) +static void quirk_add(struct drm_i915_gem_object *obj, + struct list_head *objects) { + /* quirk is only for live tiled objects, use it to declare ownership */ + GEM_BUG_ON(obj->mm.quirked); + obj->mm.quirked = true; + list_add(&obj->st_link, objects); +} + +static int populate_ggtt(struct drm_i915_private *i915, + struct list_head *objects) +{ + unsigned long unbound, bound, count; struct drm_i915_gem_object *obj; u64 size; + count = 0; for (size = 0; size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; size += I915_GTT_PAGE_SIZE) { @@ -43,21 +55,36 @@ 
static int populate_ggtt(struct drm_i915_private *i915) if (IS_ERR(obj)) return PTR_ERR(obj); + quirk_add(obj, objects); + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); if (IS_ERR(vma)) return PTR_ERR(vma); + + count++; } - if (!list_empty(&i915->mm.unbound_list)) { - size = 0; - list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) - size++; + unbound = 0; + list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) + if (obj->mm.quirked) + unbound++; + if (unbound) { + pr_err("%s: Found %lu objects unbound, expected %u!\n", + __func__, unbound, 0); + return -EINVAL; + } - pr_err("Found %lld objects unbound!\n", size); + bound = 0; + list_for_each_entry(obj, &i915->mm.bound_list, mm.link) + if (obj->mm.quirked) + bound++; + if (bound != count) { + pr_err("%s: Found %lu objects bound, expected %lu!\n", + __func__, bound, count); return -EINVAL; } - if (list_empty(&i915->ggtt.vm.inactive_list)) { + if (list_empty(&i915->ggtt.vm.bound_list)) { pr_err("No objects on the GGTT inactive list!\n"); return -EINVAL; } @@ -67,21 +94,26 @@ static int populate_ggtt(struct drm_i915_private *i915) static void unpin_ggtt(struct drm_i915_private *i915) { + struct i915_ggtt *ggtt = &i915->ggtt; struct i915_vma *vma; - list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link) - i915_vma_unpin(vma); + mutex_lock(&ggtt->vm.mutex); + list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link) + if (vma->obj->mm.quirked) + i915_vma_unpin(vma); + mutex_unlock(&ggtt->vm.mutex); } -static void cleanup_objects(struct drm_i915_private *i915) +static void cleanup_objects(struct drm_i915_private *i915, + struct list_head *list) { struct drm_i915_gem_object *obj, *on; - list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, mm.link) - i915_gem_object_put(obj); - - list_for_each_entry_safe(obj, on, &i915->mm.bound_list, mm.link) + list_for_each_entry_safe(obj, on, list, st_link) { + GEM_BUG_ON(!obj->mm.quirked); + obj->mm.quirked = false; i915_gem_object_put(obj); + } mutex_unlock(&i915->drm.struct_mutex); @@ -94,11 +126,12 @@ static int igt_evict_something(void *arg) { struct drm_i915_private *i915 = arg; struct i915_ggtt *ggtt = &i915->ggtt; + LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and try to evict one. */ - err = populate_ggtt(i915); + err = populate_ggtt(i915, &objects); if (err) goto cleanup; @@ -127,7 +160,7 @@ static int igt_evict_something(void *arg) } cleanup: - cleanup_objects(i915); + cleanup_objects(i915, &objects); return err; } @@ -136,13 +169,14 @@ static int igt_overcommit(void *arg) struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; struct i915_vma *vma; + LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and then try to pin one more. * We expect it to fail. */ - err = populate_ggtt(i915); + err = populate_ggtt(i915, &objects); if (err) goto cleanup; @@ -152,6 +186,8 @@ static int igt_overcommit(void *arg) goto cleanup; } + quirk_add(obj, &objects); + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) { pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma)); @@ -160,7 +196,7 @@ static int igt_overcommit(void *arg) } cleanup: - cleanup_objects(i915); + cleanup_objects(i915, &objects); return err; } @@ -172,11 +208,12 @@ static int igt_evict_for_vma(void *arg) .start = 0, .size = 4096, }; + LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and try to evict a range. 
*/ - err = populate_ggtt(i915); + err = populate_ggtt(i915, &objects); if (err) goto cleanup; @@ -199,7 +236,7 @@ static int igt_evict_for_vma(void *arg) } cleanup: - cleanup_objects(i915); + cleanup_objects(i915, &objects); return err; } @@ -222,6 +259,7 @@ static int igt_evict_for_cache_color(void *arg) }; struct drm_i915_gem_object *obj; struct i915_vma *vma; + LIST_HEAD(objects); int err; /* Currently the use of color_adjust is limited to cache domains within @@ -237,6 +275,7 @@ static int igt_evict_for_cache_color(void *arg) goto cleanup; } i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + quirk_add(obj, &objects); vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, I915_GTT_PAGE_SIZE | flags); @@ -252,6 +291,7 @@ static int igt_evict_for_cache_color(void *arg) goto cleanup; } i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + quirk_add(obj, &objects); /* Neighbouring; same colour - should fit */ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, @@ -287,7 +327,7 @@ static int igt_evict_for_cache_color(void *arg) cleanup: unpin_ggtt(i915); - cleanup_objects(i915); + cleanup_objects(i915, &objects); ggtt->vm.mm.color_adjust = NULL; return err; } @@ -296,11 +336,12 @@ static int igt_evict_vm(void *arg) { struct drm_i915_private *i915 = arg; struct i915_ggtt *ggtt = &i915->ggtt; + LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and try to evict everything. */ - err = populate_ggtt(i915); + err = populate_ggtt(i915, &objects); if (err) goto cleanup; @@ -322,7 +363,7 @@ static int igt_evict_vm(void *arg) } cleanup: - cleanup_objects(i915); + cleanup_objects(i915, &objects); return err; } @@ -336,6 +377,7 @@ static int igt_evict_contexts(void *arg) struct drm_mm_node node; struct reserved *next; } *reserved = NULL; + intel_wakeref_t wakeref; struct drm_mm_node hole; unsigned long count; int err; @@ -355,7 +397,7 @@ static int igt_evict_contexts(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); /* Reserve a block so that we know we have enough to fit a few rq */ memset(&hole, 0, sizeof(hole)); @@ -400,8 +442,10 @@ static int igt_evict_contexts(void *arg) struct drm_file *file; file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); + if (IS_ERR(file)) { + err = PTR_ERR(file); + break; + } count = 0; mutex_lock(&i915->drm.struct_mutex); @@ -464,7 +508,7 @@ out_locked: } if (drm_mm_node_allocated(&hole)) drm_mm_remove_node(&hole); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -480,14 +524,17 @@ int i915_gem_evict_mock_selftests(void) SUBTEST(igt_overcommit), }; struct drm_i915_private *i915; - int err; + intel_wakeref_t wakeref; + int err = 0; i915 = mock_gem_device(); if (!i915) return -ENOMEM; mutex_lock(&i915->drm.struct_mutex); - err = i915_subtests(tests, i915); + with_intel_runtime_pm(i915, wakeref) + err = i915_subtests(tests, i915); + mutex_unlock(&i915->drm.struct_mutex); drm_dev_put(&i915->drm); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index a9ed0ecc94e2..3850ef4a5ec8 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -275,6 +275,7 @@ static int lowlevel_hole(struct drm_i915_private *i915, for (n = 0; n < count; n++) { u64 addr = hole_start + order[n] * BIT_ULL(size); + intel_wakeref_t wakeref; GEM_BUG_ON(addr + BIT_ULL(size) > vm->total); @@ -293,9 +294,9 @@ static int 
lowlevel_hole(struct drm_i915_private *i915, mock_vma.node.size = BIT_ULL(size); mock_vma.node.start = addr; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); } count = n; @@ -1144,6 +1145,7 @@ static int igt_ggtt_page(void *arg) struct drm_i915_private *i915 = arg; struct i915_ggtt *ggtt = &i915->ggtt; struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; struct drm_mm_node tmp; unsigned int *order, n; int err; @@ -1169,7 +1171,7 @@ static int igt_ggtt_page(void *arg) if (err) goto out_unpin; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); for (n = 0; n < count; n++) { u64 offset = tmp.start + n * PAGE_SIZE; @@ -1216,7 +1218,7 @@ static int igt_ggtt_page(void *arg) kfree(order); out_remove: ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); drm_mm_remove_node(&tmp); out_unpin: i915_gem_object_unpin_pages(obj); @@ -1235,7 +1237,10 @@ static void track_vma_bind(struct i915_vma *vma) __i915_gem_object_pin_pages(obj); vma->pages = obj->mm.pages; - list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + + mutex_lock(&vma->vm->mutex); + list_move_tail(&vma->vm_link, &vma->vm->bound_list); + mutex_unlock(&vma->vm->mutex); } static int exercise_mock(struct drm_i915_private *i915, @@ -1265,27 +1270,35 @@ static int exercise_mock(struct drm_i915_private *i915, static int igt_mock_fill(void *arg) { - return exercise_mock(arg, fill_hole); + struct i915_ggtt *ggtt = arg; + + return exercise_mock(ggtt->vm.i915, fill_hole); } static int igt_mock_walk(void *arg) { - return exercise_mock(arg, walk_hole); + struct i915_ggtt *ggtt = arg; + + return exercise_mock(ggtt->vm.i915, walk_hole); } static int igt_mock_pot(void *arg) { - return exercise_mock(arg, pot_hole); + struct i915_ggtt *ggtt = arg; + + return exercise_mock(ggtt->vm.i915, pot_hole); } static int igt_mock_drunk(void *arg) { - return exercise_mock(arg, drunk_hole); + struct i915_ggtt *ggtt = arg; + + return exercise_mock(ggtt->vm.i915, drunk_hole); } static int igt_gtt_reserve(void *arg) { - struct drm_i915_private *i915 = arg; + struct i915_ggtt *ggtt = arg; struct drm_i915_gem_object *obj, *on; LIST_HEAD(objects); u64 total; @@ -1298,11 +1311,12 @@ static int igt_gtt_reserve(void *arg) /* Start by filling the GGTT */ for (total = 0; - total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; - total += 2*I915_GTT_PAGE_SIZE) { + total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total; + total += 2 * I915_GTT_PAGE_SIZE) { struct i915_vma *vma; - obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, + 2 * PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out; @@ -1316,20 +1330,20 @@ static int igt_gtt_reserve(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, obj->base.size, total, obj->cache_level, 0); if (err) { pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1347,11 +1361,12 @@ static int igt_gtt_reserve(void *arg) /* Now we start forcing evictions */ for (total = 
I915_GTT_PAGE_SIZE; - total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; - total += 2*I915_GTT_PAGE_SIZE) { + total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total; + total += 2 * I915_GTT_PAGE_SIZE) { struct i915_vma *vma; - obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, + 2 * PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out; @@ -1365,20 +1380,20 @@ static int igt_gtt_reserve(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, obj->base.size, total, obj->cache_level, 0); if (err) { pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1399,7 +1414,7 @@ static int igt_gtt_reserve(void *arg) struct i915_vma *vma; u64 offset; - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; @@ -1411,18 +1426,18 @@ static int igt_gtt_reserve(void *arg) goto out; } - offset = random_offset(0, i915->ggtt.vm.total, + offset = random_offset(0, ggtt->vm.total, 2*I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT); - err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, obj->base.size, offset, obj->cache_level, 0); if (err) { pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1448,7 +1463,7 @@ out: static int igt_gtt_insert(void *arg) { - struct drm_i915_private *i915 = arg; + struct i915_ggtt *ggtt = arg; struct drm_i915_gem_object *obj, *on; struct drm_mm_node tmp = {}; const struct invalid_insert { @@ -1457,8 +1472,8 @@ static int igt_gtt_insert(void *arg) u64 start, end; } invalid_insert[] = { { - i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0, - 0, i915->ggtt.vm.total, + ggtt->vm.total + I915_GTT_PAGE_SIZE, 0, + 0, ggtt->vm.total, }, { 2*I915_GTT_PAGE_SIZE, 0, @@ -1488,7 +1503,7 @@ static int igt_gtt_insert(void *arg) /* Check a couple of obviously invalid requests */ for (ii = invalid_insert; ii->size; ii++) { - err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp, + err = i915_gem_gtt_insert(&ggtt->vm, &tmp, ii->size, ii->alignment, I915_COLOR_UNEVICTABLE, ii->start, ii->end, @@ -1503,11 +1518,12 @@ static int igt_gtt_insert(void *arg) /* Start by filling the GGTT */ for (total = 0; - total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; + total + I915_GTT_PAGE_SIZE <= ggtt->vm.total; total += I915_GTT_PAGE_SIZE) { struct i915_vma *vma; - obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, + I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out; @@ -1521,15 +1537,15 @@ static int igt_gtt_insert(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, obj->base.size, 0, obj->cache_level, - 0, i915->ggtt.vm.total, + 0, ggtt->vm.total, 0); if (err == -ENOSPC) { /* maxed out the 
GGTT space */ @@ -1538,7 +1554,7 @@ static int igt_gtt_insert(void *arg) } if (err) { pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1550,7 +1566,7 @@ static int igt_gtt_insert(void *arg) list_for_each_entry(obj, &objects, st_link) { struct i915_vma *vma; - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; @@ -1570,7 +1586,7 @@ static int igt_gtt_insert(void *arg) struct i915_vma *vma; u64 offset; - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; @@ -1585,13 +1601,13 @@ static int igt_gtt_insert(void *arg) goto out; } - err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, obj->base.size, 0, obj->cache_level, - 0, i915->ggtt.vm.total, + 0, ggtt->vm.total, 0); if (err) { pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1607,11 +1623,12 @@ static int igt_gtt_insert(void *arg) /* And then force evictions */ for (total = 0; - total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; - total += 2*I915_GTT_PAGE_SIZE) { + total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total; + total += 2 * I915_GTT_PAGE_SIZE) { struct i915_vma *vma; - obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, + 2 * I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out; @@ -1625,19 +1642,19 @@ static int igt_gtt_insert(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, obj->base.size, 0, obj->cache_level, - 0, i915->ggtt.vm.total, + 0, ggtt->vm.total, 0); if (err) { pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1664,17 +1681,25 @@ int i915_gem_gtt_mock_selftests(void) SUBTEST(igt_gtt_insert), }; struct drm_i915_private *i915; + struct i915_ggtt ggtt; int err; i915 = mock_gem_device(); if (!i915) return -ENOMEM; + mock_init_ggtt(i915, &ggtt); + mutex_lock(&i915->drm.struct_mutex); - err = i915_subtests(tests, i915); + err = i915_subtests(tests, &ggtt); + mock_device_flush(i915); mutex_unlock(&i915->drm.struct_mutex); + i915_gem_drain_freed_objects(i915); + + mock_fini_ggtt(&ggtt); drm_dev_put(&i915->drm); + return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index c3999dd2021e..395ae878e0f7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -238,6 +238,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, u32 *cpu; GEM_BUG_ON(view.partial.size > nreal); + cond_resched(); err = i915_gem_object_set_to_gtt_domain(obj, true); if (err) { @@ -307,6 +308,7 @@ static int igt_partial_tiling(void *arg) const unsigned int nreal = 1 << 12; /* largest tile row x2 */ struct drm_i915_private *i915 = arg; struct 
drm_i915_gem_object *obj; + intel_wakeref_t wakeref; int tiling; int err; @@ -332,7 +334,7 @@ static int igt_partial_tiling(void *arg) } mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (1) { IGT_TIMEOUT(end); @@ -443,7 +445,7 @@ next_tiling: ; } out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); i915_gem_object_unpin_pages(obj); out: @@ -505,11 +507,13 @@ static void disable_retire_worker(struct drm_i915_private *i915) mutex_lock(&i915->drm.struct_mutex); if (!i915->gt.active_requests++) { - intel_runtime_pm_get(i915); - i915_gem_unpark(i915); - intel_runtime_pm_put(i915); + intel_wakeref_t wakeref; + + with_intel_runtime_pm(i915, wakeref) + i915_gem_unpark(i915); } mutex_unlock(&i915->drm.struct_mutex); + cancel_delayed_work_sync(&i915->gt.retire_work); cancel_delayed_work_sync(&i915->gt.idle_work); } @@ -577,6 +581,8 @@ static int igt_mmap_offset_exhaustion(void *arg) /* Now fill with busy dead objects that we expect to reap */ for (loop = 0; loop < 3; loop++) { + intel_wakeref_t wakeref; + if (i915_terminally_wedged(&i915->gpu_error)) break; @@ -586,10 +592,10 @@ static int igt_mmap_offset_exhaustion(void *arg) goto out; } + err = 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); - err = make_obj_busy(obj); - intel_runtime_pm_put(i915); + with_intel_runtime_pm(i915, wakeref) + err = make_obj_busy(obj); mutex_unlock(&i915->drm.struct_mutex); if (err) { pr_err("[loop %d] Failed to busy the object\n", loop); diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index a15713cae3b3..6d766925ad04 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -12,7 +12,9 @@ selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */ selftest(uncore, intel_uncore_live_selftests) selftest(workarounds, intel_workarounds_live_selftests) +selftest(timelines, i915_timeline_live_selftests) selftest(requests, i915_request_live_selftests) +selftest(active, i915_active_live_selftests) selftest(objects, i915_gem_object_live_selftests) selftest(dmabuf, i915_gem_dmabuf_live_selftests) selftest(coherency, i915_gem_coherency_live_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index 1b70208eeea7..88e5ab586337 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -15,8 +15,7 @@ selftest(scatterlist, scatterlist_mock_selftests) selftest(syncmap, i915_syncmap_mock_selftests) selftest(uncore, intel_uncore_mock_selftests) selftest(engine, intel_engine_cs_mock_selftests) -selftest(breadcrumbs, intel_breadcrumbs_mock_selftests) -selftest(timelines, i915_gem_timeline_mock_selftests) +selftest(timelines, i915_timeline_mock_selftests) selftest(requests, i915_request_mock_selftests) selftest(objects, i915_gem_object_mock_selftests) selftest(dmabuf, i915_gem_dmabuf_mock_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c index 1f415ce47018..716a3f19f030 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.c +++ b/drivers/gpu/drm/i915/selftests/i915_random.c @@ -41,18 +41,37 @@ u64 i915_prandom_u64_state(struct rnd_state *rnd) return x; } -void i915_random_reorder(unsigned int *order, unsigned int count, 
- struct rnd_state *state) +void i915_prandom_shuffle(void *arr, size_t elsz, size_t count, + struct rnd_state *state) { - unsigned int i, j; + char stack[128]; + + if (WARN_ON(elsz > sizeof(stack) || count > U32_MAX)) + return; + + if (!elsz || !count) + return; + + /* Fisher-Yates shuffle courtesy of Knuth */ + while (--count) { + size_t swp; + + swp = i915_prandom_u32_max_state(count + 1, state); + if (swp == count) + continue; - for (i = 0; i < count; i++) { - BUILD_BUG_ON(sizeof(unsigned int) > sizeof(u32)); - j = i915_prandom_u32_max_state(count, state); - swap(order[i], order[j]); + memcpy(stack, arr + count * elsz, elsz); + memcpy(arr + count * elsz, arr + swp * elsz, elsz); + memcpy(arr + swp * elsz, stack, elsz); } } +void i915_random_reorder(unsigned int *order, unsigned int count, + struct rnd_state *state) +{ + i915_prandom_shuffle(order, sizeof(*order), count, state); +} + unsigned int *i915_random_order(unsigned int count, struct rnd_state *state) { unsigned int *order, i; diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h index 7dffedc501ca..8e1ff9c105b6 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.h +++ b/drivers/gpu/drm/i915/selftests/i915_random.h @@ -54,4 +54,7 @@ void i915_random_reorder(unsigned int *order, unsigned int count, struct rnd_state *state); +void i915_prandom_shuffle(void *arr, size_t elsz, size_t count, + struct rnd_state *state); + #endif /* !__I915_SELFTESTS_RANDOM_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 07e557815308..6733dc5b6b4c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -25,8 +25,12 @@ #include <linux/prime_numbers.h> #include "../i915_selftest.h" +#include "i915_random.h" +#include "igt_live_test.h" +#include "lib_sw_fence.h" #include "mock_context.h" +#include "mock_drm.h" #include "mock_gem_device.h" static int igt_add_request(void *arg) @@ -246,93 +250,285 @@ err_context_0: return err; } -int i915_request_mock_selftests(void) +struct smoketest { + struct intel_engine_cs *engine; + struct i915_gem_context **contexts; + atomic_long_t num_waits, num_fences; + int ncontexts, max_batch; + struct i915_request *(*request_alloc)(struct i915_gem_context *, + struct intel_engine_cs *); +}; + +static struct i915_request * +__mock_request_alloc(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) { - static const struct i915_subtest tests[] = { - SUBTEST(igt_add_request), - SUBTEST(igt_wait_request), - SUBTEST(igt_fence_wait), - SUBTEST(igt_request_rewind), - }; - struct drm_i915_private *i915; - int err; + return mock_request(engine, ctx, 0); +} - i915 = mock_gem_device(); - if (!i915) +static struct i915_request * +__live_request_alloc(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + return i915_request_alloc(engine, ctx); +} + +static int __igt_breadcrumbs_smoketest(void *arg) +{ + struct smoketest *t = arg; + struct mutex * const BKL = &t->engine->i915->drm.struct_mutex; + const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1; + const unsigned int total = 4 * t->ncontexts + 1; + unsigned int num_waits = 0, num_fences = 0; + struct i915_request **requests; + I915_RND_STATE(prng); + unsigned int *order; + int err = 0; + + /* + * A very simple test to catch the most egregious of list handling bugs. 
+ * + * At its heart, we simply create oodles of requests running across + * multiple kthreads and enable signaling on them, for the sole purpose + * of stressing our breadcrumb handling. The only inspection we do is + * that the fences were marked as signaled. + */ + + requests = kmalloc_array(total, sizeof(*requests), GFP_KERNEL); + if (!requests) return -ENOMEM; - err = i915_subtests(tests, i915); - drm_dev_put(&i915->drm); + order = i915_random_order(total, &prng); + if (!order) { + err = -ENOMEM; + goto out_requests; + } - return err; -} + while (!kthread_should_stop()) { + struct i915_sw_fence *submit, *wait; + unsigned int n, count; -struct live_test { - struct drm_i915_private *i915; - const char *func; - const char *name; + submit = heap_fence_create(GFP_KERNEL); + if (!submit) { + err = -ENOMEM; + break; + } - unsigned int reset_count; -}; + wait = heap_fence_create(GFP_KERNEL); + if (!wait) { + i915_sw_fence_commit(submit); + heap_fence_put(submit); + err = ENOMEM; + break; + } -static int begin_live_test(struct live_test *t, - struct drm_i915_private *i915, - const char *func, - const char *name) -{ - int err; + i915_random_reorder(order, total, &prng); + count = 1 + i915_prandom_u32_max_state(max_batch, &prng); - t->i915 = i915; - t->func = func; - t->name = name; + for (n = 0; n < count; n++) { + struct i915_gem_context *ctx = + t->contexts[order[n] % t->ncontexts]; + struct i915_request *rq; - err = i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (err) { - pr_err("%s(%s): failed to idle before, with err=%d!", - func, name, err); - return err; + mutex_lock(BKL); + + rq = t->request_alloc(ctx, t->engine); + if (IS_ERR(rq)) { + mutex_unlock(BKL); + err = PTR_ERR(rq); + count = n; + break; + } + + err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, + submit, + GFP_KERNEL); + + requests[n] = i915_request_get(rq); + i915_request_add(rq); + + mutex_unlock(BKL); + + if (err >= 0) + err = i915_sw_fence_await_dma_fence(wait, + &rq->fence, + 0, + GFP_KERNEL); + + if (err < 0) { + i915_request_put(rq); + count = n; + break; + } + } + + i915_sw_fence_commit(submit); + i915_sw_fence_commit(wait); + + if (!wait_event_timeout(wait->wait, + i915_sw_fence_done(wait), + HZ / 2)) { + struct i915_request *rq = requests[count - 1]; + + pr_err("waiting for %d fences (last %llx:%lld) on %s timed out!\n", + count, + rq->fence.context, rq->fence.seqno, + t->engine->name); + i915_gem_set_wedged(t->engine->i915); + GEM_BUG_ON(!i915_request_completed(rq)); + i915_sw_fence_wait(wait); + err = -EIO; + } + + for (n = 0; n < count; n++) { + struct i915_request *rq = requests[n]; + + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &rq->fence.flags)) { + pr_err("%llu:%llu was not signaled!\n", + rq->fence.context, rq->fence.seqno); + err = -EINVAL; + } + + i915_request_put(rq); + } + + heap_fence_put(wait); + heap_fence_put(submit); + + if (err < 0) + break; + + num_fences += count; + num_waits++; + + cond_resched(); } - i915->gpu_error.missed_irq_rings = 0; - t->reset_count = i915_reset_count(&i915->gpu_error); + atomic_long_add(num_fences, &t->num_fences); + atomic_long_add(num_waits, &t->num_waits); - return 0; + kfree(order); +out_requests: + kfree(requests); + return err; } -static int end_live_test(struct live_test *t) +static int mock_breadcrumbs_smoketest(void *arg) { - struct drm_i915_private *i915 = t->i915; + struct drm_i915_private *i915 = arg; + struct smoketest t = { + .engine = i915->engine[RCS], + .ncontexts = 1024, + .max_batch = 1024, + .request_alloc = 
__mock_request_alloc + }; + unsigned int ncpus = num_online_cpus(); + struct task_struct **threads; + unsigned int n; + int ret = 0; + + /* + * Smoketest our breadcrumb/signal handling for requests across multiple + * threads. A very simple test to only catch the most egregious of bugs. + * See __igt_breadcrumbs_smoketest(); + */ - i915_retire_requests(i915); + threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL); + if (!threads) + return -ENOMEM; - if (wait_for(intel_engines_are_idle(i915), 10)) { - pr_err("%s(%s): GPU not idle\n", t->func, t->name); - return -EIO; + t.contexts = + kmalloc_array(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL); + if (!t.contexts) { + ret = -ENOMEM; + goto out_threads; } - if (t->reset_count != i915_reset_count(&i915->gpu_error)) { - pr_err("%s(%s): GPU was reset %d times!\n", - t->func, t->name, - i915_reset_count(&i915->gpu_error) - t->reset_count); - return -EIO; + mutex_lock(&t.engine->i915->drm.struct_mutex); + for (n = 0; n < t.ncontexts; n++) { + t.contexts[n] = mock_context(t.engine->i915, "mock"); + if (!t.contexts[n]) { + ret = -ENOMEM; + goto out_contexts; + } } + mutex_unlock(&t.engine->i915->drm.struct_mutex); + + for (n = 0; n < ncpus; n++) { + threads[n] = kthread_run(__igt_breadcrumbs_smoketest, + &t, "igt/%d", n); + if (IS_ERR(threads[n])) { + ret = PTR_ERR(threads[n]); + ncpus = n; + break; + } - if (i915->gpu_error.missed_irq_rings) { - pr_err("%s(%s): Missed interrupts on engines %lx\n", - t->func, t->name, i915->gpu_error.missed_irq_rings); - return -EIO; + get_task_struct(threads[n]); } - return 0; + msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies)); + + for (n = 0; n < ncpus; n++) { + int err; + + err = kthread_stop(threads[n]); + if (err < 0 && !ret) + ret = err; + + put_task_struct(threads[n]); + } + pr_info("Completed %lu waits for %lu fence across %d cpus\n", + atomic_long_read(&t.num_waits), + atomic_long_read(&t.num_fences), + ncpus); + + mutex_lock(&t.engine->i915->drm.struct_mutex); +out_contexts: + for (n = 0; n < t.ncontexts; n++) { + if (!t.contexts[n]) + break; + mock_context_close(t.contexts[n]); + } + mutex_unlock(&t.engine->i915->drm.struct_mutex); + kfree(t.contexts); +out_threads: + kfree(threads); + + return ret; +} + +int i915_request_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_add_request), + SUBTEST(igt_wait_request), + SUBTEST(igt_fence_wait), + SUBTEST(igt_request_rewind), + SUBTEST(mock_breadcrumbs_smoketest), + }; + struct drm_i915_private *i915; + intel_wakeref_t wakeref; + int err = 0; + + i915 = mock_gem_device(); + if (!i915) + return -ENOMEM; + + with_intel_runtime_pm(i915, wakeref) + err = i915_subtests(tests, i915); + + drm_dev_put(&i915->drm); + + return err; } static int live_nop_request(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; - struct live_test t; + intel_wakeref_t wakeref; + struct igt_live_test t; unsigned int id; int err = -ENODEV; @@ -342,7 +538,7 @@ static int live_nop_request(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); for_each_engine(engine, i915, id) { struct i915_request *request = NULL; @@ -350,7 +546,7 @@ static int live_nop_request(void *arg) IGT_TIMEOUT(end_time); ktime_t times[2] = {}; - err = begin_live_test(&t, i915, __func__, engine->name); + err = igt_live_test_begin(&t, i915, __func__, engine->name); if (err) goto out_unlock; @@ -392,7 +588,7 @@ static int live_nop_request(void *arg) break; } - err = 
end_live_test(&t); + err = igt_live_test_end(&t); if (err) goto out_unlock; @@ -403,7 +599,7 @@ static int live_nop_request(void *arg) } out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -478,7 +674,8 @@ static int live_empty_request(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; - struct live_test t; + intel_wakeref_t wakeref; + struct igt_live_test t; struct i915_vma *batch; unsigned int id; int err = 0; @@ -489,7 +686,7 @@ static int live_empty_request(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); batch = empty_batch(i915); if (IS_ERR(batch)) { @@ -503,7 +700,7 @@ static int live_empty_request(void *arg) unsigned long n, prime; ktime_t times[2] = {}; - err = begin_live_test(&t, i915, __func__, engine->name); + err = igt_live_test_begin(&t, i915, __func__, engine->name); if (err) goto out_batch; @@ -539,7 +736,7 @@ static int live_empty_request(void *arg) break; } - err = end_live_test(&t); + err = igt_live_test_end(&t); if (err) goto out_batch; @@ -553,7 +750,7 @@ out_batch: i915_vma_unpin(batch); i915_vma_put(batch); out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -637,8 +834,9 @@ static int live_all_engines(void *arg) struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; struct i915_request *request[I915_NUM_ENGINES]; + intel_wakeref_t wakeref; + struct igt_live_test t; struct i915_vma *batch; - struct live_test t; unsigned int id; int err; @@ -648,9 +846,9 @@ static int live_all_engines(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); - err = begin_live_test(&t, i915, __func__, ""); + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; @@ -722,7 +920,7 @@ static int live_all_engines(void *arg) request[id] = NULL; } - err = end_live_test(&t); + err = igt_live_test_end(&t); out_request: for_each_engine(engine, i915, id) @@ -731,7 +929,7 @@ out_request: i915_vma_unpin(batch); i915_vma_put(batch); out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -742,7 +940,8 @@ static int live_sequential_engines(void *arg) struct i915_request *request[I915_NUM_ENGINES] = {}; struct i915_request *prev = NULL; struct intel_engine_cs *engine; - struct live_test t; + intel_wakeref_t wakeref; + struct igt_live_test t; unsigned int id; int err; @@ -753,9 +952,9 @@ static int live_sequential_engines(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); - err = begin_live_test(&t, i915, __func__, ""); + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; @@ -838,7 +1037,7 @@ static int live_sequential_engines(void *arg) GEM_BUG_ON(!i915_request_completed(request[id])); } - err = end_live_test(&t); + err = igt_live_test_end(&t); out_request: for_each_engine(engine, i915, id) { @@ -860,11 +1059,183 @@ out_request: i915_request_put(request[id]); } out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } +static int +max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine) +{ + struct i915_request *rq; + int ret; + + /* + * Before execlists, all contexts 
share the same ringbuffer. With + * execlists, each context/engine has a separate ringbuffer and + * for the purposes of this test, inexhaustible. + * + * For the global ringbuffer though, we have to be very careful + * that we do not wrap while preventing the execution of requests + * with a unsignaled fence. + */ + if (HAS_EXECLISTS(ctx->i915)) + return INT_MAX; + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + } else { + int sz; + + ret = rq->ring->size - rq->reserved_space; + i915_request_add(rq); + + sz = rq->ring->emit - rq->head; + if (sz < 0) + sz += rq->ring->size; + ret /= sz; + ret /= 2; /* leave half spare, in case of emergency! */ + } + + return ret; +} + +static int live_breadcrumbs_smoketest(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct smoketest t[I915_NUM_ENGINES]; + unsigned int ncpus = num_online_cpus(); + unsigned long num_waits, num_fences; + struct intel_engine_cs *engine; + struct task_struct **threads; + struct igt_live_test live; + enum intel_engine_id id; + intel_wakeref_t wakeref; + struct drm_file *file; + unsigned int n; + int ret = 0; + + /* + * Smoketest our breadcrumb/signal handling for requests across multiple + * threads. A very simple test to only catch the most egregious of bugs. + * See __igt_breadcrumbs_smoketest(); + * + * On real hardware this time. + */ + + wakeref = intel_runtime_pm_get(i915); + + file = mock_file(i915); + if (IS_ERR(file)) { + ret = PTR_ERR(file); + goto out_rpm; + } + + threads = kcalloc(ncpus * I915_NUM_ENGINES, + sizeof(*threads), + GFP_KERNEL); + if (!threads) { + ret = -ENOMEM; + goto out_file; + } + + memset(&t[0], 0, sizeof(t[0])); + t[0].request_alloc = __live_request_alloc; + t[0].ncontexts = 64; + t[0].contexts = kmalloc_array(t[0].ncontexts, + sizeof(*t[0].contexts), + GFP_KERNEL); + if (!t[0].contexts) { + ret = -ENOMEM; + goto out_threads; + } + + mutex_lock(&i915->drm.struct_mutex); + for (n = 0; n < t[0].ncontexts; n++) { + t[0].contexts[n] = live_context(i915, file); + if (!t[0].contexts[n]) { + ret = -ENOMEM; + goto out_contexts; + } + } + + ret = igt_live_test_begin(&live, i915, __func__, ""); + if (ret) + goto out_contexts; + + for_each_engine(engine, i915, id) { + t[id] = t[0]; + t[id].engine = engine; + t[id].max_batch = max_batches(t[0].contexts[0], engine); + if (t[id].max_batch < 0) { + ret = t[id].max_batch; + mutex_unlock(&i915->drm.struct_mutex); + goto out_flush; + } + /* One ring interleaved between requests from all cpus */ + t[id].max_batch /= num_online_cpus() + 1; + pr_debug("Limiting batches to %d requests on %s\n", + t[id].max_batch, engine->name); + + for (n = 0; n < ncpus; n++) { + struct task_struct *tsk; + + tsk = kthread_run(__igt_breadcrumbs_smoketest, + &t[id], "igt/%d.%d", id, n); + if (IS_ERR(tsk)) { + ret = PTR_ERR(tsk); + mutex_unlock(&i915->drm.struct_mutex); + goto out_flush; + } + + get_task_struct(tsk); + threads[id * ncpus + n] = tsk; + } + } + mutex_unlock(&i915->drm.struct_mutex); + + msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies)); + +out_flush: + num_waits = 0; + num_fences = 0; + for_each_engine(engine, i915, id) { + for (n = 0; n < ncpus; n++) { + struct task_struct *tsk = threads[id * ncpus + n]; + int err; + + if (!tsk) + continue; + + err = kthread_stop(tsk); + if (err < 0 && !ret) + ret = err; + + put_task_struct(tsk); + } + + num_waits += atomic_long_read(&t[id].num_waits); + num_fences += atomic_long_read(&t[id].num_fences); + } + pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n", + 
num_waits, num_fences, RUNTIME_INFO(i915)->num_rings, ncpus); + + mutex_lock(&i915->drm.struct_mutex); + ret = igt_live_test_end(&live) ?: ret; +out_contexts: + mutex_unlock(&i915->drm.struct_mutex); + kfree(t[0].contexts); +out_threads: + kfree(threads); +out_file: + mock_file_free(i915, file); +out_rpm: + intel_runtime_pm_put(i915, wakeref); + + return ret; +} + int i915_request_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { @@ -872,6 +1243,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915) SUBTEST(live_all_engines), SUBTEST(live_sequential_engines), SUBTEST(live_empty_request), + SUBTEST(live_breadcrumbs_smoketest), }; if (i915_terminally_wedged(&i915->gpu_error)) diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index 86c54ea37f48..10ef0e636a24 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -197,6 +197,49 @@ int i915_live_selftests(struct pci_dev *pdev) return 0; } +static bool apply_subtest_filter(const char *caller, const char *name) +{ + char *filter, *sep, *tok; + bool result = true; + + filter = kstrdup(i915_selftest.filter, GFP_KERNEL); + for (sep = filter; (tok = strsep(&sep, ","));) { + bool allow = true; + char *sl; + + if (*tok == '!') { + allow = false; + tok++; + } + + if (*tok == '\0') + continue; + + sl = strchr(tok, '/'); + if (sl) { + *sl++ = '\0'; + if (strcmp(tok, caller)) { + if (allow) + result = false; + continue; + } + tok = sl; + } + + if (strcmp(tok, name)) { + if (allow) + result = false; + continue; + } + + result = allow; + break; + } + kfree(filter); + + return result; +} + int __i915_subtests(const char *caller, const struct i915_subtest *st, unsigned int count, @@ -209,6 +252,9 @@ int __i915_subtests(const char *caller, if (signal_pending(current)) return -EINTR; + if (!apply_subtest_filter(caller, st->name)) + continue; + pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name); GEM_TRACE("Running %s/%s\n", caller, st->name); @@ -244,6 +290,7 @@ bool __igt_timeout(unsigned long timeout, const char *fmt, ...) 
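To make the new st_filter module parameter (registered just below) concrete: apply_subtest_filter() above treats the string as a comma-separated list of "subtest" or "caller/subtest" tokens, a leading '!' excludes rather than includes, the first matching token decides, and if only non-matching include tokens are present the subtest is skipped. A standalone sketch of the same matching logic (userspace C, glibc strdup/strsep in place of the kernel helpers; the caller/subtest strings in main() are just sample values):

#define _DEFAULT_SOURCE
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static bool filter_match(const char *spec, const char *caller, const char *name)
{
	char *filter = strdup(spec);
	char *sep = filter, *tok;
	bool result = true;

	if (!filter)
		return true;

	while ((tok = strsep(&sep, ","))) {
		bool allow = true;
		char *sl;

		if (*tok == '!') {	/* '!' prefix excludes instead of includes */
			allow = false;
			tok++;
		}
		if (*tok == '\0')
			continue;

		sl = strchr(tok, '/');	/* optional "caller/" prefix */
		if (sl) {
			*sl++ = '\0';
			if (strcmp(tok, caller)) {
				if (allow)
					result = false;
				continue;
			}
			tok = sl;
		}

		if (strcmp(tok, name)) {
			if (allow)
				result = false;
			continue;
		}

		result = allow;
		break;
	}

	free(filter);
	return result;
}

int main(void)
{
	/* st_filter=i915_timeline_live_selftests/live_hwsp_recycle */
	printf("%d\n", filter_match("i915_timeline_live_selftests/live_hwsp_recycle",
				    "i915_timeline_live_selftests",
				    "live_hwsp_recycle"));	/* 1: runs */
	/* st_filter=!live_breadcrumbs_smoketest */
	printf("%d\n", filter_match("!live_breadcrumbs_smoketest",
				    "i915_request_live_selftests",
				    "live_nop_request"));	/* 1: runs */
	return 0;
}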
module_param_named(st_random_seed, i915_selftest.random_seed, uint, 0400); module_param_named(st_timeout, i915_selftest.timeout_ms, uint, 0400); +module_param_named(st_filter, i915_selftest.filter, charp, 0400); module_param_named_unsafe(mock_selftests, i915_selftest.mock, int, 0400); MODULE_PARM_DESC(mock_selftests, "Run selftests before loading, using mock hardware (0:disabled [default], 1:run tests then load driver, -1:run tests then exit module)"); diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c index 19f1c6a5c8fb..12ea69b1a1e5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_timeline.c +++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c @@ -4,12 +4,155 @@ * Copyright © 2017-2018 Intel Corporation */ +#include <linux/prime_numbers.h> + #include "../i915_selftest.h" #include "i915_random.h" +#include "igt_flush_test.h" #include "mock_gem_device.h" #include "mock_timeline.h" +static struct page *hwsp_page(struct i915_timeline *tl) +{ + struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + return sg_page(obj->mm.pages->sgl); +} + +static unsigned long hwsp_cacheline(struct i915_timeline *tl) +{ + unsigned long address = (unsigned long)page_address(hwsp_page(tl)); + + return (address + tl->hwsp_offset) / CACHELINE_BYTES; +} + +#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES) + +struct mock_hwsp_freelist { + struct drm_i915_private *i915; + struct radix_tree_root cachelines; + struct i915_timeline **history; + unsigned long count, max; + struct rnd_state prng; +}; + +enum { + SHUFFLE = BIT(0), +}; + +static void __mock_hwsp_record(struct mock_hwsp_freelist *state, + unsigned int idx, + struct i915_timeline *tl) +{ + tl = xchg(&state->history[idx], tl); + if (tl) { + radix_tree_delete(&state->cachelines, hwsp_cacheline(tl)); + i915_timeline_put(tl); + } +} + +static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state, + unsigned int count, + unsigned int flags) +{ + struct i915_timeline *tl; + unsigned int idx; + + while (count--) { + unsigned long cacheline; + int err; + + tl = i915_timeline_create(state->i915, "mock", NULL); + if (IS_ERR(tl)) + return PTR_ERR(tl); + + cacheline = hwsp_cacheline(tl); + err = radix_tree_insert(&state->cachelines, cacheline, tl); + if (err) { + if (err == -EEXIST) { + pr_err("HWSP cacheline %lu already used; duplicate allocation!\n", + cacheline); + } + i915_timeline_put(tl); + return err; + } + + idx = state->count++ % state->max; + __mock_hwsp_record(state, idx, tl); + } + + if (flags & SHUFFLE) + i915_prandom_shuffle(state->history, + sizeof(*state->history), + min(state->count, state->max), + &state->prng); + + count = i915_prandom_u32_max_state(min(state->count, state->max), + &state->prng); + while (count--) { + idx = --state->count % state->max; + __mock_hwsp_record(state, idx, NULL); + } + + return 0; +} + +static int mock_hwsp_freelist(void *arg) +{ + struct mock_hwsp_freelist state; + const struct { + const char *name; + unsigned int flags; + } phases[] = { + { "linear", 0 }, + { "shuffled", SHUFFLE }, + { }, + }, *p; + unsigned int na; + int err = 0; + + INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL); + state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed); + + state.i915 = mock_gem_device(); + if (!state.i915) + return -ENOMEM; + + /* + * Create a bunch of timelines and check that their HWSP do not overlap. + * Free some, and try again. 
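For scale: hwsp_cacheline() above folds the timeline's HWSP page address and hwsp_offset into a single global cacheline index, which this test then uses as its radix-tree key to detect double allocation. With i915's 64-byte CACHELINE_BYTES and a typical 4 KiB page, CACHELINES_PER_PAGE = 4096 / 64 = 64, so walking prime counts up to 2 * CACHELINES_PER_PAGE = 128 (as the loop just below does) deliberately exercises both a partially used and a fully recycled HWSP page; the exact figure scales with PAGE_SIZE on other configurations.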
+ */ + + state.max = PAGE_SIZE / sizeof(*state.history); + state.count = 0; + state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL); + if (!state.history) { + err = -ENOMEM; + goto err_put; + } + + mutex_lock(&state.i915->drm.struct_mutex); + for (p = phases; p->name; p++) { + pr_debug("%s(%s)\n", __func__, p->name); + for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) { + err = __mock_hwsp_timeline(&state, na, p->flags); + if (err) + goto out; + } + } + +out: + for (na = 0; na < state.max; na++) + __mock_hwsp_record(&state, na, NULL); + mutex_unlock(&state.i915->drm.struct_mutex); + kfree(state.history); +err_put: + drm_dev_put(&state.i915->drm); + return err; +} + struct __igt_sync { const char *name; u32 seqno; @@ -256,12 +399,331 @@ static int bench_sync(void *arg) return 0; } -int i915_gem_timeline_mock_selftests(void) +int i915_timeline_mock_selftests(void) { static const struct i915_subtest tests[] = { + SUBTEST(mock_hwsp_freelist), SUBTEST(igt_sync), SUBTEST(bench_sync), }; return i915_subtests(tests, NULL); } + +static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + if (INTEL_GEN(rq->i915) >= 8) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = addr; + *cs++ = 0; + *cs++ = value; + } else if (INTEL_GEN(rq->i915) >= 4) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = 0; + *cs++ = addr; + *cs++ = value; + } else { + *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + *cs++ = addr; + *cs++ = value; + *cs++ = MI_NOOP; + } + + intel_ring_advance(rq, cs); + + return 0; +} + +static struct i915_request * +tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value) +{ + struct i915_request *rq; + int err; + + lockdep_assert_held(&tl->i915->drm.struct_mutex); /* lazy rq refs */ + + err = i915_timeline_pin(tl); + if (err) { + rq = ERR_PTR(err); + goto out; + } + + rq = i915_request_alloc(engine, engine->i915->kernel_context); + if (IS_ERR(rq)) + goto out_unpin; + + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value); + i915_request_add(rq); + if (err) + rq = ERR_PTR(err); + +out_unpin: + i915_timeline_unpin(tl); +out: + if (IS_ERR(rq)) + pr_err("Failed to write to timeline!\n"); + return rq; +} + +static struct i915_timeline * +checked_i915_timeline_create(struct drm_i915_private *i915) +{ + struct i915_timeline *tl; + + tl = i915_timeline_create(i915, "live", NULL); + if (IS_ERR(tl)) + return tl; + + if (*tl->hwsp_seqno != tl->seqno) { + pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n", + *tl->hwsp_seqno, tl->seqno); + i915_timeline_put(tl); + return ERR_PTR(-EINVAL); + } + + return tl; +} + +static int live_hwsp_engine(void *arg) +{ +#define NUM_TIMELINES 4096 + struct drm_i915_private *i915 = arg; + struct i915_timeline **timelines; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + unsigned long count, n; + int err = 0; + + /* + * Create a bunch of timelines and check we can write + * independently to each of their breadcrumb slots. 
+ */ + + timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, + sizeof(*timelines), + GFP_KERNEL); + if (!timelines) + return -ENOMEM; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + count = 0; + for_each_engine(engine, i915, id) { + if (!intel_engine_can_store_dword(engine)) + continue; + + for (n = 0; n < NUM_TIMELINES; n++) { + struct i915_timeline *tl; + struct i915_request *rq; + + tl = checked_i915_timeline_create(i915); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out; + } + + rq = tl_write(tl, engine, count); + if (IS_ERR(rq)) { + i915_timeline_put(tl); + err = PTR_ERR(rq); + goto out; + } + + timelines[count++] = tl; + } + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + for (n = 0; n < count; n++) { + struct i915_timeline *tl = timelines[n]; + + if (!err && *tl->hwsp_seqno != n) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + n, *tl->hwsp_seqno); + err = -EINVAL; + } + i915_timeline_put(tl); + } + + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + kvfree(timelines); + + return err; +#undef NUM_TIMELINES +} + +static int live_hwsp_alternate(void *arg) +{ +#define NUM_TIMELINES 4096 + struct drm_i915_private *i915 = arg; + struct i915_timeline **timelines; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + unsigned long count, n; + int err = 0; + + /* + * Create a bunch of timelines and check we can write + * independently to each of their breadcrumb slots with adjacent + * engines. + */ + + timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, + sizeof(*timelines), + GFP_KERNEL); + if (!timelines) + return -ENOMEM; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + count = 0; + for (n = 0; n < NUM_TIMELINES; n++) { + for_each_engine(engine, i915, id) { + struct i915_timeline *tl; + struct i915_request *rq; + + if (!intel_engine_can_store_dword(engine)) + continue; + + tl = checked_i915_timeline_create(i915); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out; + } + + rq = tl_write(tl, engine, count); + if (IS_ERR(rq)) { + i915_timeline_put(tl); + err = PTR_ERR(rq); + goto out; + } + + timelines[count++] = tl; + } + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + for (n = 0; n < count; n++) { + struct i915_timeline *tl = timelines[n]; + + if (!err && *tl->hwsp_seqno != n) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + n, *tl->hwsp_seqno); + err = -EINVAL; + } + i915_timeline_put(tl); + } + + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + kvfree(timelines); + + return err; +#undef NUM_TIMELINES +} + +static int live_hwsp_recycle(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + unsigned long count; + int err = 0; + + /* + * Check seqno writes into one timeline at a time. We expect to + * recycle the breadcrumb slot between iterations and neither + * want to confuse ourselves or the GPU. 
+ */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + count = 0; + for_each_engine(engine, i915, id) { + IGT_TIMEOUT(end_time); + + if (!intel_engine_can_store_dword(engine)) + continue; + + do { + struct i915_timeline *tl; + struct i915_request *rq; + + tl = checked_i915_timeline_create(i915); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out; + } + + rq = tl_write(tl, engine, count); + if (IS_ERR(rq)) { + i915_timeline_put(tl); + err = PTR_ERR(rq); + goto out; + } + + if (i915_request_wait(rq, + I915_WAIT_LOCKED, + HZ / 5) < 0) { + pr_err("Wait for timeline writes timed out!\n"); + i915_timeline_put(tl); + err = -EIO; + goto out; + } + + if (*tl->hwsp_seqno != count) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + count, *tl->hwsp_seqno); + err = -EINVAL; + } + + i915_timeline_put(tl); + count++; + + if (err) + goto out; + + i915_timelines_park(i915); /* Encourage recycling! */ + } while (!__igt_timeout(end_time, NULL)); + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + return err; +} + +int i915_timeline_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_hwsp_recycle), + SUBTEST(live_hwsp_engine), + SUBTEST(live_hwsp_alternate), + }; + + return i915_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index ffa74290e054..cf1de82741fa 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -28,6 +28,7 @@ #include "mock_gem_device.h" #include "mock_context.h" +#include "mock_gtt.h" static bool assert_vma(struct i915_vma *vma, struct drm_i915_gem_object *obj, @@ -141,7 +142,8 @@ static int create_vmas(struct drm_i915_private *i915, static int igt_vma_create(void *arg) { - struct drm_i915_private *i915 = arg; + struct i915_ggtt *ggtt = arg; + struct drm_i915_private *i915 = ggtt->vm.i915; struct drm_i915_gem_object *obj, *on; struct i915_gem_context *ctx, *cn; unsigned long num_obj, num_ctx; @@ -245,7 +247,7 @@ static bool assert_pin_einval(const struct i915_vma *vma, static int igt_vma_pin1(void *arg) { - struct drm_i915_private *i915 = arg; + struct i915_ggtt *ggtt = arg; const struct pin_mode modes[] = { #define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " } #define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" } @@ -256,30 +258,30 @@ static int igt_vma_pin1(void *arg) VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 4096), VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192), - VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), - VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), - VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)), - - VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)), - INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end), - VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)), - INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total), + VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)), + VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)), + VALID(0, PIN_GLOBAL | 
PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)), + + VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (ggtt->mappable_end - 4096)), + INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | ggtt->mappable_end), + VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)), + INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | ggtt->vm.total), INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)), VALID(4096, PIN_GLOBAL), VALID(8192, PIN_GLOBAL), - VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE), - VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE), - NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE), - VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL), - VALID(i915->ggtt.vm.total, PIN_GLOBAL), - NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL), + VALID(ggtt->mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE), + VALID(ggtt->mappable_end, PIN_GLOBAL | PIN_MAPPABLE), + NOSPACE(ggtt->mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE), + VALID(ggtt->vm.total - 4096, PIN_GLOBAL), + VALID(ggtt->vm.total, PIN_GLOBAL), + NOSPACE(ggtt->vm.total + 4096, PIN_GLOBAL), NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL), - INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)), - INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)), + INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (ggtt->mappable_end - 4096)), + INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)), INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)), - VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), + VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)), #if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) /* Misusing BIAS is a programming error (it is not controllable @@ -287,10 +289,10 @@ static int igt_vma_pin1(void *arg) * However, the tests are still quite interesting for checking * variable start, end and size. */ - NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end), - NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total), - NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), - NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)), + NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | ggtt->mappable_end), + NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | ggtt->vm.total), + NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)), + NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)), #endif { }, #undef NOSPACE @@ -306,13 +308,13 @@ static int igt_vma_pin1(void *arg) * focusing on error handling of boundary conditions. 
*/ - GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm)); + GEM_BUG_ON(!drm_mm_clean(&ggtt->vm.mm)); - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); - vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = checked_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) goto out; @@ -403,8 +405,8 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a, static int igt_vma_rotate(void *arg) { - struct drm_i915_private *i915 = arg; - struct i915_address_space *vm = &i915->ggtt.vm; + struct i915_ggtt *ggtt = arg; + struct i915_address_space *vm = &ggtt->vm; struct drm_i915_gem_object *obj; const struct intel_rotation_plane_info planes[] = { { .width = 1, .height = 1, .stride = 1 }, @@ -431,7 +433,7 @@ static int igt_vma_rotate(void *arg) * that the page layout within the rotated VMA match our expectations. */ - obj = i915_gem_object_create_internal(i915, max_pages * PAGE_SIZE); + obj = i915_gem_object_create_internal(vm->i915, max_pages * PAGE_SIZE); if (IS_ERR(obj)) goto out; @@ -602,8 +604,8 @@ static bool assert_pin(struct i915_vma *vma, static int igt_vma_partial(void *arg) { - struct drm_i915_private *i915 = arg; - struct i915_address_space *vm = &i915->ggtt.vm; + struct i915_ggtt *ggtt = arg; + struct i915_address_space *vm = &ggtt->vm; const unsigned int npages = 1021; /* prime! */ struct drm_i915_gem_object *obj; const struct phase { @@ -621,7 +623,7 @@ static int igt_vma_partial(void *arg) * we are returned the same VMA when we later request the same range. */ - obj = i915_gem_object_create_internal(i915, npages*PAGE_SIZE); + obj = i915_gem_object_create_internal(vm->i915, npages * PAGE_SIZE); if (IS_ERR(obj)) goto out; @@ -670,7 +672,7 @@ static int igt_vma_partial(void *arg) } count = 0; - list_for_each_entry(vma, &obj->vma_list, obj_link) + list_for_each_entry(vma, &obj->vma.list, obj_link) count++; if (count != nvma) { pr_err("(%s) All partial vma were not recorded on the obj->vma_list: found %u, expected %u\n", @@ -699,7 +701,7 @@ static int igt_vma_partial(void *arg) i915_vma_unpin(vma); count = 0; - list_for_each_entry(vma, &obj->vma_list, obj_link) + list_for_each_entry(vma, &obj->vma.list, obj_link) count++; if (count != nvma) { pr_err("(%s) allocated an extra full vma!\n", p->name); @@ -723,17 +725,24 @@ int i915_vma_mock_selftests(void) SUBTEST(igt_vma_partial), }; struct drm_i915_private *i915; + struct i915_ggtt ggtt; int err; i915 = mock_gem_device(); if (!i915) return -ENOMEM; + mock_init_ggtt(i915, &ggtt); + mutex_lock(&i915->drm.struct_mutex); - err = i915_subtests(tests, i915); + err = i915_subtests(tests, &ggtt); + mock_device_flush(i915); mutex_unlock(&i915->drm.struct_mutex); + i915_gem_drain_freed_objects(i915); + + mock_fini_ggtt(&ggtt); drm_dev_put(&i915->drm); + return err; } - diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c new file mode 100644 index 000000000000..3e902761cd16 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include "../i915_drv.h" + +#include "../i915_selftest.h" +#include "igt_flush_test.h" +#include "igt_live_test.h" + +int igt_live_test_begin(struct igt_live_test *t, + struct drm_i915_private *i915, + const char *func, + const char *name) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err; + + 
lockdep_assert_held(&i915->drm.struct_mutex); + + t->i915 = i915; + t->func = func; + t->name = name; + + err = i915_gem_wait_for_idle(i915, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + if (err) { + pr_err("%s(%s): failed to idle before, with err=%d!", + func, name, err); + return err; + } + + t->reset_global = i915_reset_count(&i915->gpu_error); + + for_each_engine(engine, i915, id) + t->reset_engine[id] = + i915_reset_engine_count(&i915->gpu_error, engine); + + return 0; +} + +int igt_live_test_end(struct igt_live_test *t) +{ + struct drm_i915_private *i915 = t->i915; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + lockdep_assert_held(&i915->drm.struct_mutex); + + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + return -EIO; + + if (t->reset_global != i915_reset_count(&i915->gpu_error)) { + pr_err("%s(%s): GPU was reset %d times!\n", + t->func, t->name, + i915_reset_count(&i915->gpu_error) - t->reset_global); + return -EIO; + } + + for_each_engine(engine, i915, id) { + if (t->reset_engine[id] == + i915_reset_engine_count(&i915->gpu_error, engine)) + continue; + + pr_err("%s(%s): engine '%s' was reset %d times!\n", + t->func, t->name, engine->name, + i915_reset_engine_count(&i915->gpu_error, engine) - + t->reset_engine[id]); + return -EIO; + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h new file mode 100644 index 000000000000..c0e9f99d50de --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef IGT_LIVE_TEST_H +#define IGT_LIVE_TEST_H + +#include "../i915_gem.h" + +struct drm_i915_private; + +struct igt_live_test { + struct drm_i915_private *i915; + const char *func; + const char *name; + + unsigned int reset_global; + unsigned int reset_engine[I915_NUM_ENGINES]; +}; + +/* + * Flush the GPU state before and after the test to ensure that no residual + * code is running on the GPU that may affect this test. Also compare the + * state before and after the test and alert if it unexpectedly changes, + * e.g. if the GPU was reset. 
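In practice the converted callers above (live_nop_request() and friends) bracket their work with these helpers roughly as follows. This is only a schematic sketch of that calling convention, assuming the i915 selftest environment, not standalone-buildable code:

static int live_example(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct igt_live_test t;
	intel_wakeref_t wakeref;
	int err;

	/* Both helpers assert that struct_mutex is held. */
	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	err = igt_live_test_begin(&t, i915, __func__, "example");
	if (err)
		goto out_unlock;

	/* ... submit requests and wait for them here ... */

	/* Flushes outstanding work and fails on any unexpected GPU reset. */
	err = igt_live_test_end(&t);

out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}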
+ */ +int igt_live_test_begin(struct igt_live_test *t, + struct drm_i915_private *i915, + const char *func, + const char *name); +int igt_live_test_end(struct igt_live_test *t); + +#endif /* IGT_LIVE_TEST_H */ diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 8cd34f6e6859..9ebd9225684e 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -68,48 +68,65 @@ static u64 hws_address(const struct i915_vma *hws, return hws->node.start + seqno_offset(rq->fence.context); } -static int emit_recurse_batch(struct igt_spinner *spin, - struct i915_request *rq, - u32 arbitration_command) +static int move_to_active(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags) { - struct i915_address_space *vm = &rq->gem_context->ppgtt->vm; + int err; + + err = i915_vma_move_to_active(vma, rq, flags); + if (err) + return err; + + if (!i915_gem_object_has_active_reference(vma->obj)) { + i915_gem_object_get(vma->obj); + i915_gem_object_set_active_reference(vma->obj); + } + + return 0; +} + +struct i915_request * +igt_spinner_create_request(struct igt_spinner *spin, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + u32 arbitration_command) +{ + struct i915_address_space *vm = &ctx->ppgtt->vm; + struct i915_request *rq = NULL; struct i915_vma *hws, *vma; u32 *batch; int err; vma = i915_vma_instance(spin->obj, vm, NULL); if (IS_ERR(vma)) - return PTR_ERR(vma); + return ERR_CAST(vma); hws = i915_vma_instance(spin->hws, vm, NULL); if (IS_ERR(hws)) - return PTR_ERR(hws); + return ERR_CAST(hws); err = i915_vma_pin(vma, 0, 0, PIN_USER); if (err) - return err; + return ERR_PTR(err); err = i915_vma_pin(hws, 0, 0, PIN_USER); if (err) goto unpin_vma; - err = i915_vma_move_to_active(vma, rq, 0); - if (err) + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); goto unpin_hws; - - if (!i915_gem_object_has_active_reference(vma->obj)) { - i915_gem_object_get(vma->obj); - i915_gem_object_set_active_reference(vma->obj); } - err = i915_vma_move_to_active(hws, rq, 0); + err = move_to_active(vma, rq, 0); if (err) - goto unpin_hws; + goto cancel_rq; - if (!i915_gem_object_has_active_reference(hws->obj)) { - i915_gem_object_get(hws->obj); - i915_gem_object_set_active_reference(hws->obj); - } + err = move_to_active(hws, rq, 0); + if (err) + goto cancel_rq; batch = spin->batch; @@ -127,35 +144,18 @@ static int emit_recurse_batch(struct igt_spinner *spin, i915_gem_chipset_flush(spin->i915); - err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); + err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); +cancel_rq: + if (err) { + i915_request_skip(rq, err); + i915_request_add(rq); + } unpin_hws: i915_vma_unpin(hws); unpin_vma: i915_vma_unpin(vma); - return err; -} - -struct i915_request * -igt_spinner_create_request(struct igt_spinner *spin, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - u32 arbitration_command) -{ - struct i915_request *rq; - int err; - - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) - return rq; - - err = emit_recurse_batch(spin, rq, arbitration_command); - if (err) { - i915_request_add(rq); - return ERR_PTR(err); - } - - return rq; + return err ? 
ERR_PTR(err) : rq; } static u32 @@ -185,11 +185,6 @@ void igt_spinner_fini(struct igt_spinner *spin) bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq) { - if (!wait_event_timeout(rq->execute, - READ_ONCE(rq->global_seqno), - msecs_to_jiffies(10))) - return false; - return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq), rq->fence.seqno), 10) && diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c deleted file mode 100644 index f03b407fdbe2..000000000000 --- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c +++ /dev/null @@ -1,470 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include "../i915_selftest.h" -#include "i915_random.h" - -#include "mock_gem_device.h" -#include "mock_engine.h" - -static int check_rbtree(struct intel_engine_cs *engine, - const unsigned long *bitmap, - const struct intel_wait *waiters, - const int count) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct rb_node *rb; - int n; - - if (&b->irq_wait->node != rb_first(&b->waiters)) { - pr_err("First waiter does not match first element of wait-tree\n"); - return -EINVAL; - } - - n = find_first_bit(bitmap, count); - for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { - struct intel_wait *w = container_of(rb, typeof(*w), node); - int idx = w - waiters; - - if (!test_bit(idx, bitmap)) { - pr_err("waiter[%d, seqno=%d] removed but still in wait-tree\n", - idx, w->seqno); - return -EINVAL; - } - - if (n != idx) { - pr_err("waiter[%d, seqno=%d] does not match expected next element in tree [%d]\n", - idx, w->seqno, n); - return -EINVAL; - } - - n = find_next_bit(bitmap, count, n + 1); - } - - return 0; -} - -static int check_completion(struct intel_engine_cs *engine, - const unsigned long *bitmap, - const struct intel_wait *waiters, - const int count) -{ - int n; - - for (n = 0; n < count; n++) { - if (intel_wait_complete(&waiters[n]) != !!test_bit(n, bitmap)) - continue; - - pr_err("waiter[%d, seqno=%d] is %s, but expected %s\n", - n, waiters[n].seqno, - intel_wait_complete(&waiters[n]) ? "complete" : "active", - test_bit(n, bitmap) ? 
"active" : "complete"); - return -EINVAL; - } - - return 0; -} - -static int check_rbtree_empty(struct intel_engine_cs *engine) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - - if (b->irq_wait) { - pr_err("Empty breadcrumbs still has a waiter\n"); - return -EINVAL; - } - - if (!RB_EMPTY_ROOT(&b->waiters)) { - pr_err("Empty breadcrumbs, but wait-tree not empty\n"); - return -EINVAL; - } - - return 0; -} - -static int igt_random_insert_remove(void *arg) -{ - const u32 seqno_bias = 0x1000; - I915_RND_STATE(prng); - struct intel_engine_cs *engine = arg; - struct intel_wait *waiters; - const int count = 4096; - unsigned int *order; - unsigned long *bitmap; - int err = -ENOMEM; - int n; - - mock_engine_reset(engine); - - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); - if (!waiters) - goto out_engines; - - bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), - GFP_KERNEL); - if (!bitmap) - goto out_waiters; - - order = i915_random_order(count, &prng); - if (!order) - goto out_bitmap; - - for (n = 0; n < count; n++) - intel_wait_init_for_seqno(&waiters[n], seqno_bias + n); - - err = check_rbtree(engine, bitmap, waiters, count); - if (err) - goto out_order; - - /* Add and remove waiters into the rbtree in random order. At each - * step, we verify that the rbtree is correctly ordered. - */ - for (n = 0; n < count; n++) { - int i = order[n]; - - intel_engine_add_wait(engine, &waiters[i]); - __set_bit(i, bitmap); - - err = check_rbtree(engine, bitmap, waiters, count); - if (err) - goto out_order; - } - - i915_random_reorder(order, count, &prng); - for (n = 0; n < count; n++) { - int i = order[n]; - - intel_engine_remove_wait(engine, &waiters[i]); - __clear_bit(i, bitmap); - - err = check_rbtree(engine, bitmap, waiters, count); - if (err) - goto out_order; - } - - err = check_rbtree_empty(engine); -out_order: - kfree(order); -out_bitmap: - kfree(bitmap); -out_waiters: - kvfree(waiters); -out_engines: - mock_engine_flush(engine); - return err; -} - -static int igt_insert_complete(void *arg) -{ - const u32 seqno_bias = 0x1000; - struct intel_engine_cs *engine = arg; - struct intel_wait *waiters; - const int count = 4096; - unsigned long *bitmap; - int err = -ENOMEM; - int n, m; - - mock_engine_reset(engine); - - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); - if (!waiters) - goto out_engines; - - bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), - GFP_KERNEL); - if (!bitmap) - goto out_waiters; - - for (n = 0; n < count; n++) { - intel_wait_init_for_seqno(&waiters[n], n + seqno_bias); - intel_engine_add_wait(engine, &waiters[n]); - __set_bit(n, bitmap); - } - err = check_rbtree(engine, bitmap, waiters, count); - if (err) - goto out_bitmap; - - /* On each step, we advance the seqno so that several waiters are then - * complete (we increase the seqno by increasingly larger values to - * retire more and more waiters at once). All retired waiters should - * be woken and removed from the rbtree, and so that we check. 
- */ - for (n = 0; n < count; n = m) { - int seqno = 2 * n; - - GEM_BUG_ON(find_first_bit(bitmap, count) != n); - - if (intel_wait_complete(&waiters[n])) { - pr_err("waiter[%d, seqno=%d] completed too early\n", - n, waiters[n].seqno); - err = -EINVAL; - goto out_bitmap; - } - - /* complete the following waiters */ - mock_seqno_advance(engine, seqno + seqno_bias); - for (m = n; m <= seqno; m++) { - if (m == count) - break; - - GEM_BUG_ON(!test_bit(m, bitmap)); - __clear_bit(m, bitmap); - } - - intel_engine_remove_wait(engine, &waiters[n]); - RB_CLEAR_NODE(&waiters[n].node); - - err = check_rbtree(engine, bitmap, waiters, count); - if (err) { - pr_err("rbtree corrupt after seqno advance to %d\n", - seqno + seqno_bias); - goto out_bitmap; - } - - err = check_completion(engine, bitmap, waiters, count); - if (err) { - pr_err("completions after seqno advance to %d failed\n", - seqno + seqno_bias); - goto out_bitmap; - } - } - - err = check_rbtree_empty(engine); -out_bitmap: - kfree(bitmap); -out_waiters: - kvfree(waiters); -out_engines: - mock_engine_flush(engine); - return err; -} - -struct igt_wakeup { - struct task_struct *tsk; - atomic_t *ready, *set, *done; - struct intel_engine_cs *engine; - unsigned long flags; -#define STOP 0 -#define IDLE 1 - wait_queue_head_t *wq; - u32 seqno; -}; - -static bool wait_for_ready(struct igt_wakeup *w) -{ - DEFINE_WAIT(ready); - - set_bit(IDLE, &w->flags); - if (atomic_dec_and_test(w->done)) - wake_up_var(w->done); - - if (test_bit(STOP, &w->flags)) - goto out; - - for (;;) { - prepare_to_wait(w->wq, &ready, TASK_INTERRUPTIBLE); - if (atomic_read(w->ready) == 0) - break; - - schedule(); - } - finish_wait(w->wq, &ready); - -out: - clear_bit(IDLE, &w->flags); - if (atomic_dec_and_test(w->set)) - wake_up_var(w->set); - - return !test_bit(STOP, &w->flags); -} - -static int igt_wakeup_thread(void *arg) -{ - struct igt_wakeup *w = arg; - struct intel_wait wait; - - while (wait_for_ready(w)) { - GEM_BUG_ON(kthread_should_stop()); - - intel_wait_init_for_seqno(&wait, w->seqno); - intel_engine_add_wait(w->engine, &wait); - for (;;) { - set_current_state(TASK_UNINTERRUPTIBLE); - if (i915_seqno_passed(intel_engine_get_seqno(w->engine), - w->seqno)) - break; - - if (test_bit(STOP, &w->flags)) /* emergency escape */ - break; - - schedule(); - } - intel_engine_remove_wait(w->engine, &wait); - __set_current_state(TASK_RUNNING); - } - - return 0; -} - -static void igt_wake_all_sync(atomic_t *ready, - atomic_t *set, - atomic_t *done, - wait_queue_head_t *wq, - int count) -{ - atomic_set(set, count); - atomic_set(ready, 0); - wake_up_all(wq); - - wait_var_event(set, !atomic_read(set)); - atomic_set(ready, count); - atomic_set(done, count); -} - -static int igt_wakeup(void *arg) -{ - I915_RND_STATE(prng); - struct intel_engine_cs *engine = arg; - struct igt_wakeup *waiters; - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); - const int count = 4096; - const u32 max_seqno = count / 4; - atomic_t ready, set, done; - int err = -ENOMEM; - int n, step; - - mock_engine_reset(engine); - - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); - if (!waiters) - goto out_engines; - - /* Create a large number of threads, each waiting on a random seqno. - * Multiple waiters will be waiting for the same seqno. 
- */ - atomic_set(&ready, count); - for (n = 0; n < count; n++) { - waiters[n].wq = &wq; - waiters[n].ready = &ready; - waiters[n].set = &set; - waiters[n].done = &done; - waiters[n].engine = engine; - waiters[n].flags = BIT(IDLE); - - waiters[n].tsk = kthread_run(igt_wakeup_thread, &waiters[n], - "i915/igt:%d", n); - if (IS_ERR(waiters[n].tsk)) - goto out_waiters; - - get_task_struct(waiters[n].tsk); - } - - for (step = 1; step <= max_seqno; step <<= 1) { - u32 seqno; - - /* The waiter threads start paused as we assign them a random - * seqno and reset the engine. Once the engine is reset, - * we signal that the threads may begin their wait upon their - * seqno. - */ - for (n = 0; n < count; n++) { - GEM_BUG_ON(!test_bit(IDLE, &waiters[n].flags)); - waiters[n].seqno = - 1 + prandom_u32_state(&prng) % max_seqno; - } - mock_seqno_advance(engine, 0); - igt_wake_all_sync(&ready, &set, &done, &wq, count); - - /* Simulate the GPU doing chunks of work, with one or more - * seqno appearing to finish at the same time. A random number - * of threads will be waiting upon the update and hopefully be - * woken. - */ - for (seqno = 1; seqno <= max_seqno + step; seqno += step) { - usleep_range(50, 500); - mock_seqno_advance(engine, seqno); - } - GEM_BUG_ON(intel_engine_get_seqno(engine) < 1 + max_seqno); - - /* With the seqno now beyond any of the waiting threads, they - * should all be woken, see that they are complete and signal - * that they are ready for the next test. We wait until all - * threads are complete and waiting for us (i.e. not a seqno). - */ - if (!wait_var_event_timeout(&done, - !atomic_read(&done), 10 * HZ)) { - pr_err("Timed out waiting for %d remaining waiters\n", - atomic_read(&done)); - err = -ETIMEDOUT; - break; - } - - err = check_rbtree_empty(engine); - if (err) - break; - } - -out_waiters: - for (n = 0; n < count; n++) { - if (IS_ERR(waiters[n].tsk)) - break; - - set_bit(STOP, &waiters[n].flags); - } - mock_seqno_advance(engine, INT_MAX); /* wakeup any broken waiters */ - igt_wake_all_sync(&ready, &set, &done, &wq, n); - - for (n = 0; n < count; n++) { - if (IS_ERR(waiters[n].tsk)) - break; - - kthread_stop(waiters[n].tsk); - put_task_struct(waiters[n].tsk); - } - - kvfree(waiters); -out_engines: - mock_engine_flush(engine); - return err; -} - -int intel_breadcrumbs_mock_selftests(void) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_random_insert_remove), - SUBTEST(igt_insert_complete), - SUBTEST(igt_wakeup), - }; - struct drm_i915_private *i915; - int err; - - i915 = mock_gem_device(); - if (!i915) - return -ENOMEM; - - err = i915_subtests(tests, i915->engine[RCS]); - drm_dev_put(&i915->drm); - - return err; -} diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index 32cba4cae31a..c5e0a0e98fcb 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -137,12 +137,13 @@ static bool client_doorbell_in_sync(struct intel_guc_client *client) static int igt_guc_clients(void *args) { struct drm_i915_private *dev_priv = args; + intel_wakeref_t wakeref; struct intel_guc *guc; int err = 0; GEM_BUG_ON(!HAS_GUC(dev_priv)); mutex_lock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); guc = &dev_priv->guc; if (!guc) { @@ -225,7 +226,7 @@ out: guc_clients_create(guc); guc_clients_enable(guc); unlock: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); 
return err; } @@ -238,13 +239,14 @@ unlock: static int igt_guc_doorbells(void *arg) { struct drm_i915_private *dev_priv = arg; + intel_wakeref_t wakeref; struct intel_guc *guc; int i, err = 0; u16 db_id; GEM_BUG_ON(!HAS_GUC(dev_priv)); mutex_lock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); guc = &dev_priv->guc; if (!guc) { @@ -337,7 +339,7 @@ out: guc_client_free(clients[i]); } unlock: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 40efbed611de..7b6f3bea9ef8 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -103,52 +103,87 @@ static u64 hws_address(const struct i915_vma *hws, return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context); } -static int emit_recurse_batch(struct hang *h, - struct i915_request *rq) +static int move_to_active(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags) +{ + int err; + + err = i915_vma_move_to_active(vma, rq, flags); + if (err) + return err; + + if (!i915_gem_object_has_active_reference(vma->obj)) { + i915_gem_object_get(vma->obj); + i915_gem_object_set_active_reference(vma->obj); + } + + return 0; +} + +static struct i915_request * +hang_create_request(struct hang *h, struct intel_engine_cs *engine) { struct drm_i915_private *i915 = h->i915; struct i915_address_space *vm = - rq->gem_context->ppgtt ? - &rq->gem_context->ppgtt->vm : - &i915->ggtt.vm; + h->ctx->ppgtt ? &h->ctx->ppgtt->vm : &i915->ggtt.vm; + struct i915_request *rq = NULL; struct i915_vma *hws, *vma; unsigned int flags; u32 *batch; int err; + if (i915_gem_object_is_active(h->obj)) { + struct drm_i915_gem_object *obj; + void *vaddr; + + obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vaddr = i915_gem_object_pin_map(obj, + i915_coherent_map_type(h->i915)); + if (IS_ERR(vaddr)) { + i915_gem_object_put(obj); + return ERR_CAST(vaddr); + } + + i915_gem_object_unpin_map(h->obj); + i915_gem_object_put(h->obj); + + h->obj = obj; + h->batch = vaddr; + } + vma = i915_vma_instance(h->obj, vm, NULL); if (IS_ERR(vma)) - return PTR_ERR(vma); + return ERR_CAST(vma); hws = i915_vma_instance(h->hws, vm, NULL); if (IS_ERR(hws)) - return PTR_ERR(hws); + return ERR_CAST(hws); err = i915_vma_pin(vma, 0, 0, PIN_USER); if (err) - return err; + return ERR_PTR(err); err = i915_vma_pin(hws, 0, 0, PIN_USER); if (err) goto unpin_vma; - err = i915_vma_move_to_active(vma, rq, 0); - if (err) + rq = i915_request_alloc(engine, h->ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); goto unpin_hws; - - if (!i915_gem_object_has_active_reference(vma->obj)) { - i915_gem_object_get(vma->obj); - i915_gem_object_set_active_reference(vma->obj); } - err = i915_vma_move_to_active(hws, rq, 0); + err = move_to_active(vma, rq, 0); if (err) - goto unpin_hws; + goto cancel_rq; - if (!i915_gem_object_has_active_reference(hws->obj)) { - i915_gem_object_get(hws->obj); - i915_gem_object_set_active_reference(hws->obj); - } + err = move_to_active(hws, rq, 0); + if (err) + goto cancel_rq; batch = h->batch; if (INTEL_GEN(i915) >= 8) { @@ -213,52 +248,16 @@ static int emit_recurse_batch(struct hang *h, err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); +cancel_rq: + if (err) { + i915_request_skip(rq, err); + 
i915_request_add(rq); + } unpin_hws: i915_vma_unpin(hws); unpin_vma: i915_vma_unpin(vma); - return err; -} - -static struct i915_request * -hang_create_request(struct hang *h, struct intel_engine_cs *engine) -{ - struct i915_request *rq; - int err; - - if (i915_gem_object_is_active(h->obj)) { - struct drm_i915_gem_object *obj; - void *vaddr; - - obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - vaddr = i915_gem_object_pin_map(obj, - i915_coherent_map_type(h->i915)); - if (IS_ERR(vaddr)) { - i915_gem_object_put(obj); - return ERR_CAST(vaddr); - } - - i915_gem_object_unpin_map(h->obj); - i915_gem_object_put(h->obj); - - h->obj = obj; - h->batch = vaddr; - } - - rq = i915_request_alloc(engine, h->ctx); - if (IS_ERR(rq)) - return rq; - - err = emit_recurse_batch(h, rq); - if (err) { - i915_request_add(rq); - return ERR_PTR(err); - } - - return rq; + return err ? ERR_PTR(err) : rq; } static u32 hws_seqno(const struct hang *h, const struct i915_request *rq) @@ -364,9 +363,7 @@ static int igt_global_reset(void *arg) /* Check that we can issue a global GPU reset */ igt_global_reset_lock(i915); - set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags); - mutex_lock(&i915->drm.struct_mutex); reset_count = i915_reset_count(&i915->gpu_error); i915_reset(i915, ALL_ENGINES, NULL); @@ -375,9 +372,7 @@ static int igt_global_reset(void *arg) pr_err("No GPU reset recorded!\n"); err = -EINVAL; } - mutex_unlock(&i915->drm.struct_mutex); - GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); igt_global_reset_unlock(i915); if (i915_terminally_wedged(&i915->gpu_error)) @@ -386,6 +381,29 @@ static int igt_global_reset(void *arg) return err; } +static int igt_wedged_reset(void *arg) +{ + struct drm_i915_private *i915 = arg; + intel_wakeref_t wakeref; + + /* Check that we can recover a wedged device with a GPU reset */ + + igt_global_reset_lock(i915); + wakeref = intel_runtime_pm_get(i915); + + i915_gem_set_wedged(i915); + + mutex_lock(&i915->drm.struct_mutex); + GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error)); + i915_reset(i915, ALL_ENGINES, NULL); + mutex_unlock(&i915->drm.struct_mutex); + + intel_runtime_pm_put(i915, wakeref); + igt_global_reset_unlock(i915); + + return i915_terminally_wedged(&i915->gpu_error) ? 
-EIO : 0; +} + static bool wait_for_idle(struct intel_engine_cs *engine) { return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0; @@ -431,8 +449,6 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); do { - u32 seqno = intel_engine_get_seqno(engine); - if (active) { struct i915_request *rq; @@ -451,7 +467,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); - pr_err("%s: Failed to start request %x, at %x\n", + pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(engine, &p, "%s\n", engine->name); @@ -461,8 +477,6 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) break; } - GEM_BUG_ON(!rq->global_seqno); - seqno = rq->global_seqno - 1; i915_request_put(rq); } @@ -478,16 +492,15 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) break; } - reset_engine_count += active; if (i915_reset_engine_count(&i915->gpu_error, engine) != - reset_engine_count) { - pr_err("%s engine reset %srecorded!\n", - engine->name, active ? "not " : ""); + ++reset_engine_count) { + pr_err("%s engine reset not recorded!\n", + engine->name); err = -EINVAL; break; } - if (!wait_for_idle(engine)) { + if (!i915_reset_flush(i915)) { struct drm_printer p = drm_info_printer(i915->drm.dev); @@ -552,7 +565,7 @@ static int active_request_put(struct i915_request *rq) return 0; if (i915_request_wait(rq, 0, 5 * HZ) < 0) { - GEM_TRACE("%s timed out waiting for completion of fence %llx:%d, seqno %d.\n", + GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld, seqno %d.\n", rq->engine->name, rq->fence.context, rq->fence.seqno, @@ -710,7 +723,6 @@ static int __igt_reset_engines(struct drm_i915_private *i915, set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); do { - u32 seqno = intel_engine_get_seqno(engine); struct i915_request *rq = NULL; if (flags & TEST_ACTIVE) { @@ -729,7 +741,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); - pr_err("%s: Failed to start request %x, at %x\n", + pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(engine, &p, "%s\n", engine->name); @@ -738,9 +750,6 @@ static int __igt_reset_engines(struct drm_i915_private *i915, err = -EIO; break; } - - GEM_BUG_ON(!rq->global_seqno); - seqno = rq->global_seqno - 1; } err = i915_reset_engine(engine, NULL); @@ -777,10 +786,9 @@ static int __igt_reset_engines(struct drm_i915_private *i915, reported = i915_reset_engine_count(&i915->gpu_error, engine); reported -= threads[engine->id].resets; - if (reported != (flags & TEST_ACTIVE ? count : 0)) { - pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu, expected %lu reported\n", - engine->name, test_name, count, reported, - (flags & TEST_ACTIVE ? 
count : 0)); + if (reported != count) { + pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n", + engine->name, test_name, count, reported); if (!err) err = -EINVAL; } @@ -879,20 +887,13 @@ static int igt_reset_engines(void *arg) return 0; } -static u32 fake_hangcheck(struct i915_request *rq, u32 mask) +static u32 fake_hangcheck(struct drm_i915_private *i915, u32 mask) { - struct i915_gpu_error *error = &rq->i915->gpu_error; - u32 reset_count = i915_reset_count(error); - - error->stalled_mask = mask; - - /* set_bit() must be after we have setup the backchannel (mask) */ - smp_mb__before_atomic(); - set_bit(I915_RESET_HANDOFF, &error->flags); + u32 count = i915_reset_count(&i915->gpu_error); - wake_up_all(&error->wait_queue); + i915_reset(i915, mask, NULL); - return reset_count; + return count; } static int igt_reset_wait(void *arg) @@ -928,7 +929,7 @@ static int igt_reset_wait(void *arg) if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); - pr_err("%s: Failed to start request %x, at %x\n", + pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); @@ -938,7 +939,7 @@ static int igt_reset_wait(void *arg) goto out_rq; } - reset_count = fake_hangcheck(rq, ALL_ENGINES); + reset_count = fake_hangcheck(i915, ALL_ENGINES); timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10); if (timeout < 0) { @@ -948,7 +949,6 @@ static int igt_reset_wait(void *arg) goto out_rq; } - GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); if (i915_reset_count(&i915->gpu_error) == reset_count) { pr_err("No GPU reset recorded!\n"); err = -EINVAL; @@ -1107,7 +1107,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); - pr_err("%s: Failed to start request %x, at %x\n", + pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); @@ -1127,7 +1127,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, wait_for_completion(&arg.completion); - if (wait_for(waitqueue_active(&rq->execute), 10)) { + if (wait_for(!list_empty(&rq->fence.cb_list), 10)) { struct drm_printer p = drm_info_printer(i915->drm.dev); pr_err("igt/evict_vma kthread did not wait\n"); @@ -1138,7 +1138,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, } out_reset: - fake_hangcheck(rq, intel_engine_flag(rq->engine)); + fake_hangcheck(rq->i915, intel_engine_flag(rq->engine)); if (tsk) { struct igt_wedge_me w; @@ -1302,7 +1302,7 @@ static int igt_reset_queue(void *arg) if (!wait_until_running(&h, prev)) { struct drm_printer p = drm_info_printer(i915->drm.dev); - pr_err("%s(%s): Failed to start request %x, at %x\n", + pr_err("%s(%s): Failed to start request %llx, at %x\n", __func__, engine->name, prev->fence.seqno, hws_seqno(&h, prev)); intel_engine_dump(engine, &p, @@ -1317,12 +1317,7 @@ static int igt_reset_queue(void *arg) goto fini; } - reset_count = fake_hangcheck(prev, ENGINE_MASK(id)); - - i915_reset(i915, ENGINE_MASK(id), NULL); - - GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, - &i915->gpu_error.flags)); + reset_count = fake_hangcheck(i915, ENGINE_MASK(id)); if (prev->fence.error != -EIO) { pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n", @@ -1413,7 +1408,7 @@ static int igt_handle_error(void *arg) if (!wait_until_running(&h, rq)) { 
struct drm_printer p = drm_info_printer(i915->drm.dev); - pr_err("%s: Failed to start request %x, at %x\n", + pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); @@ -1449,10 +1444,203 @@ err_unlock: return err; } +static void __preempt_begin(void) +{ + preempt_disable(); +} + +static void __preempt_end(void) +{ + preempt_enable(); +} + +static void __softirq_begin(void) +{ + local_bh_disable(); +} + +static void __softirq_end(void) +{ + local_bh_enable(); +} + +static void __hardirq_begin(void) +{ + local_irq_disable(); +} + +static void __hardirq_end(void) +{ + local_irq_enable(); +} + +struct atomic_section { + const char *name; + void (*critical_section_begin)(void); + void (*critical_section_end)(void); +}; + +static int __igt_atomic_reset_engine(struct intel_engine_cs *engine, + const struct atomic_section *p, + const char *mode) +{ + struct tasklet_struct * const t = &engine->execlists.tasklet; + int err; + + GEM_TRACE("i915_reset_engine(%s:%s) under %s\n", + engine->name, mode, p->name); + + tasklet_disable_nosync(t); + p->critical_section_begin(); + + err = i915_reset_engine(engine, NULL); + + p->critical_section_end(); + tasklet_enable(t); + + if (err) + pr_err("i915_reset_engine(%s:%s) failed under %s\n", + engine->name, mode, p->name); + + return err; +} + +static int igt_atomic_reset_engine(struct intel_engine_cs *engine, + const struct atomic_section *p) +{ + struct drm_i915_private *i915 = engine->i915; + struct i915_request *rq; + struct hang h; + int err; + + err = __igt_atomic_reset_engine(engine, p, "idle"); + if (err) + return err; + + err = hang_init(&h, i915); + if (err) + return err; + + rq = hang_create_request(&h, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (wait_until_running(&h, rq)) { + err = __igt_atomic_reset_engine(engine, p, "active"); + } else { + pr_err("%s(%s): Failed to start request %llx, at %x\n", + __func__, engine->name, + rq->fence.seqno, hws_seqno(&h, rq)); + i915_gem_set_wedged(i915); + err = -EIO; + } + + if (err == 0) { + struct igt_wedge_me w; + + igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout*/) + i915_request_wait(rq, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + if (i915_terminally_wedged(&i915->gpu_error)) + err = -EIO; + } + + i915_request_put(rq); +out: + hang_fini(&h); + return err; +} + +static void force_reset(struct drm_i915_private *i915) +{ + i915_gem_set_wedged(i915); + i915_reset(i915, 0, NULL); +} + +static int igt_atomic_reset(void *arg) +{ + static const struct atomic_section phases[] = { + { "preempt", __preempt_begin, __preempt_end }, + { "softirq", __softirq_begin, __softirq_end }, + { "hardirq", __hardirq_begin, __hardirq_end }, + { } + }; + struct drm_i915_private *i915 = arg; + intel_wakeref_t wakeref; + int err = 0; + + /* Check that the resets are usable from atomic context */ + + if (USES_GUC_SUBMISSION(i915)) + return 0; /* guc is dead; long live the guc */ + + igt_global_reset_lock(i915); + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + /* Flush any requests before we get started and check basics */ + force_reset(i915); + if (i915_terminally_wedged(&i915->gpu_error)) + goto unlock; + + if (intel_has_gpu_reset(i915)) { + const typeof(*phases) *p; + + for (p = phases; p->name; p++) { + GEM_TRACE("intel_gpu_reset under %s\n", p->name); + + p->critical_section_begin(); + err = 
intel_gpu_reset(i915, ALL_ENGINES); + p->critical_section_end(); + + if (err) { + pr_err("intel_gpu_reset failed under %s\n", + p->name); + goto out; + } + } + + force_reset(i915); + } + + if (intel_has_reset_engine(i915)) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, i915, id) { + const typeof(*phases) *p; + + for (p = phases; p->name; p++) { + err = igt_atomic_reset_engine(engine, p); + if (err) + goto out; + } + } + } + +out: + /* As we poke around the guts, do a full reset before continuing. */ + force_reset(i915); + +unlock: + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + igt_global_reset_unlock(i915); + + return err; +} + int intel_hangcheck_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_global_reset), /* attempt to recover GPU first */ + SUBTEST(igt_wedged_reset), SUBTEST(igt_hang_sanitycheck), SUBTEST(igt_reset_idle_engine), SUBTEST(igt_reset_active_engine), @@ -1463,7 +1651,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_reset_evict_ppgtt), SUBTEST(igt_reset_evict_fence), SUBTEST(igt_handle_error), + SUBTEST(igt_atomic_reset), }; + intel_wakeref_t wakeref; bool saved_hangcheck; int err; @@ -1473,8 +1663,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) if (i915_terminally_wedged(&i915->gpu_error)) return -EIO; /* we're long past hope of a successful reset */ - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); + drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */ err = i915_subtests(tests, i915); @@ -1483,7 +1674,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) mutex_unlock(&i915->drm.struct_mutex); i915_modparams.enable_hangcheck = saved_hangcheck; - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index ca461e3a5f27..58144e024751 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c @@ -4,6 +4,10 @@ * Copyright © 2018 Intel Corporation */ +#include <linux/prime_numbers.h> + +#include "../i915_reset.h" + #include "../i915_selftest.h" #include "igt_flush_test.h" #include "igt_spinner.h" @@ -18,13 +22,14 @@ static int live_sanitycheck(void *arg) struct i915_gem_context *ctx; enum intel_engine_id id; struct igt_spinner spin; + intel_wakeref_t wakeref; int err = -ENOMEM; if (!HAS_LOGICAL_RING_CONTEXTS(i915)) return 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (igt_spinner_init(&spin, i915)) goto err_unlock; @@ -65,7 +70,7 @@ err_spin: igt_spinner_fini(&spin); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -77,13 +82,14 @@ static int live_preempt(void *arg) struct igt_spinner spin_hi, spin_lo; struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = -ENOMEM; if (!HAS_LOGICAL_RING_PREEMPTION(i915)) return 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -158,7 +164,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, 
I915_WAIT_LOCKED); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -171,13 +177,14 @@ static int live_late_preempt(void *arg) struct intel_engine_cs *engine; struct i915_sched_attr attr = {}; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = -ENOMEM; if (!HAS_LOGICAL_RING_PREEMPTION(i915)) return 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -251,7 +258,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -263,6 +270,243 @@ err_wedged: goto err_ctx_lo; } +struct preempt_client { + struct igt_spinner spin; + struct i915_gem_context *ctx; +}; + +static int preempt_client_init(struct drm_i915_private *i915, + struct preempt_client *c) +{ + c->ctx = kernel_context(i915); + if (!c->ctx) + return -ENOMEM; + + if (igt_spinner_init(&c->spin, i915)) + goto err_ctx; + + return 0; + +err_ctx: + kernel_context_close(c->ctx); + return -ENOMEM; +} + +static void preempt_client_fini(struct preempt_client *c) +{ + igt_spinner_fini(&c->spin); + kernel_context_close(c->ctx); +} + +static int live_suppress_self_preempt(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX) + }; + struct preempt_client a, b; + enum intel_engine_id id; + intel_wakeref_t wakeref; + int err = -ENOMEM; + + /* + * Verify that if a preemption request does not cause a change in + * the current execution order, the preempt-to-idle injection is + * skipped and that we do not accidentally apply it after the CS + * completion event. 
+ */ + + if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + return 0; + + if (USES_GUC_SUBMISSION(i915)) + return 0; /* presume black blox */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + if (preempt_client_init(i915, &a)) + goto err_unlock; + if (preempt_client_init(i915, &b)) + goto err_client_a; + + for_each_engine(engine, i915, id) { + struct i915_request *rq_a, *rq_b; + int depth; + + engine->execlists.preempt_hang.count = 0; + + rq_a = igt_spinner_create_request(&a.spin, + a.ctx, engine, + MI_NOOP); + if (IS_ERR(rq_a)) { + err = PTR_ERR(rq_a); + goto err_client_b; + } + + i915_request_add(rq_a); + if (!igt_wait_for_spinner(&a.spin, rq_a)) { + pr_err("First client failed to start\n"); + goto err_wedged; + } + + for (depth = 0; depth < 8; depth++) { + rq_b = igt_spinner_create_request(&b.spin, + b.ctx, engine, + MI_NOOP); + if (IS_ERR(rq_b)) { + err = PTR_ERR(rq_b); + goto err_client_b; + } + i915_request_add(rq_b); + + GEM_BUG_ON(i915_request_completed(rq_a)); + engine->schedule(rq_a, &attr); + igt_spinner_end(&a.spin); + + if (!igt_wait_for_spinner(&b.spin, rq_b)) { + pr_err("Second client failed to start\n"); + goto err_wedged; + } + + swap(a, b); + rq_a = rq_b; + } + igt_spinner_end(&a.spin); + + if (engine->execlists.preempt_hang.count) { + pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n", + engine->execlists.preempt_hang.count, + depth); + err = -EINVAL; + goto err_client_b; + } + + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + goto err_wedged; + } + + err = 0; +err_client_b: + preempt_client_fini(&b); +err_client_a: + preempt_client_fini(&a); +err_unlock: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + return err; + +err_wedged: + igt_spinner_end(&b.spin); + igt_spinner_end(&a.spin); + i915_gem_set_wedged(i915); + err = -EIO; + goto err_client_b; +} + +static int live_chain_preempt(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct preempt_client hi, lo; + enum intel_engine_id id; + intel_wakeref_t wakeref; + int err = -ENOMEM; + + /* + * Build a chain AB...BA between two contexts (A, B) and request + * preemption of the last request. It should then complete before + * the previously submitted spinner in B. + */ + + if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + return 0; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + if (preempt_client_init(i915, &hi)) + goto err_unlock; + + if (preempt_client_init(i915, &lo)) + goto err_client_hi; + + for_each_engine(engine, i915, id) { + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), + }; + int count, i; + + for_each_prime_number_from(count, 1, 32) { /* must fit ring! 
*/ + struct i915_request *rq; + + rq = igt_spinner_create_request(&hi.spin, + hi.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + if (!igt_wait_for_spinner(&hi.spin, rq)) + goto err_wedged; + + rq = igt_spinner_create_request(&lo.spin, + lo.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + + for (i = 0; i < count; i++) { + rq = i915_request_alloc(engine, lo.ctx); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + } + + rq = i915_request_alloc(engine, hi.ctx); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + engine->schedule(rq, &attr); + + igt_spinner_end(&hi.spin); + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(i915->drm.dev); + + pr_err("Failed to preempt over chain of %d\n", + count); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + goto err_wedged; + } + igt_spinner_end(&lo.spin); + } + } + + err = 0; +err_client_lo: + preempt_client_fini(&lo); +err_client_hi: + preempt_client_fini(&hi); +err_unlock: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + return err; + +err_wedged: + igt_spinner_end(&hi.spin); + igt_spinner_end(&lo.spin); + i915_gem_set_wedged(i915); + err = -EIO; + goto err_client_lo; +} + static int live_preempt_hang(void *arg) { struct drm_i915_private *i915 = arg; @@ -270,6 +514,7 @@ static int live_preempt_hang(void *arg) struct igt_spinner spin_hi, spin_lo; struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = -ENOMEM; if (!HAS_LOGICAL_RING_PREEMPTION(i915)) @@ -279,7 +524,7 @@ static int live_preempt_hang(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -374,7 +619,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -522,7 +767,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", count, flags, - INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext); + RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext); return 0; } @@ -550,7 +795,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags) pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", count, flags, - INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext); + RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext); return 0; } @@ -562,6 +807,7 @@ static int live_preempt_smoke(void *arg) .ncontext = 1024, }; const unsigned int phase[] = { 0, BATCH }; + intel_wakeref_t wakeref; int err = -ENOMEM; u32 *cs; int n; @@ -576,7 +822,7 @@ static int live_preempt_smoke(void *arg) return -ENOMEM; mutex_lock(&smoke.i915->drm.struct_mutex); - intel_runtime_pm_get(smoke.i915); + wakeref = intel_runtime_pm_get(smoke.i915); smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE); if (IS_ERR(smoke.batch)) { @@ -627,7 +873,7 @@ err_ctx: err_batch: i915_gem_object_put(smoke.batch); err_unlock: - intel_runtime_pm_put(smoke.i915); + intel_runtime_pm_put(smoke.i915, wakeref); mutex_unlock(&smoke.i915->drm.struct_mutex); kfree(smoke.contexts); @@ -640,6 +886,8 @@ 
int intel_execlists_live_selftests(struct drm_i915_private *i915) SUBTEST(live_sanitycheck), SUBTEST(live_preempt), SUBTEST(live_late_preempt), + SUBTEST(live_suppress_self_preempt), + SUBTEST(live_chain_preempt), SUBTEST(live_preempt_hang), SUBTEST(live_preempt_smoke), }; diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index 67017d5175b8..b15c4f26c593 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -5,6 +5,7 @@ */ #include "../i915_selftest.h" +#include "../i915_reset.h" #include "igt_flush_test.h" #include "igt_reset.h" @@ -12,13 +13,59 @@ #include "igt_wedge_me.h" #include "mock_context.h" +#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4) +struct wa_lists { + struct i915_wa_list gt_wa_list; + struct { + char name[REF_NAME_MAX]; + struct i915_wa_list wa_list; + } engine[I915_NUM_ENGINES]; +}; + +static void +reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + memset(lists, 0, sizeof(*lists)); + + wa_init_start(&lists->gt_wa_list, "GT_REF"); + gt_init_workarounds(i915, &lists->gt_wa_list); + wa_init_finish(&lists->gt_wa_list); + + for_each_engine(engine, i915, id) { + struct i915_wa_list *wal = &lists->engine[id].wa_list; + char *name = lists->engine[id].name; + + snprintf(name, REF_NAME_MAX, "%s_REF", engine->name); + + wa_init_start(wal, name); + engine_init_workarounds(engine, wal); + wa_init_finish(wal); + } +} + +static void +reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, i915, id) + intel_wa_list_free(&lists->engine[id].wa_list); + + intel_wa_list_free(&lists->gt_wa_list); +} + static struct drm_i915_gem_object * read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { + const u32 base = engine->mmio_base; struct drm_i915_gem_object *result; + intel_wakeref_t wakeref; struct i915_request *rq; struct i915_vma *vma; - const u32 base = engine->mmio_base; u32 srm, *cs; int err; int i; @@ -47,9 +94,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) if (err) goto err_obj; - intel_runtime_pm_get(engine->i915); - rq = i915_request_alloc(engine, ctx); - intel_runtime_pm_put(engine->i915); + rq = ERR_PTR(-ENODEV); + with_intel_runtime_pm(engine->i915, wakeref) + rq = i915_request_alloc(engine, ctx); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_pin; @@ -167,7 +214,6 @@ out_put: static int do_device_reset(struct intel_engine_cs *engine) { - set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags); i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds"); return 0; } @@ -183,20 +229,22 @@ switch_to_scratch_context(struct intel_engine_cs *engine, { struct i915_gem_context *ctx; struct i915_request *rq; + intel_wakeref_t wakeref; int err = 0; ctx = kernel_context(engine->i915); if (IS_ERR(ctx)) return PTR_ERR(ctx); - intel_runtime_pm_get(engine->i915); - - if (spin) - rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); - else - rq = i915_request_alloc(engine, ctx); - - intel_runtime_pm_put(engine->i915); + rq = ERR_PTR(-ENODEV); + with_intel_runtime_pm(engine->i915, wakeref) { + if (spin) + rq = igt_spinner_create_request(spin, + ctx, engine, + MI_NOOP); + else + rq = i915_request_alloc(engine, ctx); + } kernel_context_close(ctx); @@ -228,6 +276,7 @@ static int 
check_whitelist_across_reset(struct intel_engine_cs *engine, bool want_spin = reset == do_engine_reset; struct i915_gem_context *ctx; struct igt_spinner spin; + intel_wakeref_t wakeref; int err; pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n", @@ -253,9 +302,8 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, if (err) goto out; - intel_runtime_pm_get(i915); - err = reset(engine); - intel_runtime_pm_put(i915); + with_intel_runtime_pm(i915, wakeref) + err = reset(engine); if (want_spin) { igt_spinner_end(&spin); @@ -326,16 +374,17 @@ out: return err; } -static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str) +static bool verify_gt_engine_wa(struct drm_i915_private *i915, + struct wa_lists *lists, const char *str) { struct intel_engine_cs *engine; enum intel_engine_id id; bool ok = true; - ok &= intel_gt_verify_workarounds(i915, str); + ok &= wa_list_verify(i915, &lists->gt_wa_list, str); for_each_engine(engine, i915, id) - ok &= intel_engine_verify_workarounds(engine, str); + ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str); return ok; } @@ -344,7 +393,8 @@ static int live_gpu_reset_gt_engine_workarounds(void *arg) { struct drm_i915_private *i915 = arg; - struct i915_gpu_error *error = &i915->gpu_error; + intel_wakeref_t wakeref; + struct wa_lists lists; bool ok; if (!intel_has_gpu_reset(i915)) @@ -353,19 +403,21 @@ live_gpu_reset_gt_engine_workarounds(void *arg) pr_info("Verifying after GPU reset...\n"); igt_global_reset_lock(i915); + wakeref = intel_runtime_pm_get(i915); - ok = verify_gt_engine_wa(i915, "before reset"); + reference_lists_init(i915, &lists); + + ok = verify_gt_engine_wa(i915, &lists, "before reset"); if (!ok) goto out; - intel_runtime_pm_get(i915); - set_bit(I915_RESET_HANDOFF, &error->flags); i915_reset(i915, ALL_ENGINES, "live_workarounds"); - intel_runtime_pm_put(i915); - ok = verify_gt_engine_wa(i915, "after reset"); + ok = verify_gt_engine_wa(i915, &lists, "after reset"); out: + reference_lists_fini(i915, &lists); + intel_runtime_pm_put(i915, wakeref); igt_global_reset_unlock(i915); return ok ? 
0 : -ESRCH; @@ -380,6 +432,8 @@ live_engine_reset_gt_engine_workarounds(void *arg) struct igt_spinner spin; enum intel_engine_id id; struct i915_request *rq; + intel_wakeref_t wakeref; + struct wa_lists lists; int ret = 0; if (!intel_has_reset_engine(i915)) @@ -390,23 +444,24 @@ live_engine_reset_gt_engine_workarounds(void *arg) return PTR_ERR(ctx); igt_global_reset_lock(i915); + wakeref = intel_runtime_pm_get(i915); + + reference_lists_init(i915, &lists); for_each_engine(engine, i915, id) { bool ok; pr_info("Verifying after %s reset...\n", engine->name); - ok = verify_gt_engine_wa(i915, "before reset"); + ok = verify_gt_engine_wa(i915, &lists, "before reset"); if (!ok) { ret = -ESRCH; goto err; } - intel_runtime_pm_get(i915); i915_reset_engine(engine, "live_workarounds"); - intel_runtime_pm_put(i915); - ok = verify_gt_engine_wa(i915, "after idle reset"); + ok = verify_gt_engine_wa(i915, &lists, "after idle reset"); if (!ok) { ret = -ESRCH; goto err; @@ -416,13 +471,10 @@ live_engine_reset_gt_engine_workarounds(void *arg) if (ret) goto err; - intel_runtime_pm_get(i915); - rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); if (IS_ERR(rq)) { ret = PTR_ERR(rq); igt_spinner_fini(&spin); - intel_runtime_pm_put(i915); goto err; } @@ -431,19 +483,16 @@ live_engine_reset_gt_engine_workarounds(void *arg) if (!igt_wait_for_spinner(&spin, rq)) { pr_err("Spinner failed to start\n"); igt_spinner_fini(&spin); - intel_runtime_pm_put(i915); ret = -ETIMEDOUT; goto err; } i915_reset_engine(engine, "live_workarounds"); - intel_runtime_pm_put(i915); - igt_spinner_end(&spin); igt_spinner_fini(&spin); - ok = verify_gt_engine_wa(i915, "after busy reset"); + ok = verify_gt_engine_wa(i915, &lists, "after busy reset"); if (!ok) { ret = -ESRCH; goto err; @@ -451,6 +500,8 @@ live_engine_reset_gt_engine_workarounds(void *arg) } err: + reference_lists_fini(i915, &lists); + intel_runtime_pm_put(i915, wakeref); igt_global_reset_unlock(i915); kernel_context_close(ctx); diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c index b26f07b55d86..2bfa72c1654b 100644 --- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c +++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c @@ -76,3 +76,57 @@ void timed_fence_fini(struct timed_fence *tf) destroy_timer_on_stack(&tf->timer); i915_sw_fence_fini(&tf->fence); } + +struct heap_fence { + struct i915_sw_fence fence; + union { + struct kref ref; + struct rcu_head rcu; + }; +}; + +static int __i915_sw_fence_call +heap_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) +{ + struct heap_fence *h = container_of(fence, typeof(*h), fence); + + switch (state) { + case FENCE_COMPLETE: + break; + + case FENCE_FREE: + heap_fence_put(&h->fence); + } + + return NOTIFY_DONE; +} + +struct i915_sw_fence *heap_fence_create(gfp_t gfp) +{ + struct heap_fence *h; + + h = kmalloc(sizeof(*h), gfp); + if (!h) + return NULL; + + i915_sw_fence_init(&h->fence, heap_fence_notify); + refcount_set(&h->ref.refcount, 2); + + return &h->fence; +} + +static void heap_fence_release(struct kref *ref) +{ + struct heap_fence *h = container_of(ref, typeof(*h), ref); + + i915_sw_fence_fini(&h->fence); + + kfree_rcu(h, rcu); +} + +void heap_fence_put(struct i915_sw_fence *fence) +{ + struct heap_fence *h = container_of(fence, typeof(*h), fence); + + kref_put(&h->ref, heap_fence_release); +} diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.h b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h index 474aafb92ae1..1f9927e10f3a 100644 --- 
a/drivers/gpu/drm/i915/selftests/lib_sw_fence.h +++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h @@ -39,4 +39,7 @@ struct timed_fence { void timed_fence_init(struct timed_fence *tf, unsigned long expires); void timed_fence_fini(struct timed_fence *tf); +struct i915_sw_fence *heap_fence_create(gfp_t gfp); +void heap_fence_put(struct i915_sw_fence *fence); + #endif /* _LIB_SW_FENCE_H_ */ diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c index d937bdff26f9..b646cdcdd602 100644 --- a/drivers/gpu/drm/i915/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/selftests/mock_context.c @@ -45,11 +45,8 @@ mock_context(struct drm_i915_private *i915, INIT_LIST_HEAD(&ctx->handles_list); INIT_LIST_HEAD(&ctx->hw_id_link); - for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { - struct intel_context *ce = &ctx->__engine[n]; - - ce->gem_context = ctx; - } + for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) + intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]); ret = i915_gem_context_pin_hw_id(ctx); if (ret < 0) diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index d0c44c18db42..08f0cab02e0f 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -30,6 +30,52 @@ struct mock_ring { struct i915_timeline timeline; }; +static void mock_timeline_pin(struct i915_timeline *tl) +{ + tl->pin_count++; +} + +static void mock_timeline_unpin(struct i915_timeline *tl) +{ + GEM_BUG_ON(!tl->pin_count); + tl->pin_count--; +} + +static struct intel_ring *mock_ring(struct intel_engine_cs *engine) +{ + const unsigned long sz = PAGE_SIZE / 2; + struct mock_ring *ring; + + ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); + if (!ring) + return NULL; + + if (i915_timeline_init(engine->i915, + &ring->timeline, engine->name, + NULL)) { + kfree(ring); + return NULL; + } + + ring->base.size = sz; + ring->base.effective_size = sz; + ring->base.vaddr = (void *)(ring + 1); + ring->base.timeline = &ring->timeline; + + INIT_LIST_HEAD(&ring->base.request_list); + intel_ring_update_space(&ring->base); + + return &ring->base; +} + +static void mock_ring_free(struct intel_ring *base) +{ + struct mock_ring *ring = container_of(base, typeof(*ring), base); + + i915_timeline_fini(&ring->timeline); + kfree(ring); +} + static struct mock_request *first_request(struct mock_engine *engine) { return list_first_entry_or_null(&engine->hw_queue, @@ -37,24 +83,29 @@ static struct mock_request *first_request(struct mock_engine *engine) link); } -static void advance(struct mock_engine *engine, - struct mock_request *request) +static void advance(struct mock_request *request) { list_del_init(&request->link); - mock_seqno_advance(&engine->base, request->base.global_seqno); + intel_engine_write_global_seqno(request->base.engine, + request->base.global_seqno); + i915_request_mark_complete(&request->base); + GEM_BUG_ON(!i915_request_completed(&request->base)); + + intel_engine_queue_breadcrumbs(request->base.engine); } static void hw_delay_complete(struct timer_list *t) { struct mock_engine *engine = from_timer(engine, t, hw_delay); struct mock_request *request; + unsigned long flags; - spin_lock(&engine->hw_lock); + spin_lock_irqsave(&engine->hw_lock, flags); /* Timer fired, first request is complete */ request = first_request(engine); if (request) - advance(engine, request); + advance(request); /* * Also immediately signal any subsequent 0-delay requests, but @@ -66,20 +117,24 @@ static 
void hw_delay_complete(struct timer_list *t) break; } - advance(engine, request); + advance(request); } - spin_unlock(&engine->hw_lock); + spin_unlock_irqrestore(&engine->hw_lock, flags); } static void mock_context_unpin(struct intel_context *ce) { + mock_timeline_unpin(ce->ring->timeline); i915_gem_context_put(ce->gem_context); } static void mock_context_destroy(struct intel_context *ce) { GEM_BUG_ON(ce->pin_count); + + if (ce->ring) + mock_ring_free(ce->ring); } static const struct intel_context_ops mock_context_ops = { @@ -92,14 +147,26 @@ mock_context_pin(struct intel_engine_cs *engine, struct i915_gem_context *ctx) { struct intel_context *ce = to_intel_context(ctx, engine); + int err = -ENOMEM; - if (!ce->pin_count++) { - i915_gem_context_get(ctx); - ce->ring = engine->buffer; - ce->ops = &mock_context_ops; + if (ce->pin_count++) + return ce; + + if (!ce->ring) { + ce->ring = mock_ring(engine); + if (!ce->ring) + goto err; } + mock_timeline_pin(ce->ring->timeline); + + ce->ops = &mock_context_ops; + i915_gem_context_get(ctx); return ce; + +err: + ce->pin_count = 0; + return ERR_PTR(err); } static int mock_request_alloc(struct i915_request *request) @@ -118,9 +185,9 @@ static int mock_emit_flush(struct i915_request *request, return 0; } -static void mock_emit_breadcrumb(struct i915_request *request, - u32 *flags) +static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs) { + return cs; } static void mock_submit_request(struct i915_request *request) @@ -128,51 +195,20 @@ static void mock_submit_request(struct i915_request *request) struct mock_request *mock = container_of(request, typeof(*mock), base); struct mock_engine *engine = container_of(request->engine, typeof(*engine), base); + unsigned long flags; i915_request_submit(request); GEM_BUG_ON(!request->global_seqno); - spin_lock_irq(&engine->hw_lock); + spin_lock_irqsave(&engine->hw_lock, flags); list_add_tail(&mock->link, &engine->hw_queue); if (mock->link.prev == &engine->hw_queue) { if (mock->delay) mod_timer(&engine->hw_delay, jiffies + mock->delay); else - advance(engine, mock); + advance(mock); } - spin_unlock_irq(&engine->hw_lock); -} - -static struct intel_ring *mock_ring(struct intel_engine_cs *engine) -{ - const unsigned long sz = PAGE_SIZE / 2; - struct mock_ring *ring; - - BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz); - - ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); - if (!ring) - return NULL; - - i915_timeline_init(engine->i915, &ring->timeline, engine->name); - - ring->base.size = sz; - ring->base.effective_size = sz; - ring->base.vaddr = (void *)(ring + 1); - ring->base.timeline = &ring->timeline; - - INIT_LIST_HEAD(&ring->base.request_list); - intel_ring_update_space(&ring->base); - - return &ring->base; -} - -static void mock_ring_free(struct intel_ring *base) -{ - struct mock_ring *ring = container_of(base, typeof(*ring), base); - - i915_timeline_fini(&ring->timeline); - kfree(ring); + spin_unlock_irqrestore(&engine->hw_lock, flags); } struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, @@ -191,39 +227,37 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, engine->base.i915 = i915; snprintf(engine->base.name, sizeof(engine->base.name), "%s", name); engine->base.id = id; - engine->base.status_page.page_addr = (void *)(engine + 1); + engine->base.status_page.addr = (void *)(engine + 1); engine->base.context_pin = mock_context_pin; engine->base.request_alloc = mock_request_alloc; engine->base.emit_flush = mock_emit_flush; - engine->base.emit_breadcrumb = 
mock_emit_breadcrumb; + engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb; engine->base.submit_request = mock_submit_request; - i915_timeline_init(i915, &engine->base.timeline, engine->base.name); + if (i915_timeline_init(i915, + &engine->base.timeline, + engine->base.name, + NULL)) + goto err_free; i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE); intel_engine_init_breadcrumbs(&engine->base); - engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */ /* fake hw queue */ spin_lock_init(&engine->hw_lock); timer_setup(&engine->hw_delay, hw_delay_complete, 0); INIT_LIST_HEAD(&engine->hw_queue); - engine->base.buffer = mock_ring(&engine->base); - if (!engine->base.buffer) - goto err_breadcrumbs; - if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base))) - goto err_ring; + goto err_breadcrumbs; return &engine->base; -err_ring: - mock_ring_free(engine->base.buffer); err_breadcrumbs: intel_engine_fini_breadcrumbs(&engine->base); i915_timeline_fini(&engine->base.timeline); +err_free: kfree(engine); return NULL; } @@ -237,16 +271,14 @@ void mock_engine_flush(struct intel_engine_cs *engine) del_timer_sync(&mock->hw_delay); spin_lock_irq(&mock->hw_lock); - list_for_each_entry_safe(request, rn, &mock->hw_queue, link) { - list_del_init(&request->link); - mock_seqno_advance(&mock->base, request->base.global_seqno); - } + list_for_each_entry_safe(request, rn, &mock->hw_queue, link) + advance(request); spin_unlock_irq(&mock->hw_lock); } void mock_engine_reset(struct intel_engine_cs *engine) { - intel_write_status_page(engine, I915_GEM_HWS_INDEX, 0); + intel_engine_write_global_seqno(engine, 0); } void mock_engine_free(struct intel_engine_cs *engine) @@ -263,8 +295,6 @@ void mock_engine_free(struct intel_engine_cs *engine) __intel_context_unpin(engine->i915->kernel_context, engine); - mock_ring_free(engine->buffer); - intel_engine_fini_breadcrumbs(engine); i915_timeline_fini(&engine->timeline); diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.h b/drivers/gpu/drm/i915/selftests/mock_engine.h index 133d0c21790d..b9cc3a245f16 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.h +++ b/drivers/gpu/drm/i915/selftests/mock_engine.h @@ -46,10 +46,4 @@ void mock_engine_flush(struct intel_engine_cs *engine); void mock_engine_reset(struct intel_engine_cs *engine); void mock_engine_free(struct intel_engine_cs *engine); -static inline void mock_seqno_advance(struct intel_engine_cs *engine, u32 seqno) -{ - intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); - intel_engine_wakeup(engine); -} - #endif /* !__MOCK_ENGINE_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 43ed8b28aeaa..14ae46fda49f 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -58,8 +58,8 @@ static void mock_device_release(struct drm_device *dev) i915_gem_contexts_lost(i915); mutex_unlock(&i915->drm.struct_mutex); - cancel_delayed_work_sync(&i915->gt.retire_work); - cancel_delayed_work_sync(&i915->gt.idle_work); + drain_delayed_work(&i915->gt.retire_work); + drain_delayed_work(&i915->gt.idle_work); i915_gem_drain_workqueue(i915); mutex_lock(&i915->drm.struct_mutex); @@ -68,13 +68,14 @@ static void mock_device_release(struct drm_device *dev) i915_gem_contexts_fini(i915); mutex_unlock(&i915->drm.struct_mutex); + i915_timelines_fini(i915); + drain_workqueue(i915->wq); i915_gem_drain_freed_objects(i915); mutex_lock(&i915->drm.struct_mutex); - 
mock_fini_ggtt(i915); + mock_fini_ggtt(&i915->ggtt); mutex_unlock(&i915->drm.struct_mutex); - WARN_ON(!list_empty(&i915->gt.timelines)); destroy_workqueue(i915->wq); @@ -147,22 +148,24 @@ struct drm_i915_private *mock_gem_device(void) pdev->class = PCI_BASE_CLASS_DISPLAY << 16; pdev->dev.release = release_dev; dev_set_name(&pdev->dev, "mock"); - dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) /* hack to disable iommu for the fake device; force identity mapping */ pdev->dev.archdata.iommu = (void *)-1; #endif + i915 = (struct drm_i915_private *)(pdev + 1); + pci_set_drvdata(pdev, i915); + + intel_runtime_pm_init_early(i915); + dev_pm_domain_set(&pdev->dev, &pm_domain); pm_runtime_enable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); if (pm_runtime_enabled(&pdev->dev)) WARN_ON(pm_runtime_get_sync(&pdev->dev)); - i915 = (struct drm_i915_private *)(pdev + 1); - pci_set_drvdata(pdev, i915); - err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev); if (err) { pr_err("Failed to initialise mock GEM device: err=%d\n", err); @@ -186,6 +189,7 @@ struct drm_i915_private *mock_gem_device(void) init_waitqueue_head(&i915->gpu_error.wait_queue); init_waitqueue_head(&i915->gpu_error.reset_queue); + mutex_init(&i915->gpu_error.wedge_mutex); i915->wq = alloc_ordered_workqueue("mock", 0); if (!i915->wq) @@ -223,13 +227,14 @@ struct drm_i915_private *mock_gem_device(void) if (!i915->priorities) goto err_dependencies; - INIT_LIST_HEAD(&i915->gt.timelines); + i915_timelines_init(i915); + INIT_LIST_HEAD(&i915->gt.active_rings); INIT_LIST_HEAD(&i915->gt.closed_vma); mutex_lock(&i915->drm.struct_mutex); - mock_init_ggtt(i915); + mock_init_ggtt(i915, &i915->ggtt); mkwrite_device_info(i915)->ring_mask = BIT(0); i915->kernel_context = mock_context(i915, NULL); @@ -250,6 +255,7 @@ err_context: i915_gem_contexts_fini(i915); err_unlock: mutex_unlock(&i915->drm.struct_mutex); + i915_timelines_fini(i915); kmem_cache_destroy(i915->priorities); err_dependencies: kmem_cache_destroy(i915->dependencies); diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index 6ae418c76015..cd83929fde8e 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -70,7 +70,7 @@ mock_ppgtt(struct drm_i915_private *i915, ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE); ppgtt->vm.file = ERR_PTR(-ENODEV); - i915_address_space_init(&ppgtt->vm, i915); + i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); ppgtt->vm.clear_range = nop_clear_range; ppgtt->vm.insert_page = mock_insert_page; @@ -97,11 +97,12 @@ static void mock_unbind_ggtt(struct i915_vma *vma) { } -void mock_init_ggtt(struct drm_i915_private *i915) +void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &i915->ggtt; + memset(ggtt, 0, sizeof(*ggtt)); ggtt->vm.i915 = i915; + ggtt->vm.is_ggtt = true; ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE); ggtt->mappable_end = resource_size(&ggtt->gmadr); @@ -117,14 +118,10 @@ void mock_init_ggtt(struct drm_i915_private *i915) ggtt->vm.vma_ops.set_pages = ggtt_set_pages; ggtt->vm.vma_ops.clear_pages = clear_pages; - i915_address_space_init(&ggtt->vm, i915); - - ggtt->vm.is_ggtt = true; + i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); } -void mock_fini_ggtt(struct drm_i915_private *i915) +void mock_fini_ggtt(struct i915_ggtt *ggtt) { - 
struct i915_ggtt *ggtt = &i915->ggtt; - i915_address_space_fini(&ggtt->vm); } diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h index 9a0a833bb545..40d544bde1d5 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.h +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h @@ -25,8 +25,8 @@ #ifndef __MOCK_GTT_H #define __MOCK_GTT_H -void mock_init_ggtt(struct drm_i915_private *i915); -void mock_fini_ggtt(struct drm_i915_private *i915); +void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt); +void mock_fini_ggtt(struct i915_ggtt *ggtt); struct i915_hw_ppgtt * mock_ppgtt(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c index dcf3b16f5a07..d2de9ece2118 100644 --- a/drivers/gpu/drm/i915/selftests/mock_timeline.c +++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c @@ -10,11 +10,13 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context) { + timeline->i915 = NULL; timeline->fence_context = context; spin_lock_init(&timeline->lock); - init_request_active(&timeline->last_request, NULL); + INIT_ACTIVE_REQUEST(&timeline->barrier); + INIT_ACTIVE_REQUEST(&timeline->last_request); INIT_LIST_HEAD(&timeline->requests); i915_syncmap_init(&timeline->sync); @@ -24,5 +26,5 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context) void mock_timeline_fini(struct i915_timeline *timeline) { - i915_timeline_fini(timeline); + i915_syncmap_free(&timeline->sync); } |
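A pattern repeated throughout the hunks above is that runtime-PM references are now tracked by a cookie: intel_runtime_pm_get() returns an intel_wakeref_t which must be handed back to intel_runtime_pm_put(), and short critical sections use with_intel_runtime_pm() instead of an open-coded get/put pair. As a rough, stand-alone sketch of why a cookie helps (illustrative userspace C, not the i915 implementation; every name below — fake_wakeref_t, fake_pm_get(), fake_pm_put(), active_refs — is invented for the example), each acquisition hands out a distinct non-zero token that must be surrendered exactly once, so an unbalanced or double put is caught at the point of the mistake:

/*
 * Illustrative sketch only: a cookie-tracked acquire/release pair loosely
 * modelled on the wakeref usage in the selftests above. Plain userspace C,
 * not kernel code; all identifiers are made up for this example.
 */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long fake_wakeref_t;

static fake_wakeref_t next_cookie = 1;	/* 0 is reserved for "no reference" */
static int active_refs;

static fake_wakeref_t fake_pm_get(void)
{
	/* Each get returns a unique, non-zero cookie; a leaked reference can
	 * then be reported together with the cookie that was never returned. */
	active_refs++;
	return next_cookie++;
}

static void fake_pm_put(fake_wakeref_t wakeref)
{
	if (!wakeref || active_refs <= 0) {
		fprintf(stderr, "unbalanced put of cookie %lu\n", wakeref);
		abort();
	}
	active_refs--;
}

int main(void)
{
	/* Usage mirrors the selftests: take the cookie up front, hand back
	 * exactly that cookie on the way out. */
	fake_wakeref_t wakeref = fake_pm_get();

	/* ... work that requires the device to stay awake ... */

	fake_pm_put(wakeref);
	printf("outstanding references: %d\n", active_refs);
	return 0;
}

The tracked form costs one extra argument at each put, but it lets a debug build associate every outstanding reference with the acquisition that created it, which is presumably what the wholesale wakeref conversion in these selftests is preparing for.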