author		Ramalingam C <ramalingam.c@intel.com>	2022-03-16 19:05:56 +0530
committer	Ramalingam C <ramalingam.c@intel.com>	2022-03-16 19:29:40 +0530
commit		600265b9b33c316406727770389d97bc2cb77dc0 (patch)
tree		e60674c9081027e15978232ec9dd7a375841e2cd
parent		19b489bd04559215dcbcc2464ae391f33a8a0a33 (diff)

selftest: ccs_clear (flat-ccs-ww12.02)

Signed-off-by: Ramalingam C <ramalingam.c@intel.com>

-rw-r--r--	drivers/gpu/drm/i915/gt/selftest_migrate.c	233
1 file changed, 233 insertions, 0 deletions
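For background, the flat-CCS layout this subtest exercises keeps the compression control state (CCS) in a portion of LMEM that is only reachable via XY_CTRL_SURF_COPY_BLT with indirect access, at roughly one CCS byte per 256 bytes of main surface. Below is a minimal stand-alone sketch of that sizing math, assuming the 1:256 ratio; the two constants are illustrative stand-ins for the driver's NUM_BYTES_PER_CCS_BYTE / NUM_CCS_BYTES_PER_BLOCK definitions and are not taken from this diff.

/*
 * Illustrative sketch only (not part of the patch): how many CCS bytes back
 * a main-surface allocation, and how many XY_CTRL_SURF_COPY_BLT blocks an
 * emit_copy_ccs()-style helper would have to move for it.
 */
#include <stdio.h>

#define NUM_BYTES_PER_CCS_BYTE	256	/* assumed: 1 CCS byte per 256 main-surface bytes */
#define NUM_CCS_BYTES_PER_BLOCK	256	/* assumed: the blt moves CCS in 256-byte blocks */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long sizes[] = { 4096, 65536, 2 * 1024 * 1024 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long ccs_bytes = DIV_ROUND_UP(sizes[i], NUM_BYTES_PER_CCS_BYTE);
		unsigned long ccs_blocks = DIV_ROUND_UP(ccs_bytes, NUM_CCS_BYTES_PER_BLOCK);

		printf("main %8lu B -> CCS %5lu B -> %lu blt block(s)\n",
		       sizes[i], ccs_bytes, ccs_blocks);
	}
	return 0;
}

For the SZ_64K object used by ccs_clear_blt() below, this works out to 256 bytes of CCS, i.e. the verification loops walk count = 64 u32 values.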
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index c9c4f391c5cc..34f5e0ad254d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -670,3 +670,236 @@ int intel_migrate_perf_selftests(struct drm_i915_private *i915)
 	return intel_gt_live_subtests(tests, gt);
 }
+
+static int emit_copy_ccs(struct i915_request *rq, u64 offset, int size)
+{
+	struct drm_i915_private *i915 = rq->engine->i915;
+	u32 num_ccs_blks, ccs_ring_size;
+	int mocs = rq->engine->gt->mocs.uc_index << 1;
+	u32 *cs;
+
+	GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
+
+	offset += (u64)rq->engine->instance << 32;
+
+	ccs_ring_size = calc_ctrl_surf_instr_size(i915, size);
+	cs = intel_ring_begin(rq, round_up(ccs_ring_size, 2));
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	num_ccs_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
+				    NUM_CCS_BYTES_PER_BLOCK);
+
+	/*
+	 * Flat CCS surface can only be accessed via
+	 * XY_CTRL_SURF_COPY_BLT CMD and using indirect
+	 * mapping of associated LMEM.
+	 */
+	cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
+	cs = _i915_ctrl_surf_copy_blt(cs, offset, offset,
+				      INDIRECT_ACCESS, DIRECT_ACCESS,
+				      mocs, mocs, num_ccs_blks);
+	cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
+
+	if (ccs_ring_size & 1)
+		*cs++ = MI_NOOP;
+
+	intel_ring_advance(rq, cs);
+	return 0;
+}
+
+static int intel_context_copy_ccs(struct intel_context *ce,
+				  const struct i915_deps *deps,
+				  struct scatterlist *sg,
+				  enum i915_cache_level cache_level,
+				  struct i915_request **out)
+{
+	struct sgt_dma it = sg_sgt(sg);
+	struct i915_request *rq;
+	int err;
+
+	GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
+	*out = NULL;
+
+	GEM_BUG_ON(ce->ring->size < SZ_64K);
+
+	do {
+		u32 offset;
+		int len;
+
+		rq = i915_request_create(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out_ce;
+		}
+
+		if (deps) {
+			err = i915_request_await_deps(rq, deps);
+			if (err)
+				goto out_rq;
+
+			if (rq->engine->emit_init_breadcrumb) {
+				err = rq->engine->emit_init_breadcrumb(rq);
+				if (err)
+					goto out_rq;
+			}
+
+			deps = NULL;
+		}
+
+		/* The PTE updates + clear must not be interrupted. */
+		err = emit_no_arbitration(rq);
+		if (err)
+			goto out_rq;
+
+		offset = 0;
+		if (HAS_64K_PAGES(ce->engine->i915))
+			offset = CHUNK_SZ;
+
+		len = emit_pte(rq, &it, cache_level, true, offset, CHUNK_SZ);
+		if (len <= 0) {
+			err = len;
+			goto out_rq;
+		}
+
+		err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+		if (err)
+			goto out_rq;
+
+		err = emit_copy_ccs(rq, offset, len);
+
+		/* Arbitration is re-enabled between requests. */
+out_rq:
+		if (*out)
+			i915_request_put(*out);
+		*out = i915_request_get(rq);
+		i915_request_add(rq);
+		if (err || !it.sg || !sg_dma_len(it.sg))
+			break;
+
+		cond_resched();
+	} while (1);
+
+out_ce:
+	return err;
+}
+
+static int ccs_clear_blt(void *arg)
+{
+	struct intel_gt *gt = arg;
+	static const unsigned long size = SZ_64K;
+	struct i915_request *rq;
+	struct drm_i915_gem_object *dst;
+	enum i915_map_type map_type;
+	u32 *cur;
+	int err, count, i;
+
+	dst = i915_gem_object_create_lmem(gt->i915, size, 0);
+	if (IS_ERR(dst))
+		return PTR_ERR(dst);
+
+	count = GET_CCS_BYTES(gt->i915, dst->base.size) / sizeof(u32);
+
+	map_type = i915_coherent_map_type(gt->i915, dst, false);
+
+	if (!i915_gem_object_trylock(dst, NULL)) {
+		err = -EBUSY;
+		goto obj_put_exit;
+	}
+
+	err = i915_gem_object_pin_pages(dst);
+	if (err)
+		goto obj_lock_exit;
+
+	err = intel_context_copy_ccs(gt->migrate.context, NULL,
+				     dst->mm.pages->sgl, I915_CACHE_NONE,
+				     &rq);
+	if (rq) {
+		if (i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT) < 0)
+			err = -EIO;
+		i915_request_put(rq);
+	}
+	if (err)
+		goto obj_unpin_exit;
+
+	cur = i915_gem_object_pin_map(dst, map_type);
+	if (IS_ERR(cur)) {
+		err = PTR_ERR(cur);
+		goto obj_unpin_exit;
+	}
+
+	for (i = 0; i < count; ++i)
+		if (*cur++) {
+			pr_err("CCS content is not cleared at location %d of %d\n",
+			       i, count);
+			err = -EINVAL;
+			break;
+		}
+
+	i915_gem_object_unpin_map(dst);
+
+	if (err)
+		goto obj_unpin_exit;
+
+	err = intel_context_migrate_clear(gt->migrate.context, NULL,
+					  dst->mm.pages->sgl, I915_CACHE_NONE,
+					  i915_gem_object_is_lmem(dst),
+					  0x5a5a5a5a, &rq);
+	if (rq) {
+		if (i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT) < 0)
+			err = -EIO;
+		i915_request_put(rq);
+	}
+
+	cur = i915_gem_object_pin_map(dst, map_type);
+	if (IS_ERR(cur)) {
+		err = PTR_ERR(cur);
+		goto obj_unpin_exit;
+	}
+
+	for (i = 0; i < count; ++i)
+		if (*cur++ != 0x5a5a5a5a) {
+			pr_err("Object content does not match the 0x5a5a5a5a clear value at location %d of %d\n",
+			       i, count);
+			err = -EINVAL;
+			break;
+		}
+
+	i915_gem_object_unpin_map(dst);
+
+	if (err)
+		goto obj_unpin_exit;
+
+	err = intel_context_migrate_clear(gt->migrate.context, NULL,
+					  dst->mm.pages->sgl, I915_CACHE_NONE,
+					  i915_gem_object_is_lmem(dst),
+					  0, &rq);
+	if (rq) {
+		if (i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT) < 0)
+			err = -EIO;
+		i915_request_put(rq);
+	}
+
+	cur = i915_gem_object_pin_map(dst, map_type);
+	if (IS_ERR(cur)) {
+		err = PTR_ERR(cur);
+		goto obj_unpin_exit;
+	}
+
+	for (i = 0; i < count; ++i)
+		if (*cur++) {
+			pr_err("Object content is not cleared at location %d of %d\n",
+			       i, count);
+			err = -EINVAL;
+			break;
+		}
+
+	i915_gem_object_unpin_map(dst);
+
+obj_unpin_exit:
+	i915_gem_object_unpin_pages(dst);
+obj_lock_exit:
+	i915_gem_object_unlock(dst);
+obj_put_exit:
+	i915_gem_object_put(dst);
+
+	return err;
+}
+
+int intel_ccs_fast_clear_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(ccs_clear_blt),
+	};
+	struct intel_gt *gt = to_gt(i915);
+
+	if (!HAS_FLAT_CCS(i915))
+		return 0;
+
+	if (intel_gt_is_wedged(gt))
+		return 0;
+
+	if (!gt->migrate.context)
+		return 0;
+
+	return intel_gt_live_subtests(tests, gt);
+}
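One thing to note: nothing in the diff above references intel_ccs_fast_clear_selftests(), so the new subtest is not reachable from the live selftest runner as posted; presumably a later patch in the series registers it (or folds it into the existing migrate selftests). As a sketch only, with a guessed entry name, a registration in drivers/gpu/drm/i915/selftests/i915_live_selftests.h would look like:

/* Hypothetical registration sketch; the entry name is a guess, not from this patch. */
selftest(ccs_fast_clear, intel_ccs_fast_clear_selftests)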