author    Chris Wilson <chris@chris-wilson.co.uk>    2022-08-04 15:26:17 +0200
committer Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>    2022-08-10 14:53:28 +0200
commit    90549cfba1606b695fe60f78ef87b7d54586eab7 (patch)
tree      bf97ffe448f00e3f9d85dc835e52f5a77fcd17be
parent    ec2ab8e3a151ce05bd2726319c528c2ab99e8a96 (diff)
i915/i915_pm_rps: Don't use gem_write in batch_create
In fence_order and engine_order we repeatedly create big batch buffers, and
filling them one page at a time with gem_write is very slow, causing test
timeouts. Rewrite batch_create to mmap the buffer and write it directly
instead of using gem_write.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Karolina Drobnik <karolina.drobnik@intel.com>
Reviewed-by: Kamil Konieczny <kamil.konieczny@linux.intel.com>
-rw-r--r--    tests/i915/i915_pm_rps.c    10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/tests/i915/i915_pm_rps.c b/tests/i915/i915_pm_rps.c
index a45a69059..d06ade27e 100644
--- a/tests/i915/i915_pm_rps.c
+++ b/tests/i915/i915_pm_rps.c
@@ -621,11 +621,15 @@ static uint32_t batch_create(int i915, uint64_t sz)
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	const uint32_t chk = 0x5 << 23;
 	uint32_t handle = gem_create(i915, sz);
+	uint32_t *map;
 
-	for (uint64_t pg = 4096; pg + 4096 < sz; pg += 4096)
-		gem_write(i915, handle, pg, &chk, sizeof(chk));
+	map = gem_mmap__device_coherent(i915, handle, 0, sz, PROT_WRITE);
 
-	gem_write(i915, handle, sz - sizeof(bbe), &bbe, sizeof(bbe));
+	for (uint64_t pg = 1; pg * 4096 < sz; pg++)
+		map[(pg * 4096) / sizeof(*map)] = chk;
+
+	map[sz / sizeof(*map) - 1] = bbe;
+	munmap(map, sz);
 
 	return handle;
 }
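
For reference, batch_create() with this patch applied reads roughly as below. This is a sketch assembled from the hunk above: the function signature comes from the hunk header, the comments are added here for explanation, and nothing outside the hunk is assumed to change.

	static uint32_t batch_create(int i915, uint64_t sz)
	{
		const uint32_t bbe = MI_BATCH_BUFFER_END;
		const uint32_t chk = 0x5 << 23;
		uint32_t handle = gem_create(i915, sz);
		uint32_t *map;

		/* One device-coherent mapping replaces the per-page gem_write() calls. */
		map = gem_mmap__device_coherent(i915, handle, 0, sz, PROT_WRITE);

		/* Write the chk command dword at the start of every page after the
		 * first, touching the whole object through the mapping. */
		for (uint64_t pg = 1; pg * 4096 < sz; pg++)
			map[(pg * 4096) / sizeof(*map)] = chk;

		/* Terminate the batch with MI_BATCH_BUFFER_END in the last dword. */
		map[sz / sizeof(*map) - 1] = bbe;
		munmap(map, sz);

		return handle;
	}

The net effect is the same batch contents as before, but with a single mmap/munmap pair instead of one gem_write() per 4 KiB page, which is what was blowing the test budget for large buffers.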