author     Dave Gordon <david.s.gordon@intel.com>        2015-10-23 18:02:05 +0100
committer  John Harrison <John.C.Harrison@Intel.com>     2016-06-28 17:19:20 +0100
commit     80737b4289e4b78f2dd19e556a778746b07cbd6a (patch)
tree       4cd72bb48080da4dfb73a35d5fbd032880db7900
parent     97764b08658612e5e86f87075563482603588cd8 (diff)
drm/i915/sched: set request 'head' at start of ring submission

With the scheduler, request allocation can happen long before the ring is
filled in, and in a different order. So for that case, we update the request
head at the start of the *_submission_final() functions (the initialisation
on allocation is still useful for the direct-submission mode).

v2: Updated to use locally cached request pointer.

For: VIZ-2021
Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  3
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c            3
2 files changed, 6 insertions, 0 deletions
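Before the diff itself, a minimal sketch of the ordering problem the commit
message describes (the struct layout and helper names below are simplified
assumptions for illustration, not the real i915 definitions): under the
scheduler a request can be allocated, then other requests can fill the ring
before this one is emitted, so a head value captured at allocation time no
longer points at where this request's commands will actually start.
Re-reading the ring tail at the start of the *_submission_final() path
records the true start point.

/*
 * Hypothetical illustration only -- simplified stand-ins for the real
 * request/ringbuffer structures.
 */
struct example_ring { unsigned int tail; };
struct example_request {
	struct example_ring *ring;
	unsigned int head;	/* ring offset where this request's commands begin */
};

static void example_submission_final(struct example_request *req)
{
	/*
	 * By the time the scheduler submits this request, other requests may
	 * already have advanced ring->tail past the value seen at allocation,
	 * so capture the actual start point immediately before emitting.
	 */
	req->head = req->ring->tail;

	/* ... emit flushes, the batchbuffer start and the breadcrumb ... */
}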
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2113491b5251..0715f4bdeaff 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1349,6 +1349,9 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
 	if (ret)
 		goto error;
 
+	/* record where we start filling the ring */
+	req->head = intel_ring_get_tail(req->ringbuf);
+
 	/*
 	 * Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 1cd34c9ee4b5..4977e84b81a3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1096,6 +1096,9 @@ int intel_execlists_submission_final(struct i915_execbuffer_params *params)
 	if (ret)
 		goto err;
 
+	/* record where we start filling the ring */
+	req->head = intel_ring_get_tail(ringbuf);
+
 	/*
 	 * Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
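As a follow-up note on the design (an assumption about how such a recorded
value is typically consumed, not something stated in this patch): with
req->head marking where emission began and the ring tail at the end of
emission marking where it finished, the two offsets bracket exactly the
commands belonging to this request, so the space the request occupies can be
computed even when the ring has wrapped. A minimal sketch, assuming a
power-of-two ring size and a hypothetical helper name:

/*
 * Bytes occupied by a request in the ring between its recorded head and
 * final tail; masking with (ring_size - 1) handles wraparound.
 */
static unsigned int example_request_span(unsigned int head, unsigned int tail,
					 unsigned int ring_size)
{
	return (tail - head) & (ring_size - 1);
}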