diff options
author | Dave Gordon <david.s.gordon@intel.com> | 2015-10-14 19:50:18 +0100 |
---|---|---|
committer | John Harrison <John.C.Harrison@Intel.com> | 2016-06-28 17:19:24 +0100 |
commit | 12ea5dad3fd5712d674566eb179e1768e8399132 (patch) | |
tree | 6ff4fa235751bfbf1aaaabb05f3a57610df5ef1c | |
parent | 1740b23334e0cb5a676994d0de1c79deb9db78c8 (diff) |
drm/i915/preempt: Refactor intel_lr_context_reset()
After preemption, we need to empty out the ringbuffers associated
with preempted requests, so that the scheduler has a clean ring
into which to (re-)insert requests (not necessarily in the same
order as before they were preempted).
So this patch refactors the existing routine intel_lr_context_reset()
into a new inner core intel_lr_context_resync() which just updates
a context and the associated ringbuffer, and an outer wrapper which
implements the original operation of intel_lr_context_reset() in
terms of resync().
For: VIZ-2021
Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_context.c |  2
-rw-r--r-- | drivers/gpu/drm/i915/intel_lrc.c        | 81
-rw-r--r-- | drivers/gpu/drm/i915/intel_lrc.h        |  6
3 files changed, 59 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index abf99e3978b8..899e6142ddcb 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -359,7 +359,7 @@ void i915_gem_context_reset(struct drm_device *dev)
 		struct intel_context *ctx;

 		list_for_each_entry(ctx, &dev_priv->context_list, link)
-			intel_lr_context_reset(dev, ctx);
+			intel_lr_context_reset(ctx);
 	}

 	for (i = 0; i < I915_NUM_ENGINES; i++) {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 93512a868cc9..764cf2de96c6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2852,39 +2852,66 @@ error_deref_obj:
 	return ret;
 }

-void intel_lr_context_reset(struct drm_device *dev,
-			    struct intel_context *ctx)
+/*
+ * Empty the ringbuffer associated with the specified request
+ * by updating the ringbuffer 'head' to the value of 'tail', or,
+ * if 'rezero' is true, setting both 'head' and 'tail' to zero.
+ * Then propagate the change to the associated context image.
+ */
+void intel_lr_context_resync(struct intel_context *ctx,
+			     struct intel_engine_cs *engine,
+			     bool rezero)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *engine;
-	int i;
+	enum intel_engine_id engine_id = engine->id;
+	struct drm_i915_gem_object *ctx_obj;
+	struct intel_ringbuffer *ringbuf;
+	struct page *page;
+	uint32_t *reg_state;

-	for_each_engine(engine, dev_priv, i) {
-		struct drm_i915_gem_object *ctx_obj =
-				ctx->engine[engine->id].state;
-		struct intel_ringbuffer *ringbuf =
-				ctx->engine[engine->id].ringbuf;
-		uint32_t *reg_state;
-		struct page *page;
+	ctx_obj = ctx->engine[engine_id].state;
+	ringbuf = ctx->engine[engine_id].ringbuf;

-		if (!ctx_obj)
-			continue;
+	/*
+	 * When resetting, a hardware context might be as-yet-unused
+	 * and therefore not-yet-allocated. In other situations, the
+	 * ringbuffer and context object must already exist.
+	 */
+	if (WARN_ON(!ringbuf != !ctx_obj))
+		return;
+	if (!i915_reset_in_progress(&ctx->i915->gpu_error))
+		WARN_ON(!ringbuf || !ctx_obj);
+	if (!ringbuf || !ctx_obj)
+		return;
+	if (WARN_ON(i915_gem_object_get_pages(ctx_obj)))
+		return;

-		if (i915_gem_object_get_pages(ctx_obj)) {
-			WARN(1, "Failed get_pages for context obj\n");
-			continue;
-		}
-		page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-		reg_state = kmap_atomic(page);
+	if (i915_gem_object_get_pages(ctx_obj)) {
+		WARN(1, "Failed get_pages for context obj\n");
+		return;
+	}
+	page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
+	reg_state = kmap_atomic(page);

-		reg_state[CTX_RING_HEAD+1] = 0;
-		reg_state[CTX_RING_TAIL+1] = 0;
+	if (rezero)
+		ringbuf->tail = 0;
+	ringbuf->head = ringbuf->tail;
+	ringbuf->last_retired_head = -1;
+	intel_ring_update_space(ringbuf);

-		kunmap_atomic(reg_state);
+	reg_state[CTX_RING_HEAD+1] = ringbuf->head;
+	reg_state[CTX_RING_TAIL+1] = ringbuf->tail;

-		ringbuf->head = 0;
-		ringbuf->tail = 0;
-		ringbuf->last_retired_head = -1;
-		intel_ring_update_space(ringbuf);
+	kunmap_atomic(reg_state);
+}
+
+void intel_lr_context_reset(struct intel_context *ctx)
+{
+	struct drm_i915_private *dev_priv = ctx->i915;
+	struct intel_engine_cs *engine;
+	int i;
+
+	for_each_engine(engine, dev_priv, i) {
+		intel_lr_context_resync(ctx, engine, true);
+	}
 }
+
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 5c3fdf42938f..f35491915f7e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -103,8 +103,10 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 				    struct intel_engine_cs *engine);
 void intel_lr_context_unpin(struct intel_context *ctx,
 			    struct intel_engine_cs *engine);
-void intel_lr_context_reset(struct drm_device *dev,
-			    struct intel_context *ctx);
+void intel_lr_context_resync(struct intel_context *ctx,
+			     struct intel_engine_cs *ring,
+			     bool rezero);
+void intel_lr_context_reset(struct intel_context *ctx);
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
 				     struct intel_engine_cs *engine);