summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDave Gordon <david.s.gordon@intel.com>2015-10-26 18:53:49 +0000
committerJohn Harrison <John.C.Harrison@Intel.com>2016-06-28 17:19:22 +0100
commit865ae2e85c514eec987e497b521b8965bd9a1e82 (patch)
tree46cb3c1d8ed4c2e937103027115b62ffad6bcdba
parent88268ceaff26678db46c0bcca8d77ceccfd61095 (diff)
drm/i915/preempt: scheduler logic for preventing recursive preemption
Once a preemptive request has been dispatched to the hardware-layer submission mechanism, the scheduler must not send any further requests to the same ring until the preemption completes. Here we add the logic that ensures that only one preemption per ring can be in progress at one time. Actually-preemptive requests are still disabled via a module parameter at this early stage, as the logic to process completion isn't in place yet. v2: Added documentation. For: VIZ-2021 Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c63
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h1
2 files changed, 62 insertions, 2 deletions
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 1d24be142575..366927a5042b 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -639,6 +639,15 @@ static int i915_scheduler_submit(struct intel_engine_cs *engine)
WARN_ON(scheduler->flags[engine->id] & I915_SF_SUBMITTING);
scheduler->flags[engine->id] |= I915_SF_SUBMITTING;
+ /*
+ * If pre-emption is in progress on an engine then no further work
+ * may be submitted to that same engine. Come back later...
+ */
+ if (i915_scheduler_is_engine_preempting(engine)) {
+ ret = -EAGAIN;
+ goto error;
+ }
+
/* First time around, complain if anything unexpected occurs: */
ret = i915_scheduler_pop_from_queue_locked(engine, &node);
if (ret)
@@ -681,7 +690,18 @@ static int i915_scheduler_submit(struct intel_engine_cs *engine)
*/
i915_scheduler_node_fly(node);
- scheduler->stats[engine->id].submitted++;
+ if (req->scheduler_flags & I915_REQ_SF_PREEMPT) {
+ /*
+ * If this batch is pre-emptive then it will tie the
+ * hardware up at least until it has begun to be
+ * executed. That is, if a pre-emption request is in
+ * flight then no other work may be submitted until
+ * it resolves.
+ */
+ scheduler->flags[engine->id] |= I915_SF_PREEMPTING;
+ scheduler->stats[engine->id].preempts_submitted++;
+ } else
+ scheduler->stats[engine->id].submitted++;
spin_unlock_irq(&scheduler->lock);
ret = dev_priv->gt.execbuf_final(&node->params);
@@ -698,8 +718,10 @@ static int i915_scheduler_submit(struct intel_engine_cs *engine)
/*
* Oh dear! Either the node is broken or the engine is
* busy. So need to kill the node or requeue it and try
- * again later as appropriate.
+ * again later as appropriate. Either way, clear the
+ * pre-emption flag as it ain't happening.
*/
+ scheduler->flags[engine->id] &= ~I915_SF_PREEMPTING;
switch (-ret) {
case ENODEV:
@@ -741,6 +763,10 @@ static int i915_scheduler_submit(struct intel_engine_cs *engine)
}
}
+ /* If pre-emption is now in progress then stop launching */
+ if (i915_scheduler_is_engine_preempting(engine))
+ break;
+
/* Keep launching until the sky is sufficiently full. */
flying = i915_scheduler_count_flying(scheduler, engine);
if (flying >= scheduler->min_flying)
@@ -1075,6 +1101,11 @@ bool i915_scheduler_notify_request(struct drm_i915_gem_request *req)
/* Node was in flight so mark it as complete. */
if (req->cancelled) {
+ /* If a preemption was in progress, it won't complete now. */
+ if (node->status == I915_SQS_OVERTAKING)
+ scheduler->flags[req->engine->id] &=
+ ~(I915_SF_PREEMPTING | I915_SF_PREEMPTED);
+
node->status = I915_SQS_DEAD;
scheduler->stats[req->engine->id].kill_flying++;
} else {
@@ -1915,6 +1946,34 @@ void i915_scheduler_closefile(struct drm_device *dev, struct drm_file *file)
}
/**
+ * i915_scheduler_is_engine_preempting - is a pre-emption event in progress?
+ * @engine: Engine to query
+ * Returns true if a pre-emption event is currently in progress (which would
+ * mean that various other operations may be unsafe) or false if not.
+ */
+bool i915_scheduler_is_engine_preempting(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+ uint32_t sched_flags = scheduler->flags[engine->id];
+
+ /*
+ * The scheduler is prevented from sending batches to the hardware
+ * while preemption is in progress (flag bit I915_SF_PREEMPTING).
+ *
+ * Post-preemption (I915_SF_PREEMPTED), the hardware engine will be
+ * empty, and the scheduler therefore needs a chance to run the
+ * delayed work task to retire completed work and restart submission
+ *
+ * Therefore, if either flag is set, the scheduler is busy.
+ */
+ if (sched_flags & (I915_SF_PREEMPTING | I915_SF_PREEMPTED))
+ return true;
+
+ return false;
+}
+
+/**
* i915_scheduler_is_engine_flying - does the given engine have in flight batches?
* @engine: Engine to query
* Used by TDR to distinguish hung engines (not moving but with work to do)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index cc89de816dc0..0cb6b1105183 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -212,6 +212,7 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
bool i915_scheduler_notify_request(struct drm_i915_gem_request *req);
void i915_scheduler_wakeup(struct drm_device *dev);
bool i915_scheduler_is_engine_flying(struct intel_engine_cs *engine);
+bool i915_scheduler_is_engine_preempting(struct intel_engine_cs *engine);
void i915_scheduler_work_handler(struct work_struct *work);
int i915_scheduler_flush(struct intel_engine_cs *engine, bool is_locked);
int i915_scheduler_flush_stamp(struct intel_engine_cs *engine,