summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDave Gordon <david.s.gordon@intel.com>2015-10-21 16:22:27 +0100
committerJohn Harrison <John.C.Harrison@Intel.com>2016-06-28 17:19:23 +0100
commitdc1d4b78c7d728d9c1140257a21beb1d99f7380f (patch)
tree3c356e5823aadae9ce940735cc77abf4f7763915
parent865ae2e85c514eec987e497b521b8965bd9a1e82 (diff)
drm/i915/preempt: don't allow nonbatch ctx init when the scheduler is busy
If the scheduler is busy (e.g. processing a preemption) it will need to be able to acquire the struct_mutex, so we can't allow untracked requests to bypass the scheduler and go directly to the hardware (much confusion will result). Since untracked requests are used only for initialisation of logical contexts, we can avoid the problem by forcing any thread trying to initialise a context at an unfortunate time to drop the mutex and retry later. v2: Add documentation. For: VIZ-2021 Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c13
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c8
3 files changed, 22 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 366927a5042b..769be9ba84a2 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -1974,6 +1974,19 @@ bool i915_scheduler_is_engine_preempting(struct intel_engine_cs *engine)
}
/**
 + * i915_scheduler_is_engine_busy - is the scheduler busy on the given engine?
 + * @engine: Engine to query
 + *
 + * Used to decide whether a non-scheduler (untracked) request may safely be
 + * submitted directly to the hardware, bypassing the scheduler.
 + *
 + * Return: true if the scheduler is busy and cannot immediately perform
 + * operations such as submitting a batch buffer to the hardware, false
 + * otherwise.
 + */
+bool i915_scheduler_is_engine_busy(struct intel_engine_cs *engine)
+{
+	/* Currently only pre-emption ties up the scheduler. */
+	return i915_scheduler_is_engine_preempting(engine);
+}
+
+/**
* i915_scheduler_is_engine_flying - does the given engine have in flight batches?
* @engine: Engine to query
* Used by TDR to distinguish hung engines (not moving but with work to do)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 0cb6b1105183..6ceb352d4ad8 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -213,6 +213,7 @@ bool i915_scheduler_notify_request(struct drm_i915_gem_request *req);
void i915_scheduler_wakeup(struct drm_device *dev);
bool i915_scheduler_is_engine_flying(struct intel_engine_cs *engine);
bool i915_scheduler_is_engine_preempting(struct intel_engine_cs *engine);
+bool i915_scheduler_is_engine_busy(struct intel_engine_cs *engine);
void i915_scheduler_work_handler(struct work_struct *work);
int i915_scheduler_flush(struct intel_engine_cs *engine, bool is_locked);
int i915_scheduler_flush_stamp(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4977e84b81a3..93512a868cc9 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2770,6 +2770,14 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
WARN_ON(ctx->engine[engine->id].state);
+ /* Don't submit non-scheduler requests while the scheduler is busy */
+ if (i915_scheduler_is_engine_busy(engine)) {
+ mutex_unlock(&dev->struct_mutex);
+ msleep(1);
+ mutex_lock(&dev->struct_mutex);
+ return -EAGAIN;
+ }
+
context_size = round_up(intel_lr_context_size(engine), 4096);
/* One extra page as the sharing data between driver and GuC */