summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Harrison <John.C.Harrison@Intel.com>2014-11-12 16:28:16 +0000
committerJohn Harrison <John.C.Harrison@Intel.com>2016-05-06 14:12:59 +0100
commitbf144aff24c79fa2ab4801d07a8371579f3229d1 (patch)
tree20a902a15aaabee8d1b734f9de0df883eb58e368
parent0815f31c418b3302ba5ffdc64bcb1a436c34ba0a (diff)
drm/i915: Support for 'unflushed' ring idle
When the seqno wraps around zero, the entire GPU is forced to be idle for some reason (possibly only to work around issues with hardware semaphores but no-one seems too sure!). This causes a problem if the force idle occurs at an inopportune moment such as in the middle of submitting a batch buffer. Specifically, it would lead to recursive submits - submitting work requires a new seqno, the new seqno requires idling the ring, idling the ring requires submitting work, submitting work requires a new seqno... This change adds a 'flush' parameter to the idle function call which specifies whether the scheduler queues should be flushed out. I.e. is the call intended to just idle the ring as it is right now (no flush) or is it intended to force all outstanding work out of the system (with flush). In the seqno wrap case, pending work is not an issue because the next operation will be to submit it. However, in other cases, the intention is to make sure everything that could be done has been done. v6: Updated to newer nightly (lots of ring -> engine renaming). Added kerneldoc for intel_engine_idle(). Wrapped boolean 'flush' parameter with an _flush() macro. [review feedback from Joonas Lahtinen] For: VIZ-1587 Signed-off-by: John Harrison <John.C.Harrison@Intel.com> Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c31
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h4
4 files changed, 34 insertions, 5 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 992a3ea83b73..1e8237fbf2d1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3896,7 +3896,7 @@ int i915_gpu_idle(struct drm_device *dev)
i915_add_request_no_flush(req);
}
- ret = intel_engine_idle(engine);
+ ret = intel_engine_idle_flush(engine);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0a4ef615955a..d67b08bd1d57 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1091,7 +1091,7 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
if (!intel_engine_initialized(engine))
return;
- ret = intel_engine_idle(engine);
+ ret = intel_engine_idle_flush(engine);
if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
engine->name, ret);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6021655bf519..f5bcd24df186 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2359,10 +2359,37 @@ static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
intel_ring_update_space(ringbuf);
}
-int intel_engine_idle(struct intel_engine_cs *engine)
+/**
+ * __intel_engine_idle - Force the engine to be idle.
+ * @engine: Engine to be idled
+ * @flush: Should queued scheduler work also be flushed
+ * Waits for all outstanding requests that have been sent to the given engine
+ * to complete. Can optionally also force all unsent requests that are queued
+ * in the scheduler to be sent first.
+ * Returns zero on success, otherwise a negative error code.
+ *
+ * NB: Flushing can lead to recursion if called at the wrong time. E.g. flush
+ * causes the scheduler to submit requests to the hardware, submitting
+ * requests requires allocating a new seqno, when the seqno wraps around it
+ * idles the engine, idling with flush causes the scheduler to submit requests...
+ */
+int __intel_engine_idle(struct intel_engine_cs *engine, bool flush)
{
struct drm_i915_gem_request *req;
uint32_t flags;
+ int ret;
+
+ /*
+ * NB: Must not flush the scheduler if this idle request is from
+ * within an execbuff submission (i.e. due to 'get_seqno' calling
+ * 'wrap_seqno' calling 'idle'). As that would lead to recursive
+ * flushes!
+ */
+ if (flush) {
+ ret = i915_scheduler_flush(engine, true);
+ if (ret)
+ return ret;
+ }
/* Wait upon the last request to be completed */
if (list_empty(&engine->request_list))
@@ -3202,7 +3229,7 @@ intel_stop_engine(struct intel_engine_cs *engine)
if (!intel_engine_initialized(engine))
return;
- ret = intel_engine_idle(engine);
+ ret = intel_engine_idle_flush(engine);
if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
engine->name, ret);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c75c5e1eb7ca..2e7daef8aacf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -474,7 +474,9 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_engine_stopped(struct intel_engine_cs *engine);
-int __must_check intel_engine_idle(struct intel_engine_cs *engine);
+#define intel_engine_idle(engine) __intel_engine_idle((engine), false)
+#define intel_engine_idle_flush(engine) __intel_engine_idle((engine), true)
+int __must_check __intel_engine_idle(struct intel_engine_cs *engine, bool flush);
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);