author    Chris Wilson <chris@chris-wilson.co.uk>    2016-04-27 09:02:01 +0100
committer Jani Nikula <jani.nikula@intel.com>    2016-05-23 11:17:13 +0300
commit    e32da7adaf3c511e7e6ffa0d5b680e07201630b8 (patch)
tree      01f4b2e9dce97143311ad00b4189d5781c0f40c9 /drivers/gpu
parent    5fbd0418eef283269f2cc1b90b63d88f20c887bf (diff)
drm/i915: Protect gen7 irq_seqno_barrier with uncore lock
Faced with sporadic machine hangs on gen7 that mimic the issue of concurrent
writes to the same cacheline, and that seem to start with commit 9b9ed3093613
("drm/i915: Remove forcewake dance from seqno/irq barrier on legacy gen6+"),
let us restore the spinlock around the mmio read.

Fixes: 9b9ed3093613 ("drm/i915: Remove forcewake dance from seqno/irq...")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1461744121-27051-1-git-send-email-chris@chris-wilson.co.uk
Tested-by: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
(cherry picked from commit bcbdb6d01150dcc1769ddc9baaaf7f102f27f919)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
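For reference, a sketch of how gen6_seqno_barrier() reads once this patch is
applied, reconstructed only from the two hunks below (kernel-internal code,
not a standalone buildable example; the unchanged middle of the comment is
elided):

static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->dev->dev_private;

        /* Workaround to force correct ordering between irq and seqno writes on
         * ivb (and maybe also on snb) by reading from a CS register (like
         * ACTHD) before reading the status page.
         *
         * ... (unchanged comment lines elided) ...
         *
         * Also note that to prevent whole machine hangs on gen7, we have to
         * take the spinlock to guard against concurrent cacheline access.
         */
        spin_lock_irq(&dev_priv->uncore.lock);
        POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
        spin_unlock_irq(&dev_priv->uncore.lock);
}

Taking dev_priv->uncore.lock here serialises this mmio posting read with other
register accesses guarded by the same lock, which is what closes the
concurrent-cacheline window described in the commit message above.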
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 245386e20c52..fd93987b16f2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1573,6 +1573,8 @@ pc_render_add_request(struct drm_i915_gem_request *req)
 static void
 gen6_seqno_barrier(struct intel_engine_cs *engine)
 {
+        struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
         /* Workaround to force correct ordering between irq and seqno writes on
          * ivb (and maybe also on snb) by reading from a CS register (like
          * ACTHD) before reading the status page.
@@ -1584,9 +1586,13 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
          * the write time to land, but that would incur a delay after every
          * batch i.e. much more frequent than a delay when waiting for the
          * interrupt (with the same net latency).
+         *
+         * Also note that to prevent whole machine hangs on gen7, we have to
+         * take the spinlock to guard against concurrent cacheline access.
          */
-        struct drm_i915_private *dev_priv = engine->dev->dev_private;
+        spin_lock_irq(&dev_priv->uncore.lock);
         POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
+        spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
 static u32