summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Harrison <John.C.Harrison@Intel.com>2015-03-16 11:14:57 +0000
committerJohn Harrison <John.C.Harrison@Intel.com>2016-05-06 14:12:54 +0100
commit3d201990c9927dbe16b5b358eb943a56a6b8217c (patch)
tree64d51934db0ee634336b16513d9f12539f535720
parent5b5b8960b1a6609161a7c69a3524209d54e78a2f (diff)
drm/i915: Keep the reserved space mechanism happy
Ring space is reserved when constructing a request to ensure that the subsequent 'add_request()' call cannot fail due to waiting for space on a busy or broken GPU. However, the scheduler jumps into the middle of the execbuffer process between request creation and request submission. Thus it needs to cancel the reserved space when the request is simply added to the scheduler's queue and not yet submitted. Similarly, it needs to re-reserve the space when it finally does want to send the batch buffer to the hardware. v3: Updated to use locally cached request pointer. v5: Updated due to changes to earlier patches in series - for runtime PM calls and splitting bypass mode into a separate function. For: VIZ-1587 Signed-off-by: John Harrison <John.C.Harrison@Intel.com> Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c20
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c4
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c13
3 files changed, 29 insertions, 8 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1565ee5631d7..1e47fa450d0c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1299,18 +1299,22 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
/* The mutex must be acquired before calling this function */
WARN_ON(!mutex_is_locked(&params->dev->struct_mutex));
+ ret = intel_ring_reserve_space(req);
+ if (ret)
+ goto error;
+
/*
* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
ret = intel_ring_invalidate_all_caches(req);
if (ret)
- return ret;
+ goto error;
/* Switch to the correct context for the batch */
ret = i915_switch_context(req);
if (ret)
- return ret;
+ goto error;
WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
"%s didn't clear reload\n", engine->name);
@@ -1319,7 +1323,7 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
params->instp_mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(req, 4);
if (ret)
- return ret;
+ goto error;
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
@@ -1333,7 +1337,7 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
if (params->args_flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(params->dev, req);
if (ret)
- return ret;
+ goto error;
}
exec_len = params->args_batch_len;
@@ -1347,13 +1351,17 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
exec_start, exec_len,
params->dispatch_flags);
if (ret)
- return ret;
+ goto error;
trace_i915_gem_ring_dispatch(req, params->dispatch_flags);
i915_gem_execbuffer_retire_commands(params);
- return 0;
+error:
+ if (ret)
+ intel_ring_reserved_space_cancel(req->ringbuf);
+
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 2dc5597941ea..2fb3f52466dc 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -519,6 +519,8 @@ static int i915_scheduler_queue_execbuffer_bypass(struct i915_scheduler_queue_en
struct i915_scheduler *scheduler = dev_priv->scheduler;
int ret;
+ intel_ring_reserved_space_cancel(qe->params.request->ringbuf);
+
scheduler->flags[qe->params.engine->id] |= I915_SF_SUBMITTING;
ret = dev_priv->gt.execbuf_final(&qe->params);
scheduler->flags[qe->params.engine->id] &= ~I915_SF_SUBMITTING;
@@ -584,6 +586,8 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
node->stamp = jiffies;
i915_gem_request_reference(node->params.request);
+ intel_ring_reserved_space_cancel(node->params.request->ringbuf);
+
WARN_ON(node->params.request->scheduler_qe);
node->params.request->scheduler_qe = node;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6bf0e3f21342..0a4ef615955a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1010,13 +1010,17 @@ int intel_execlists_submission_final(struct i915_execbuffer_params *params)
/* The mutex must be acquired before calling this function */
WARN_ON(!mutex_is_locked(&params->dev->struct_mutex));
+ ret = intel_logical_ring_reserve_space(req);
+ if (ret)
+ goto err;
+
/*
* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
ret = logical_ring_invalidate_all_caches(req);
if (ret)
- return ret;
+ goto err;
if (engine == &dev_priv->engine[RCS] &&
params->instp_mode != dev_priv->relative_constants_mode) {
@@ -1038,13 +1042,18 @@ int intel_execlists_submission_final(struct i915_execbuffer_params *params)
ret = engine->emit_bb_start(req, exec_start, params->dispatch_flags);
if (ret)
- return ret;
+ goto err;
trace_i915_gem_ring_dispatch(req, params->dispatch_flags);
i915_gem_execbuffer_retire_commands(params);
return 0;
+
+err:
+ intel_ring_reserved_space_cancel(params->request->ringbuf);
+
+ return ret;
}
void intel_execlists_retire_requests(struct intel_engine_cs *engine)