summaryrefslogtreecommitdiff
path: root/drivers
diff options
context:
space:
mode:
authorJohn Harrison <John.C.Harrison@Intel.com>2014-04-09 13:19:05 +0100
committerJohn Harrison <John.C.Harrison@Intel.com>2016-06-28 17:17:08 +0100
commit8f43178b1fc5ff21e4c1b5d7210905723962bb5c (patch)
tree2c99cda0ce3c7731c21f466a04ef82ac855f09d6 /drivers
parenta150fb78a8aa62b595dbc6972160f6d2355a5803 (diff)
drm/i915: Added tracking/locking of batch buffer objects
The scheduler needs to track interdependencies between batch buffers. These are calculated by analysing the object lists of the buffers and looking for commonality. The scheduler also needs to keep those buffers locked long after the initial IOCTL call has returned to user land. v3: Updated to support read-read optimisation. v5: Updated due to changes to earlier patches in series for splitting bypass mode into a separate function and consolidating the clean up code. For: VIZ-1587 Signed-off-by: John Harrison <John.C.Harrison@Intel.com> Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c45
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c15
2 files changed, 58 insertions, 2 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7568e5f0c98c..df86c00ddeac 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1480,7 +1480,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct i915_execbuffer_params *params = &qe.params;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 dispatch_flags;
- int ret;
+ int ret, i;
bool need_relocs;
if (!i915_gem_check_execbuffer(args))
@@ -1553,6 +1553,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
+ qe.objs = kzalloc(sizeof(*qe.objs) * args->buffer_count, GFP_KERNEL);
+ if (!qe.objs) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
/* Look up object handles */
ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
@@ -1677,9 +1683,32 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->args_DR1 = args->DR1;
params->args_DR4 = args->DR4;
params->batch_obj = batch_obj;
- params->ctx = ctx;
params->request = req;
+ /*
+ * Save away the list of objects used by this batch buffer for the
+ * purpose of tracking inter-buffer dependencies.
+ */
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ /*
+ * NB: 'drm_gem_object_lookup()' increments the object's
+ * reference count and so must be matched by a
+ * 'drm_gem_object_unreference' call.
+ */
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ qe.objs[i].obj = obj;
+ qe.objs[i].read_only = obj->base.pending_write_domain == 0;
+
+ }
+ qe.num_objs = i;
+
+ /* Lock and save the context object as well. */
+ i915_gem_context_reference(ctx);
+ params->ctx = ctx;
+
ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
if (ret)
goto err_client;
@@ -1715,6 +1744,18 @@ err:
i915_gem_context_unreference(ctx);
eb_destroy(eb);
+ /* Need to release the objects: */
+ if (qe.objs) {
+ for (i = 0; i < qe.num_objs; i++)
+ drm_gem_object_unreference(&qe.objs[i].obj->base);
+
+ kfree(qe.objs);
+ }
+
+ /* Context too */
+ if (params->ctx)
+ i915_gem_context_unreference(params->ctx);
+
/*
* If the request was created but not successfully submitted then it
* must be freed again. If it was submitted then it is being tracked
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index ef2ccec8618a..08f945daadc3 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -758,6 +758,8 @@ void i915_scheduler_wakeup(struct drm_device *dev)
*/
void i915_scheduler_clean_node(struct i915_scheduler_queue_entry *node)
{
+ int i;
+
if (!I915_SQS_IS_COMPLETE(node)) {
WARN(!node->params.request->cancelled,
"Cleaning active node: %d!\n", node->status);
@@ -775,6 +777,19 @@ void i915_scheduler_clean_node(struct i915_scheduler_queue_entry *node)
node->params.batch_obj = NULL;
}
+ /* Release the locked buffers: */
+ for (i = 0; i < node->num_objs; i++)
+ drm_gem_object_unreference(&node->objs[i].obj->base);
+ kfree(node->objs);
+ node->objs = NULL;
+ node->num_objs = 0;
+
+ /* Context too: */
+ if (node->params.ctx) {
+ i915_gem_context_unreference(node->params.ctx);
+ node->params.ctx = NULL;
+ }
+
/* And anything else owned by the node: */
if (node->params.cliprects) {
kfree(node->params.cliprects);