Diffstat (limited to 'drivers/gpu/drm/i915')
 drivers/gpu/drm/i915/i915_cmd_parser.c     | 79 ++++++++++++++++++-----
 drivers/gpu/drm/i915/i915_dma.c            |  1 +
 drivers/gpu/drm/i915/i915_drv.h            |  1 +
 drivers/gpu/drm/i915/i915_gem.c            |  2 ++
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 52 ++++++++++++++-
 5 files changed, 112 insertions(+), 23 deletions(-)
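
The net effect of the series: instead of parsing the user-owned batch object in place, the parser now copies it into a kernel-owned shadow object drawn from a per-device pool and validates (and, on success, executes) that copy, so userspace can no longer rewrite a batch between validation and execution. The pool itself (i915_gem_batch_pool_init/get/fini) comes from the companion framework patch and is not shown here; the sketch below is only a userspace model of the get-or-allocate behavior this patch relies on, first-fit reuse of an idle buffer at least as large as the request, with illustrative names throughout.

/*
 * Userspace model of the get-or-allocate behavior this patch expects from
 * i915_gem_batch_pool_get(): reuse an idle pooled buffer that is at least
 * "size" bytes, else allocate and track a new one. The struct layout and
 * names are illustrative only, not the kernel implementation.
 */
#include <stdlib.h>

struct pool_buf {
	struct pool_buf *next;
	size_t size;
	int busy;		/* still referenced by an in-flight batch */
	void *data;
};

struct batch_pool {
	struct pool_buf *head;
};

static void *pool_get(struct batch_pool *pool, size_t size)
{
	struct pool_buf *buf;

	/* First fit: any idle buffer big enough can shadow this batch,
	 * which is why the copy pads the tail out to the buffer size. */
	for (buf = pool->head; buf; buf = buf->next) {
		if (!buf->busy && buf->size >= size) {
			buf->busy = 1;
			return buf->data;
		}
	}

	buf = calloc(1, sizeof(*buf));
	if (!buf)
		return NULL;
	buf->data = malloc(size);
	if (!buf->data) {
		free(buf);
		return NULL;
	}
	buf->size = size;
	buf->busy = 1;
	buf->next = pool->head;
	pool->head = buf;
	return buf->data;
}

int main(void)
{
	struct batch_pool pool = { NULL };

	/* two live batches of the same size get two distinct buffers */
	void *a = pool_get(&pool, 4096);
	void *b = pool_get(&pool, 4096);

	return (a && b && a != b) ? 0 : 1;
}
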
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index b882bf2a2388..2a4ccac66b5a 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -848,6 +848,56 @@ finish:
 	return (u32*)addr;
 }
 
+/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
+static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
+		       struct drm_i915_gem_object *src_obj)
+{
+	int ret = 0;
+	int needs_clflush = 0;
+	u32 *src_addr, *dest_addr = NULL;
+
+	ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
+	if (ret) {
+		DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+		return ERR_PTR(ret);
+	}
+
+	src_addr = vmap_batch(src_obj);
+	if (!src_addr) {
+		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
+		ret = -ENOMEM;
+		goto unpin_src;
+	}
+
+	if (needs_clflush)
+		drm_clflush_virt_range((char *)src_addr, src_obj->base.size);
+
+	ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
+	if (ret) {
+		DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
+		goto unmap_src;
+	}
+
+	dest_addr = vmap_batch(dest_obj);
+	if (!dest_addr) {
+		DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
+		ret = -ENOMEM;
+		goto unmap_src;
+	}
+
+	memcpy(dest_addr, src_addr, src_obj->base.size);
+	if (dest_obj->base.size > src_obj->base.size)
+		memset((u8 *)dest_addr + src_obj->base.size, 0,
+		       dest_obj->base.size - src_obj->base.size);
+
+unmap_src:
+	vunmap(src_addr);
+unpin_src:
+	i915_gem_object_unpin_pages(src_obj);
+
+	return ret ? ERR_PTR(ret) : dest_addr;
+}
+
 /**
  * i915_needs_cmd_parser() - should a given ring use software command parsing?
  * @ring: the ring in question
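
Worth noting for reviewers: copy_batch() folds all of its failure paths into the usual single-return-value idiom. The caller gets either a vmap'd pointer to the shadow copy or an errno encoded with ERR_PTR(), and the two unwind labels guarantee the source mapping and page pin are released on every path. Below is a standalone model of that encoding; ERR_PTR/IS_ERR/PTR_ERR are the real kernel helpers being modeled, while the toy copy_batch() is purely illustrative:

/*
 * Userspace model of the kernel's ERR_PTR encoding used by copy_batch():
 * small negative errnos are packed into the top of the pointer range so
 * one return value can carry either a valid mapping or an error code.
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Toy stand-in for copy_batch(): duplicate src into a fresh buffer. */
static void *copy_batch(const void *src, size_t len)
{
	void *dst = malloc(len);

	if (!dst)
		return ERR_PTR(-ENOMEM);
	memcpy(dst, src, len);
	return dst;
}

int main(void)
{
	const char batch[] = "MI_NOOP";
	void *shadow = copy_batch(batch, sizeof(batch));

	if (IS_ERR(shadow))
		return (int)-PTR_ERR(shadow);

	/* ... parse the shadow copy, then release it (vunmap in the driver) */
	free(shadow);
	return 0;
}
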
@@ -964,6 +1014,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
  * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
  * @ring: the ring on which the batch is to execute
  * @batch_obj: the batch buffer in question
+ * @shadow_batch_obj: copy of the batch buffer in question
  * @batch_start_offset: byte offset in the batch at which execution starts
  * @is_master: is the submitting process the drm master?
  *
@@ -975,32 +1026,28 @@ static bool check_cmd(const struct intel_engine_cs *ring,
  */
 int i915_parse_cmds(struct intel_engine_cs *ring,
 		    struct drm_i915_gem_object *batch_obj,
+		    struct drm_i915_gem_object *shadow_batch_obj,
 		    u32 batch_start_offset,
 		    bool is_master)
 {
 	int ret = 0;
 	u32 *cmd, *batch_base, *batch_end;
 	struct drm_i915_cmd_descriptor default_desc = { 0 };
-	int needs_clflush = 0;
 	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
 
-	ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
-	if (ret) {
-		DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
-		return ret;
+	batch_base = copy_batch(shadow_batch_obj, batch_obj);
+	if (IS_ERR(batch_base)) {
+		DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
+		return PTR_ERR(batch_base);
 	}
 
-	batch_base = vmap_batch(batch_obj);
-	if (!batch_base) {
-		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
-		i915_gem_object_unpin_pages(batch_obj);
-		return -ENOMEM;
-	}
-
-	if (needs_clflush)
-		drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
-
 	cmd = batch_base + (batch_start_offset / sizeof(*cmd));
+
+	/*
+	 * We use the source object's size because the shadow object is as
+	 * large or larger and copy_batch() will write MI_NOPs to the extra
+	 * space. Parsing should be faster in some cases this way.
+	 */
 	batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
 
 	while (cmd < batch_end) {
@@ -1062,8 +1109,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 
 	vunmap(batch_base);
 
-	i915_gem_object_unpin_pages(batch_obj);
-
 	return ret;
 }
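
The cursor arithmetic deserves a second look: cmd and batch_end are u32 pointers, so the byte-valued batch_start_offset and object size are scaled by sizeof(u32), and batch_end is computed from cmd rather than batch_base, so the walk extends batch_start_offset bytes past the source object's size. Since the shadow is only guaranteed to be at least source-sized, a nonzero start offset can push the scan bound past the shadow as well; bounding the walk by the actual batch length instead of the object size would tighten this. A small worked example of the arithmetic:

/*
 * Worked example of the cursor math in i915_parse_cmds(): byte-valued
 * offset/size are scaled to 32-bit command dwords, and the end bound is
 * taken relative to cmd, not batch_base, so a nonzero start offset
 * pushes the scan end past the source object's size.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

int main(void)
{
	size_t batch_size = 4096;		/* source object, in bytes */
	u32 batch_start_offset = 256;		/* bytes, from the execbuffer ABI */

	size_t first = batch_start_offset / sizeof(u32);	/* dword 64 */
	size_t end = first + batch_size / sizeof(u32);		/* dword 1088 */

	/* the scan covers [64, 1088) but the source holds 1024 dwords,
	 * so the shadow's NOP-filled slack (or a batch-length-based
	 * bound) has to cover the last 64 dwords */
	printf("scan dwords [%zu, %zu), source has %zu\n",
	       first, end, batch_size / sizeof(u32));
	return 0;
}
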
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 887d88f4c688..52730ed86385 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -928,6 +928,7 @@ int i915_driver_unload(struct drm_device *dev)
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_cleanup_ringbuffer(dev);
+	i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
 	i915_gem_cleanup_stolen(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6490fef7c847..25832e507d55 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2963,6 +2963,7 @@ void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
 bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
 int i915_parse_cmds(struct intel_engine_cs *ring,
 		    struct drm_i915_gem_object *batch_obj,
+		    struct drm_i915_gem_object *shadow_batch_obj,
 		    u32 batch_start_offset,
 		    bool is_master);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b100cec93a58..a7bf31a041f4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5007,6 +5007,8 @@ i915_gem_load(struct drm_device *dev)
 	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
 	register_oom_notifier(&dev_priv->mm.oom_notifier);
 
+	i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
+
 	mutex_init(&dev_priv->fb_tracking.lock);
 }
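
On the init/teardown side, the pool is plain per-device state: set up once in i915_gem_load() and drained in i915_driver_unload() under struct_mutex, ahead of i915_gem_context_fini(). A minimal model of that paired lifetime (names illustrative; the real i915_gem_batch_pool_init() also takes the drm_device):

#include <stdlib.h>

struct pool_buf {
	struct pool_buf *next;
	void *data;
};

struct batch_pool {
	struct pool_buf *head;
};

static void batch_pool_init(struct batch_pool *pool)
{
	pool->head = NULL;	/* nothing is cached until the first get */
}

static void batch_pool_fini(struct batch_pool *pool)
{
	/* Free every cached shadow buffer. In the driver this runs under
	 * dev->struct_mutex, so no new execbuffer can repopulate the
	 * pool mid-teardown. */
	while (pool->head) {
		struct pool_buf *buf = pool->head;

		pool->head = buf->next;
		free(buf->data);
		free(buf);
	}
}

int main(void)
{
	struct batch_pool pool;

	batch_pool_init(&pool);
	batch_pool_fini(&pool);	/* safe on an empty pool */
	return 0;
}
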
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3927d931ad73..cadb04d964e0 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1283,6 +1283,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
+	struct drm_i915_gem_object *shadow_batch_obj = NULL;
+	struct drm_i915_gem_exec_object2 shadow_exec_entry;
 	struct intel_engine_cs *ring;
 	struct intel_context *ctx;
 	struct i915_address_space *vm;
@@ -1399,28 +1401,66 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		ret = -EINVAL;
 		goto err;
 	}
-	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
 	if (i915_needs_cmd_parser(ring)) {
+		shadow_batch_obj =
+			i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
+						batch_obj->base.size);
+		if (IS_ERR(shadow_batch_obj)) {
+			ret = PTR_ERR(shadow_batch_obj);
+			/* Don't try to clean up the obj in the error path */
+			shadow_batch_obj = NULL;
+			goto err;
+		}
+
+		ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
+		if (ret)
+			goto err;
+
 		ret = i915_parse_cmds(ring,
 				      batch_obj,
+				      shadow_batch_obj,
 				      args->batch_start_offset,
 				      file->is_master);
+		i915_gem_object_ggtt_unpin(shadow_batch_obj);
+
 		if (ret) {
 			if (ret != -EACCES)
 				goto err;
 		} else {
+			struct i915_vma *vma;
+
+			memset(&shadow_exec_entry, 0,
+			       sizeof(shadow_exec_entry));
+
+			vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+			vma->exec_entry = &shadow_exec_entry;
+			drm_gem_object_reference(&shadow_batch_obj->base);
+			list_add_tail(&vma->exec_list, &eb->vmas);
+
+			shadow_batch_obj->base.pending_read_domains =
+				batch_obj->base.pending_read_domains;
+
+			batch_obj = shadow_batch_obj;
+
 			/*
-			 * XXX: Actually do this when enabling batch copy...
+			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+			 * bit from MI_BATCH_BUFFER_START commands issued in the
+			 * dispatch_execbuffer implementations. We specifically
+			 * don't want that set when the command parser is
+			 * enabled.
 			 *
-			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
-			 * from MI_BATCH_BUFFER_START commands issued in the
-			 * dispatch_execbuffer implementations. We specifically don't
-			 * want that set when the command parser is enabled.
+			 * FIXME: with aliasing ppgtt, buffers that should only
+			 * be in ggtt still end up in the aliasing ppgtt. remove
+			 * this check when that is fixed.
 			 */
+			if (USES_FULL_PPGTT(dev))
+				flags |= I915_DISPATCH_SECURE;
 		}
 	}
 
+	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but bdw mucks it up again. */
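
Putting the execbuffer hunk together, the new dispatch decision is: fetch a shadow from the pool, pin it for the copy, parse, unpin; on success the shadow replaces the user batch and is dispatched with DISPATCH_SECURE (gated on full PPGTT per the FIXME), while -EACCES deliberately falls through to run the original batch unparsed, and any other error fails the ioctl. A compilable control-flow model with stubbed driver calls (all names are stand-ins):

/*
 * Compilable model of the dispatch decision in the hunk above. Every
 * helper is a stub standing in for a driver call; only the branching is
 * meant to mirror the patch.
 */
#include <errno.h>
#include <stdio.h>

struct gem_object { const char *name; };

static struct gem_object user_batch = { "user batch" };
static struct gem_object shadow_batch = { "shadow batch" };

static int needs_cmd_parser(void) { return 1; }
static int uses_full_ppgtt(void) { return 1; }

/* 0: clean copy in shadow; -EACCES: parser declined; else hard failure */
static int parse_cmds(struct gem_object *batch, struct gem_object *shadow)
{
	(void)batch; (void)shadow;
	return 0;
}

int main(void)
{
	struct gem_object *batch = &user_batch;
	unsigned int dispatch_secure = 0;
	int ret;

	if (needs_cmd_parser()) {
		ret = parse_cmds(batch, &shadow_batch);
		if (ret) {
			if (ret != -EACCES)
				return -ret;	/* fail the ioctl */
			/* -EACCES: fall through, run the original batch */
		} else {
			batch = &shadow_batch;	/* execute the checked copy */
			if (uses_full_ppgtt())	/* FIXME gate from the patch */
				dispatch_secure = 1;
		}
	}

	printf("dispatching %s (secure=%u)\n", batch->name, dispatch_secure);
	return 0;
}
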