author     Dave Airlie <airlied@redhat.com>        2012-08-16 10:15:34 +1000
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2012-08-17 10:10:06 +0200
commit     ec6f1bb90c9865d4a5af01b1c643bcf020d88706 (patch)
tree       ba4ab858b7d96ea08ace354332eb9c79bafb65eb /drivers/gpu/drm/i915/i915_gem_dmabuf.c
parent     0826874a664f9e9479d50fda23923b87a50cda0d (diff)
drm/i915: implement dma buf begin_cpu_access (v2)
In order for udl vmap to work properly, we need to push the object into the CPU domain before we start copying the data to the USB device.

This, along with the udl change, avoids the need for an explicit userspace mapping.

v2: add a flag for userspace to query to know whether the Intel kernel driver can deal with the vmap flushing properly. In theory udl would need a flag also, but I intend to push the patches very close to each other, and other drivers should do the right thing from the start.

I've added a test to my intel-gpu-tools prime branch; however, testing this is a bit messy since the only way to get udl to vmap is to render something. I've tested this with real code as well to make sure it works.

Signed-off-by: Dave Airlie <airlied@redhat.com>
[danvet: resolved conflict, which required reallocating the PARAM number to 21.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
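For reference, here is a minimal userspace sketch (not part of this patch) of how a client could query the v2 flag mentioned above. It assumes the GETPARAM id added alongside this series is exposed in i915_drm.h as I915_PARAM_HAS_PRIME_VMAP_FLUSH (value 21, per the resolution note above) and uses libdrm's drmIoctl():

/*
 * Hypothetical check: does the running i915 flush objects to the CPU
 * domain on dma-buf begin_cpu_access?  Returns non-zero if it does.
 */
#include <sys/ioctl.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int has_prime_vmap_flush(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_PRIME_VMAP_FLUSH,	/* 21 in this series */
		.value = &value,
	};

	/* Older kernels reject the param; treat that as "not supported". */
	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;

	return value;
}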
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_dmabuf.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index aa308e1337db..ceaad5af01a4 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -151,6 +151,22 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
return -EINVAL;
}
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+ bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, write);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -162,6 +178,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
+ .begin_cpu_access = i915_gem_begin_cpu_access,
};
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
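
For the importer side, a sketch (again not from this patch) of the call pattern the new hook enables: a driver such as udl would bracket its CPU copy with dma_buf_begin_cpu_access()/dma_buf_end_cpu_access(), using the start/length/direction signature of this era, so that i915 gets a chance to move the object to the CPU domain before the data is read. The helper name and error handling below are illustrative only:

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/string.h>

/* Illustrative importer helper: copy 'len' bytes out of an imported dma-buf. */
static int example_copy_from_dmabuf(struct dma_buf *buf, void *dst, size_t len)
{
	void *vaddr;
	int ret;

	/* Lets the exporter (i915 here) flush the object to the CPU domain. */
	ret = dma_buf_begin_cpu_access(buf, 0, len, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_vmap(buf);
	if (!vaddr) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(dst, vaddr, len);
	dma_buf_vunmap(buf, vaddr);
out:
	dma_buf_end_cpu_access(buf, 0, len, DMA_FROM_DEVICE);
	return ret;
}

Note how DMA_FROM_DEVICE maps to a read-only CPU access in the new i915_gem_begin_cpu_access() above (write stays false), which is exactly the case udl hits when copying scanout data to the USB device.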