Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--  drivers/dma-buf/dma-buf.c           |  9
-rw-r--r--  drivers/dma-buf/dma-fence-array.c   | 14
-rw-r--r--  drivers/dma-buf/reservation.c       | 66
3 files changed, 58 insertions(+), 31 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 12b62d0aac27..539450713838 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -351,13 +351,13 @@ static inline int is_dma_buf_file(struct file *file)
*
* 2. Userspace passes this file-descriptors to all drivers it wants this buffer
* to share with: First the filedescriptor is converted to a &dma_buf using
- * dma_buf_get(). The the buffer is attached to the device using
+ * dma_buf_get(). Then the buffer is attached to the device using
* dma_buf_attach().
*
* Up to this stage the exporter is still free to migrate or reallocate the
* backing storage.
*
- * 3. Once the buffer is attached to all devices userspace can inniate DMA
+ * 3. Once the buffer is attached to all devices userspace can initiate DMA
* access to the shared buffer. In the kernel this is done by calling
* dma_buf_map_attachment() and dma_buf_unmap_attachment().
*
@@ -617,7 +617,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
* Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
* on error. May return -EINTR if it is interrupted by a signal.
*
- * A mapping must be unmapped again using dma_buf_map_attachment(). Note that
+ * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
* the underlying backing storage is pinned for as long as a mapping exists,
* therefore users/importers should not hold onto a mapping for undue amounts of
* time.
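The matching teardown, continuing the sketch above: every mapping must be
balanced with dma_buf_unmap_attachment(), since the backing storage stays
pinned for as long as a mapping exists:

	dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);	/* unpins storage */
	dma_buf_detach(buf, att);
	dma_buf_put(buf);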
@@ -1179,8 +1179,7 @@ static int dma_buf_init_debugfs(void)
static void dma_buf_uninit_debugfs(void)
{
- if (dma_buf_debugfs_dir)
- debugfs_remove_recursive(dma_buf_debugfs_dir);
+ debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
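The dropped NULL check is redundant because debugfs_remove_recursive()
already returns early for NULL and ERR_PTR dentries; paraphrased, its
entry check looks like:

	void debugfs_remove_recursive(struct dentry *dentry)
	{
		if (IS_ERR_OR_NULL(dentry))
			return;
		/* ... tear down the subtree ... */
	}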
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 0350829ba62e..dd1edfb27b61 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -31,6 +31,14 @@ static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
return "unbound";
}
+static void irq_dma_fence_array_work(struct irq_work *wrk)
+{
+ struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
+
+ dma_fence_signal(&array->base);
+ dma_fence_put(&array->base);
+}
+
static void dma_fence_array_cb_func(struct dma_fence *f,
struct dma_fence_cb *cb)
{
@@ -39,8 +47,9 @@ static void dma_fence_array_cb_func(struct dma_fence *f,
struct dma_fence_array *array = array_cb->array;
if (atomic_dec_and_test(&array->num_pending))
- dma_fence_signal(&array->base);
- dma_fence_put(&array->base);
+ irq_work_queue(&array->work);
+ else
+ dma_fence_put(&array->base);
}
static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
@@ -136,6 +145,7 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
spin_lock_init(&array->lock);
dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
context, seqno);
+ init_irq_work(&array->work, irq_dma_fence_array_work);
array->num_fences = num_fences;
atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
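This hunk defers the signalling to irq_work: dma_fence_array_cb_func() may
run from another fence's callback, under that fence's lock, so it now only
queues work, and dma_fence_signal() runs later from irq_work context. Note
the reference handling: the last callback hands its reference over to the
irq_work (which puts it after signalling), while every earlier callback
drops its own reference immediately. The generic shape of the pattern,
with placeholder names:

	#include <linux/irq_work.h>

	struct deferred {
		struct irq_work work;
		/* state the handler needs */
	};

	static void deferred_fn(struct irq_work *wrk)
	{
		struct deferred *d = container_of(wrk, typeof(*d), work);
		/* heavy or lock-taking work runs here, off the hot path */
	}

	/* setup, once: */  init_irq_work(&d->work, deferred_fn);
	/* hot path:    */  irq_work_queue(&d->work);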
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index b44d9d7db347..04ebe2204c12 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -104,7 +104,8 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
struct reservation_object_list *fobj,
struct dma_fence *fence)
{
- u32 i;
+ struct dma_fence *signaled = NULL;
+ u32 i, signaled_idx;
dma_fence_get(fence);
@@ -126,17 +127,28 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
dma_fence_put(old_fence);
return;
}
+
+ if (!signaled && dma_fence_is_signaled(old_fence)) {
+ signaled = old_fence;
+ signaled_idx = i;
+ }
}
/*
* memory barrier is added by write_seqcount_begin,
* fobj->shared_count is protected by this lock too
*/
- RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
- fobj->shared_count++;
+ if (signaled) {
+ RCU_INIT_POINTER(fobj->shared[signaled_idx], fence);
+ } else {
+ RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+ fobj->shared_count++;
+ }
write_seqcount_end(&obj->seq);
preempt_enable();
+
+ dma_fence_put(signaled);
}
static void
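The in-place path now recycles the slot of the first already-signaled fence
instead of always growing the list; signaled_idx is only read when signaled
is non-NULL, and the trailing dma_fence_put(signaled) is a no-op for NULL.
Replacing the slot inside the write_seqcount section is what keeps lockless
readers correct, since they follow the usual retry pattern, roughly:

	unsigned seq;

	do {
		seq = read_seqcount_begin(&obj->seq);
		/* rcu_dereference() the shared[] slots, take references */
	} while (read_seqcount_retry(&obj->seq, seq));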
@@ -145,8 +157,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
struct reservation_object_list *fobj,
struct dma_fence *fence)
{
- unsigned i;
- struct dma_fence *old_fence = NULL;
+ unsigned i, j, k;
dma_fence_get(fence);
@@ -162,24 +173,21 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
* references from the old struct are carried over to
* the new.
*/
- fobj->shared_count = old->shared_count;
-
- for (i = 0; i < old->shared_count; ++i) {
+ for (i = 0, j = 0, k = fobj->shared_max; i < old->shared_count; ++i) {
struct dma_fence *check;
check = rcu_dereference_protected(old->shared[i],
reservation_object_held(obj));
- if (!old_fence && check->context == fence->context) {
- old_fence = check;
- RCU_INIT_POINTER(fobj->shared[i], fence);
- } else
- RCU_INIT_POINTER(fobj->shared[i], check);
- }
- if (!old_fence) {
- RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
- fobj->shared_count++;
+ if (check->context == fence->context ||
+ dma_fence_is_signaled(check))
+ RCU_INIT_POINTER(fobj->shared[--k], check);
+ else
+ RCU_INIT_POINTER(fobj->shared[j++], check);
}
+ fobj->shared_count = j;
+ RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+ fobj->shared_count++;
done:
preempt_disable();
@@ -192,10 +200,18 @@ done:
write_seqcount_end(&obj->seq);
preempt_enable();
- if (old)
- kfree_rcu(old, rcu);
+ if (!old)
+ return;
- dma_fence_put(old_fence);
+ /* Drop the references to the signaled fences */
+ for (i = k; i < fobj->shared_max; ++i) {
+ struct dma_fence *f;
+
+ f = rcu_dereference_protected(fobj->shared[i],
+ reservation_object_held(obj));
+ dma_fence_put(f);
+ }
+ kfree_rcu(old, rcu);
}
/**
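The replace path now partitions the old fences while copying: fences still
worth keeping are packed from index 0 via j, while fences that are signaled
or superseded (same context as the new fence) are parked from shared_max
downward via k, so the slots [k, shared_max) collect everything whose
reference gets dropped once the new list is published. The same idiom in
isolation, with illustrative names:

	unsigned i, j = 0, k = max;

	for (i = 0; i < count; ++i) {
		if (is_stale(src[i]))
			dst[--k] = src[i];	/* parked for later release */
		else
			dst[j++] = src[i];	/* carried over, stays visible */
	}
	/* append the new entry at dst[j], then drop refs on dst[k..max-1] */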
@@ -318,7 +334,7 @@ retry:
continue;
}
- dst_list->shared[dst_list->shared_count++] = fence;
+ rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
}
} else {
dst_list = NULL;
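rcu_assign_pointer() is used here because the shared[] slots are __rcu
pointers that lockless readers may dereference once the list is published:
it adds the release barrier that makes the fence's initialization visible
before the pointer itself (and keeps sparse happy about the annotation).
The general writer/reader pairing:

	/* writer */
	p->seqno = 1;			/* initialize first ...           */
	rcu_assign_pointer(slot, p);	/* ... then publish, with barrier */

	/* reader */
	q = rcu_dereference(slot);	/* dependency-ordered load */
	if (q)
		use(q);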
@@ -455,13 +471,15 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
unsigned long timeout)
{
struct dma_fence *fence;
- unsigned seq, shared_count, i = 0;
+ unsigned seq, shared_count;
long ret = timeout ? timeout : 1;
+ int i;
retry:
shared_count = 0;
seq = read_seqcount_begin(&obj->seq);
rcu_read_lock();
+ i = -1;
fence = rcu_dereference(obj->fence_excl);
if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
@@ -477,14 +495,14 @@ retry:
fence = NULL;
}
- if (!fence && wait_all) {
+ if (wait_all) {
struct reservation_object_list *fobj =
rcu_dereference(obj->fence);
if (fobj)
shared_count = fobj->shared_count;
- for (i = 0; i < shared_count; ++i) {
+ for (i = 0; !fence && i < shared_count; ++i) {
struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,