| field | value | date |
|---|---|---|
| author | Ben Skeggs <bskeggs@redhat.com> | 2013-05-07 09:48:30 +1000 |
| committer | Ben Skeggs <bskeggs@redhat.com> | 2013-06-13 10:38:43 +1000 |
| commit | 7acf7b5a5e539a24cf5aea3fc29234b6e4c3aeb7 | |
| tree | 6571c28a1e578eb22ec4d0f52eea9f82aa7dd54e | |
| parent | e6248cf52fa437e65d95031e29b1b0da8516ad15 | |
drm: delay busy bo vma removal until fence signals
As opposed to an explicit wait, this allows userspace to avoid stalling on buffer deletion.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
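Instead of blocking in nouveau_bo_vma_del() via ttm_bo_wait(), the patch gives each fence a small work list: nouveau_fence_work() queues a callback, nouveau_fence_signal() schedules the queued callbacks when the fence completes, and the GEM close path uses this to defer the VMA unmap/put/free. The sketch below is a minimal userspace model of that queue-or-run-now pattern, not the driver code; the names (my_fence, fence_work, fence_signal, delete_mapping) are illustrative only.

```c
/*
 * Userspace model of the pattern this patch introduces (not the kernel
 * code): callbacks queued with fence_work() run when fence_signal()
 * fires, or immediately if the fence has already signalled.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fence_cb {
	void (*func)(void *);
	void *data;
	struct fence_cb *next;
};

struct my_fence {
	bool signalled;
	struct fence_cb *work;	/* pending callbacks */
};

/* Run func(data) now if the fence is done, otherwise queue it. */
static int fence_work(struct my_fence *f, void (*func)(void *), void *data)
{
	struct fence_cb *cb;

	if (f->signalled) {
		func(data);
		return 0;
	}

	cb = malloc(sizeof(*cb));
	if (!cb) {		/* fall back to running synchronously */
		func(data);
		return -1;
	}

	cb->func = func;
	cb->data = data;
	cb->next = f->work;
	f->work = cb;
	return 0;
}

/* Called when the work tracked by the fence completes. */
static void fence_signal(struct my_fence *f)
{
	struct fence_cb *cb, *next;

	f->signalled = true;
	for (cb = f->work; cb; cb = next) {
		next = cb->next;
		cb->func(cb->data);
		free(cb);
	}
	f->work = NULL;
}

static void delete_mapping(void *data)
{
	printf("unmap and free %s\n", (const char *)data);
}

int main(void)
{
	struct my_fence fence = { .signalled = false, .work = NULL };

	/* Instead of blocking until the fence signals... */
	fence_work(&fence, delete_mapping, "vma");

	/* ...the cleanup runs later, when the fence does signal. */
	fence_signal(&fence);
	return 0;
}
```

In the real patch the callback runs from a workqueue (schedule_work()) and the queueing is done under fctx->lock; the model keeps only the control flow.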
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drm/nouveau_bo.c | 7 |
| -rw-r--r-- | drm/nouveau_fence.c | 73 |
| -rw-r--r-- | drm/nouveau_fence.h | 2 |
| -rw-r--r-- | drm/nouveau_gem.c | 41 |

4 files changed, 108 insertions(+), 15 deletions(-)
```diff
diff --git a/drm/nouveau_bo.c b/drm/nouveau_bo.c
index 86eef685..a1cf8255 100644
--- a/drm/nouveau_bo.c
+++ b/drm/nouveau_bo.c
@@ -1550,13 +1550,8 @@ void
 nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 {
 	if (vma->node) {
-		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
-			spin_lock(&nvbo->bo.bdev->fence_lock);
-			ttm_bo_wait(&nvbo->bo, false, false, false);
-			spin_unlock(&nvbo->bo.bdev->fence_lock);
+		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
 			nouveau_vm_unmap(vma);
-		}
-
 		nouveau_vm_put(vma);
 		list_del(&vma->head);
 	}
diff --git a/drm/nouveau_fence.c b/drm/nouveau_fence.c
index 6c946837..1680d918 100644
--- a/drm/nouveau_fence.c
+++ b/drm/nouveau_fence.c
@@ -35,15 +35,34 @@
 
 #include <engine/fifo.h>
 
+struct fence_work {
+	struct work_struct base;
+	struct list_head head;
+	void (*func)(void *);
+	void *data;
+};
+
+static void
+nouveau_fence_signal(struct nouveau_fence *fence)
+{
+	struct fence_work *work, *temp;
+
+	list_for_each_entry_safe(work, temp, &fence->work, head) {
+		schedule_work(&work->base);
+		list_del(&work->head);
+	}
+
+	fence->channel = NULL;
+	list_del(&fence->head);
+}
+
 void
 nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
 	struct nouveau_fence *fence, *fnext;
 	spin_lock(&fctx->lock);
 	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
-		fence->channel = NULL;
-		list_del(&fence->head);
-		nouveau_fence_unref(&fence);
+		nouveau_fence_signal(fence);
 	}
 	spin_unlock(&fctx->lock);
 }
@@ -57,6 +76,50 @@ nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
 }
 
 static void
+nouveau_fence_work_handler(struct work_struct *kwork)
+{
+	struct fence_work *work = container_of(kwork, typeof(*work), base);
+	work->func(work->data);
+	kfree(work);
+}
+
+void
+nouveau_fence_work(struct nouveau_fence *fence,
+		   void (*func)(void *), void *data)
+{
+	struct nouveau_channel *chan = fence->channel;
+	struct nouveau_fence_chan *fctx;
+	struct fence_work *work = NULL;
+
+	if (nouveau_fence_done(fence)) {
+		func(data);
+		return;
+	}
+
+	fctx = chan->fence;
+	work = kmalloc(sizeof(*work), GFP_KERNEL);
+	if (!work) {
+		WARN_ON(nouveau_fence_wait(fence, false, false));
+		func(data);
+		return;
+	}
+
+	spin_lock(&fctx->lock);
+	if (!fence->channel) {
+		spin_unlock(&fctx->lock);
+		kfree(work);
+		func(data);
+		return;
+	}
+
+	INIT_WORK(&work->base, nouveau_fence_work_handler);
+	work->func = func;
+	work->data = data;
+	list_add(&work->head, &fence->work);
+	spin_unlock(&fctx->lock);
+}
+
+static void
 nouveau_fence_update(struct nouveau_channel *chan)
 {
 	struct nouveau_fence_chan *fctx = chan->fence;
@@ -67,8 +130,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
 		if (fctx->read(chan) < fence->sequence)
 			break;
 
-		fence->channel = NULL;
-		list_del(&fence->head);
+		nouveau_fence_signal(fence);
 		nouveau_fence_unref(&fence);
 	}
 	spin_unlock(&fctx->lock);
@@ -265,6 +327,7 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
 	if (!fence)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&fence->work);
 	fence->sysmem = sysmem;
 	kref_init(&fence->kref);
 
diff --git a/drm/nouveau_fence.h b/drm/nouveau_fence.h
index c8994340..c57bb61d 100644
--- a/drm/nouveau_fence.h
+++ b/drm/nouveau_fence.h
@@ -5,6 +5,7 @@ struct nouveau_drm;
 
 struct nouveau_fence {
 	struct list_head head;
+	struct list_head work;
 	struct kref kref;
 
 	bool sysmem;
@@ -22,6 +23,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
 
 int  nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
+void nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *);
 int  nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
 int  nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
 
diff --git a/drm/nouveau_gem.c b/drm/nouveau_gem.c
index b4b4d0c1..c0e324b5 100644
--- a/drm/nouveau_gem.c
+++ b/drm/nouveau_gem.c
@@ -101,6 +101,41 @@ out:
 	return ret;
 }
 
+static void
+nouveau_gem_object_delete(void *data)
+{
+	struct nouveau_vma *vma = data;
+	nouveau_vm_unmap(vma);
+	nouveau_vm_put(vma);
+	kfree(vma);
+}
+
+static void
+nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
+{
+	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
+	struct nouveau_fence *fence = NULL;
+
+	list_del(&vma->head);
+
+	if (mapped) {
+		spin_lock(&nvbo->bo.bdev->fence_lock);
+		if (nvbo->bo.sync_obj)
+			fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
+	}
+
+	if (fence) {
+		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
+	} else {
+		if (mapped)
+			nouveau_vm_unmap(vma);
+		nouveau_vm_put(vma);
+		kfree(vma);
+	}
+	nouveau_fence_unref(&fence);
+}
+
 void
 nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
@@ -118,10 +153,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 
 	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
 	if (vma) {
-		if (--vma->refcount == 0) {
-			nouveau_bo_vma_del(nvbo, vma);
-			kfree(vma);
-		}
+		if (--vma->refcount == 0)
+			nouveau_gem_object_unmap(nvbo, vma);
 	}
 	ttm_bo_unreserve(&nvbo->bo);
 }
```
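One detail worth noting in nouveau_fence_work() above: a fence can signal between the nouveau_fence_done() fast path and the moment the work item is queued, so the function re-checks fence->channel under fctx->lock and falls back to running the callback synchronously if the fence is already gone (or if the work item cannot be allocated). Below is a small userspace model of that re-check-under-the-lock pattern, with hypothetical names (model_fence, try_queue), not driver code.

```c
/*
 * Userspace model (pthreads) of the re-check nouveau_fence_work() does
 * under fctx->lock: a fence that signals between the fast-path check
 * and taking the lock must not strand a queued callback.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct model_fence {
	pthread_mutex_t lock;
	bool done;			/* set by the signalling side under lock */
};

/* Returns true if a callback may be queued, false if it must run now. */
static bool try_queue(struct model_fence *f)
{
	if (f->done)			/* fast path: already signalled */
		return false;

	pthread_mutex_lock(&f->lock);
	if (f->done) {			/* signalled while we took the lock */
		pthread_mutex_unlock(&f->lock);
		return false;
	}
	/* ...queue the callback on the fence's work list here... */
	pthread_mutex_unlock(&f->lock);
	return true;
}

int main(void)
{
	struct model_fence f = { PTHREAD_MUTEX_INITIALIZER, false };

	printf("queue callback: %s\n", try_queue(&f) ? "yes" : "run now");

	pthread_mutex_lock(&f.lock);	/* pretend the fence signalled */
	f.done = true;
	pthread_mutex_unlock(&f.lock);

	printf("queue callback: %s\n", try_queue(&f) ? "yes" : "run now");
	return 0;
}
```

Build with -pthread; the point is only that the queued path is taken solely while the lock is held and the fence is still pending.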