author     Chris Wilson <chris@chris-wilson.co.uk>    2019-07-30 21:58:05 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>    2019-08-02 16:03:53 +0100
commit     cd2a4eaf8c79daa41bdeb7251dbf66413291fd70 (patch)
tree       78099f13675bdd0f80faf84eec3de74129ed0832
parent     e4661f144497f588b6baa6f00d7f3e4d58b29077 (diff)
drm/i915: Report resv_obj allocation failure
Since commit 64d6c500a384 ("drm/i915: Generalise GPU activity
tracking"), we have been prepared for i915_vma_move_to_active() to fail.
We can take advantage of this to report the failure for allocating the
shared-fence slot in the reservation_object.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190730205805.3733-1-chris@chris-wilson.co.uk
-rw-r--r--   drivers/gpu/drm/i915/i915_vma.c | 31
1 file changed, 9 insertions, 22 deletions
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index eb16a1a93bbc..7734d6218ce7 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -886,23 +886,6 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
 		list_del(&vma->obj->userfault_link);
 }
 
-static void export_fence(struct i915_vma *vma,
-			 struct i915_request *rq,
-			 unsigned int flags)
-{
-	struct reservation_object *resv = vma->resv;
-
-	/*
-	 * Ignore errors from failing to allocate the new fence, we can't
-	 * handle an error right now. Worst case should be missed
-	 * synchronisation leading to rendering corruption.
-	 */
-	if (flags & EXEC_OBJECT_WRITE)
-		reservation_object_add_excl_fence(resv, &rq->fence);
-	else if (reservation_object_reserve_shared(resv, 1) == 0)
-		reservation_object_add_shared_fence(resv, &rq->fence);
-}
-
 int i915_vma_move_to_active(struct i915_vma *vma,
 			    struct i915_request *rq,
 			    unsigned int flags)
@@ -926,14 +909,20 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	if (unlikely(err))
 		return err;
 
-	obj->write_domain = 0;
 	if (flags & EXEC_OBJECT_WRITE) {
-		obj->write_domain = I915_GEM_DOMAIN_RENDER;
-
 		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
 			__i915_active_request_set(&obj->frontbuffer_write, rq);
 
+		reservation_object_add_excl_fence(vma->resv, &rq->fence);
+		obj->write_domain = I915_GEM_DOMAIN_RENDER;
 		obj->read_domains = 0;
+	} else {
+		err = reservation_object_reserve_shared(vma->resv, 1);
+		if (unlikely(err))
+			return err;
+
+		reservation_object_add_shared_fence(vma->resv, &rq->fence);
+		obj->write_domain = 0;
 	}
 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
 	obj->mm.dirty = true;
@@ -941,8 +930,6 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	if (flags & EXEC_OBJECT_NEEDS_FENCE)
 		__i915_active_request_set(&vma->last_fence, rq);
 
-	export_fence(vma, rq, flags);
-
 	GEM_BUG_ON(!i915_vma_is_active(vma));
 	return 0;
 }
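For illustration, a minimal caller-side sketch of what this change enables. The helper name and structure here are hypothetical and are not taken from the i915 execbuffer code: the point is only that the shared-fence slot allocation failure, which export_fence() used to swallow, now propagates out of i915_vma_move_to_active() so the submission path can unwind instead of risking the missed synchronisation the removed comment warned about.

/*
 * Hypothetical caller sketch, not part of this patch: with the change
 * above, i915_vma_move_to_active() reports a failure to reserve the
 * shared-fence slot in the reservation_object (e.g. -ENOMEM) instead
 * of silently dropping it.
 */
static int example_move_to_active(struct i915_vma *vma,
				  struct i915_request *rq,
				  unsigned int flags)
{
	int err;

	err = i915_vma_move_to_active(vma, rq, flags);
	if (unlikely(err))
		return err; /* propagate rather than lose the fence */

	return 0;
}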