Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c       |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c             |  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c                 |  4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c                   |  2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c           |  8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c            |  3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c         | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c           |  6
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c  |  5
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c                        | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_memory_region.c   |  7
-rw-r--r--  drivers/gpu/drm/lima/lima_gem.c                        | 10
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c                   | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c                |  8
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c                |  4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c                      |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c                     |  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c                 |  8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c                     |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c                           |  8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c                      | 12
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c                 | 15
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c                          | 15
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c                          |  2
-rw-r--r--  drivers/gpu/drm/vgem/vgem_fence.c                      | 12
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c                   |  9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c                     | 16
27 files changed, 139 insertions(+), 73 deletions(-)
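This series renames dma_resv_reserve_shared() to dma_resv_reserve_fences() and makes reserving a fence slot mandatory before adding any fence to a reservation object, including the exclusive one. The recurring pattern across all 27 files is sketched below; the helper name and variables are hypothetical, for illustration only:

	/* Minimal sketch, assuming the resv lock is already held: a slot
	 * must be reserved before any fence is added, because the add
	 * functions do not allocate memory and thus cannot fail.
	 */
	static int example_attach_fence(struct dma_resv *resv,
					struct dma_fence *fence, bool write)
	{
		int ret;

		dma_resv_assert_held(resv);

		ret = dma_resv_reserve_fences(resv, 1); /* may return -ENOMEM */
		if (ret)
			return ret;

		if (write)
			dma_resv_add_excl_fence(resv, fence);
		else
			dma_resv_add_shared_fence(resv, fence);
		return 0;
	}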
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 900ed2a7483b..98b1736bb221 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1233,7 +1233,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
- ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
+ ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
amdgpu_bo_fence(vm->root.bo,
@@ -2571,7 +2571,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
* Add process eviction fence to bo so they can
* evict each other.
*/
- ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
+ ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 25731719c627..6f57a2fd5fe3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1388,6 +1388,14 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared)
{
struct dma_resv *resv = bo->tbo.base.resv;
+ int r;
+
+ r = dma_resv_reserve_fences(resv, 1);
+ if (r) {
+ /* As last resort on OOM we block for the fence */
+ dma_fence_wait(fence, false);
+ return;
+ }
if (shared)
dma_resv_add_shared_fence(resv, fence);
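amdgpu_bo_fence() returns void, so it cannot propagate -ENOMEM from the new reservation step. The fallback rests on the observation that a fence which has already signaled never needs to be published in the reservation object: blocking until it signals is a correct, if slow, substitute. A sketch of the idiom (helper name hypothetical):

	static void example_bo_fence(struct dma_resv *resv,
				     struct dma_fence *fence, bool shared)
	{
		if (dma_resv_reserve_fences(resv, 1)) {
			/* OOM: wait until the fence signals; a signaled
			 * fence no longer has to be recorded at all.
			 */
			dma_fence_wait(fence, false); /* non-interruptible */
			return;
		}
		if (shared)
			dma_resv_add_shared_fence(resv, fence);
		else
			dma_resv_add_excl_fence(resv, fence);
	}

radeon_bo_fence() and vmw_bo_fence_single() further down apply the same last-resort wait.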
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5d11978c162e..b13451255e8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2926,7 +2926,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (r)
goto error_free_root;
- r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
+ r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
if (r)
goto error_unreserve;
@@ -3369,7 +3369,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
value = 0;
}
- r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+ r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
if (r) {
pr_debug("failed %d to reserve fence slot\n", r);
goto error_unlock;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 3b8856b4cece..b3fc3e958227 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -548,7 +548,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
goto reserve_bo_failed;
}
- r = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
+ r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
if (r) {
pr_debug("failed %d to reserve bo\n", r);
amdgpu_bo_unreserve(bo);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 5f502c49aec2..53f7c78628a4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -179,11 +179,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
struct dma_resv *robj = bo->obj->base.resv;
- if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
- ret = dma_resv_reserve_shared(robj, 1);
- if (ret)
- return ret;
- }
+ ret = dma_resv_reserve_fences(robj, 1);
+ if (ret)
+ return ret;
if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
continue;
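Note the dropped ETNA_SUBMIT_BO_WRITE guard: previously only shared (read) fences occupied slots in the dynamically sized array, while the exclusive (write) fence lived in its own dedicated pointer, so write-only BOs had nothing to reserve. With reservation now mandatory for every fence, in preparation for keeping all fences in one container, the call becomes unconditional; the same guard removal repeats in the i915, lima, msm and nouveau hunks below. A hypothetical submit loop showing the new invariant:

	for (i = 0; i < submit->nr_bos; i++) {
		/* one slot per BO, read or write alike */
		ret = dma_resv_reserve_fences(submit->bos[i].obj->resv, 1);
		if (ret)
			return ret; /* safe: no fence published yet */
	}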
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index ce91b23385cf..1fd0cc9ca213 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -108,7 +108,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
trace_i915_gem_object_clflush(obj);
clflush = NULL;
- if (!(flags & I915_CLFLUSH_SYNC))
+ if (!(flags & I915_CLFLUSH_SYNC) &&
+ dma_resv_reserve_fences(obj->base.resv, 1) == 0)
clflush = clflush_work_create(obj);
if (clflush) {
i915_sw_fence_await_reservation(&clflush->base.chain,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d42f437149c9..78f8797853ce 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -998,11 +998,9 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
}
}
- if (!(ev->flags & EXEC_OBJECT_WRITE)) {
- err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
- if (err)
- return err;
- }
+ err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+ if (err)
+ return err;
GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
@@ -2303,7 +2301,7 @@ static int eb_parse(struct i915_execbuffer *eb)
if (IS_ERR(batch))
return PTR_ERR(batch);
- err = dma_resv_reserve_shared(shadow->obj->base.resv, 1);
+ err = dma_resv_reserve_fences(shadow->obj->base.resv, 1);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 1ebe6e4086a1..432ac74ff225 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -611,7 +611,11 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
assert_object_held(src);
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
- ret = dma_resv_reserve_shared(src_bo->base.resv, 1);
+ ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
+ if (ret)
+ return ret;
+
+ ret = dma_resv_reserve_fences(dst_bo->base.resv, 1);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index d534141b2cf7..0e52eb87cd55 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -216,7 +216,10 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
i915_gem_object_is_lmem(obj),
0xdeadbeaf, &rq);
if (rq) {
- dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+ err = dma_resv_reserve_fences(obj->base.resv, 1);
+ if (!err)
+ dma_resv_add_excl_fence(obj->base.resv,
+ &rq->fence);
i915_gem_object_set_moving_fence(obj, &rq->fence);
i915_request_put(rq);
}
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 94fcdb7bd21d..bae3423f58e8 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1819,6 +1819,12 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
intel_frontbuffer_put(front);
}
+ if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
+ err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+ if (unlikely(err))
+ return err;
+ }
+
if (fence) {
dma_resv_add_excl_fence(vma->obj->base.resv, fence);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
@@ -1826,7 +1832,7 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
}
} else {
if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
- err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
+ err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
if (unlikely(err))
return err;
}
@@ -2044,7 +2050,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
if (!obj->mm.rsgt)
return -EBUSY;
- err = dma_resv_reserve_shared(obj->base.resv, 1);
+ err = dma_resv_reserve_fences(obj->base.resv, 1);
if (err)
return -EBUSY;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index ba32893e0873..6114e013092b 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -1043,6 +1043,13 @@ static int igt_lmem_write_cpu(void *arg)
}
i915_gem_object_lock(obj, NULL);
+
+ err = dma_resv_reserve_fences(obj->base.resv, 1);
+ if (err) {
+ i915_gem_object_unlock(obj);
+ goto out_put;
+ }
+
/* Put the pages into a known state -- from the gpu for added fun */
intel_engine_pm_get(engine);
err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 55bb1ec3c4f7..e0a11ee0e86d 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -257,13 +257,11 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
bool write, bool explicit)
{
- int err = 0;
+ int err;
- if (!write) {
- err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
- if (err)
- return err;
- }
+ err = dma_resv_reserve_fences(lima_bo_resv(bo), 1);
+ if (err)
+ return err;
/* explicit sync use user passed dep fence */
if (explicit)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index c6d60c8d286d..3164db8be893 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -320,16 +320,14 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
struct drm_gem_object *obj = &submit->bos[i].obj->base;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
- if (!write) {
- /* NOTE: _reserve_shared() must happen before
- * _add_shared_fence(), which makes this a slightly
- * strange place to call it. OTOH this is a
- * convenient can-fail point to hook it in.
- */
- ret = dma_resv_reserve_shared(obj->resv, 1);
- if (ret)
- return ret;
- }
+ /* NOTE: _reserve_shared() must happen before
+ * _add_shared_fence(), which makes this a slightly
+ * strange place to call it. OTOH this is a
+ * convenient can-fail point to hook it in.
+ */
+ ret = dma_resv_reserve_fences(obj->resv, 1);
+ if (ret)
+ return ret;
/* exclusive fences must be ordered */
if (no_implicit && !write)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index a3a04e0d76ec..0268259e97eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -346,11 +346,9 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
struct dma_resv *resv = nvbo->bo.base.resv;
int i, ret;
- if (!exclusive) {
- ret = dma_resv_reserve_shared(resv, 1);
- if (ret)
- return ret;
- }
+ ret = dma_resv_reserve_fences(resv, 1);
+ if (ret)
+ return ret;
/* Waiting for the exclusive fence first causes performance regressions
* under some circumstances. So manually wait for the shared ones first.
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index a6925dbb6224..c34114560e49 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -247,6 +247,10 @@ static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
int i, ret;
for (i = 0; i < bo_count; i++) {
+ ret = dma_resv_reserve_fences(bos[i]->resv, 1);
+ if (ret)
+ return ret;
+
/* panfrost always uses write mode in its current uapi */
ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
true);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 469979cd0341..cde1e8ddaeaa 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -200,7 +200,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
return ret;
}
- ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
+ ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 9ed2b2700e0a..446f7bae54c4 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -535,6 +535,10 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
return r;
radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
+
+ r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
+ if (r)
+ return r;
}
return radeon_vm_clear_invalids(rdev, vm);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 91a72cd14304..7ffd2e90f325 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -782,6 +782,14 @@ void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
bool shared)
{
struct dma_resv *resv = bo->tbo.base.resv;
+ int r;
+
+ r = dma_resv_reserve_fences(resv, 1);
+ if (r) {
+ /* As last resort on OOM we block for the fence */
+ dma_fence_wait(&fence->base, false);
+ return;
+ }
if (shared)
dma_resv_add_shared_fence(resv, &fence->base);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index bb53016f3138..987cabbf1318 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -831,7 +831,7 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
int r;
radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
- r = dma_resv_reserve_shared(pt->tbo.base.resv, 1);
+ r = dma_resv_reserve_fences(pt->tbo.base.resv, 1);
if (r)
return r;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e5fd0f2c0299..c49996cf25d0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -151,6 +151,10 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
}
+ ret = dma_resv_reserve_fences(bo->base.resv, 1);
+ if (ret)
+ goto out_err;
+
ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
if (ret) {
if (ret == -EMULTIHOP)
@@ -735,7 +739,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
dma_resv_add_shared_fence(bo->base.resv, fence);
- ret = dma_resv_reserve_shared(bo->base.resv, 1);
+ ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (unlikely(ret)) {
dma_fence_put(fence);
return ret;
@@ -794,7 +798,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
bool type_found = false;
int i, ret;
- ret = dma_resv_reserve_shared(bo->base.resv, 1);
+ ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (unlikely(ret))
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 219dd81bbeab..1b96b91bf81b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -221,9 +221,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->base = *bo;
- ttm_bo_get(bo);
- fbo->bo = bo;
-
/**
* Fix up members that we shouldn't copy directly:
* TODO: Explicit member copy would probably be better here.
@@ -250,6 +247,15 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
+ ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
+ if (ret) {
+ kfree(fbo);
+ return ret;
+ }
+
+ ttm_bo_get(bo);
+ fbo->bo = bo;
+
ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
*new_obj = &fbo->base;
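Beyond the rename, ttm_buffer_object_transfer() moves ttm_bo_get() after the reservation call, which can now fail: every fallible step happens before any reference is taken, so the error path is a bare kfree(fbo) with nothing to unwind. The general shape of the idiom (names hypothetical):

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	ret = dma_resv_reserve_fences(resv, 1);	/* fallible step first */
	if (ret) {
		kfree(fbo);			/* no refcount to drop */
		return ret;
	}

	ttm_bo_get(bo);				/* infallible; take refs last */
	fbo->bo = bo;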
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 071c48d672c6..789c645f004e 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -90,6 +90,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
+ unsigned int num_fences;
ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
if (ret == -EALREADY && dups) {
@@ -100,12 +101,10 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
continue;
}
+ num_fences = min(entry->num_shared, 1u);
if (!ret) {
- if (!entry->num_shared)
- continue;
-
- ret = dma_resv_reserve_shared(bo->base.resv,
- entry->num_shared);
+ ret = dma_resv_reserve_fences(bo->base.resv,
+ num_fences);
if (!ret)
continue;
}
@@ -120,9 +119,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
}
- if (!ret && entry->num_shared)
- ret = dma_resv_reserve_shared(bo->base.resv,
- entry->num_shared);
+ if (!ret)
+ ret = dma_resv_reserve_fences(bo->base.resv,
+ num_fences);
if (unlikely(ret != 0)) {
if (ticket) {
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 92bc0faee84f..961812d33827 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -259,16 +259,21 @@ v3d_lock_bo_reservations(struct v3d_job *job,
return ret;
for (i = 0; i < job->bo_count; i++) {
+ ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
+ if (ret)
+ goto fail;
+
ret = drm_sched_job_add_implicit_dependencies(&job->base,
job->bo[i], true);
- if (ret) {
- drm_gem_unlock_reservations(job->bo, job->bo_count,
- acquire_ctx);
- return ret;
- }
+ if (ret)
+ goto fail;
}
return 0;
+
+fail:
+ drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 4abf10b66fe8..594bd6bb00d2 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -644,7 +644,7 @@ retry:
for (i = 0; i < exec->bo_count; i++) {
bo = &exec->bo[i]->base;
- ret = dma_resv_reserve_shared(bo->resv, 1);
+ ret = dma_resv_reserve_fences(bo->resv, 1);
if (ret) {
vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
return ret;
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index bd6f75285fd9..2ddbebca87d9 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -157,12 +157,14 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
}
/* Expose the fence via the dma-buf */
- ret = 0;
dma_resv_lock(resv, NULL);
- if (arg->flags & VGEM_FENCE_WRITE)
- dma_resv_add_excl_fence(resv, fence);
- else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)
- dma_resv_add_shared_fence(resv, fence);
+ ret = dma_resv_reserve_fences(resv, 1);
+ if (!ret) {
+ if (arg->flags & VGEM_FENCE_WRITE)
+ dma_resv_add_excl_fence(resv, fence);
+ else
+ dma_resv_add_shared_fence(resv, fence);
+ }
dma_resv_unlock(resv);
/* Record the fence in our idr for later signaling */
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 48d3c9955f0d..1820ca6cf673 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -214,6 +214,7 @@ void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
+ unsigned int i;
int ret;
if (objs->nents == 1) {
@@ -222,6 +223,14 @@ int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
ret = drm_gem_lock_reservations(objs->objs, objs->nents,
&objs->ticket);
}
+ if (ret)
+ return ret;
+
+ for (i = 0; i < objs->nents; ++i) {
+ ret = dma_resv_reserve_fences(objs->objs[i]->resv, 1);
+ if (ret)
+ return ret;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 31aecc46624b..fe13aa8b4a64 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -747,16 +747,22 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence)
{
struct ttm_device *bdev = bo->bdev;
-
struct vmw_private *dev_priv =
container_of(bdev, struct vmw_private, bdev);
+ int ret;
- if (fence == NULL) {
+ if (fence == NULL)
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ else
+ dma_fence_get(&fence->base);
+
+ ret = dma_resv_reserve_fences(bo->base.resv, 1);
+ if (!ret)
dma_resv_add_excl_fence(bo->base.resv, &fence->base);
- dma_fence_put(&fence->base);
- } else
- dma_resv_add_excl_fence(bo->base.resv, &fence->base);
+ else
+ /* Last resort fallback when we are OOM */
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
}
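The vmwgfx rewrite also makes the fence reference counting symmetric: both branches now own exactly one reference, freshly created by vmw_execbuf_fence_commands() or explicitly taken with dma_fence_get(), so the single unconditional dma_fence_put() at the end is correct on the success path and on the OOM fallback alike. Sketched shape of the two paths (assuming vmw_execbuf_fence_commands() returns a new reference, as the put in the original NULL branch implies):

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		dma_fence_get(&fence->base);	/* take our own reference */

	/* ... publish the fence or, on OOM, wait for it ... */

	dma_fence_put(&fence->base);		/* single, unconditional drop */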