-rw-r--r--  drivers/gpu/drm/radeon/radeon.h    |  9 +++++----
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c |  2 +-
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 36 +++++++++++-------------------------
 3 files changed, 17 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 99605c96a9b2..4a5f326d9461 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -895,12 +895,13 @@ struct radeon_vm {
 	struct radeon_bo_va	*ib_bo_va;
 
 	struct mutex		mutex;
-	/* last fence for cs using this vm */
-	struct radeon_fence	*fence;
-	/* last flush or NULL if we still need to flush */
-	struct radeon_fence	*last_flush;
+
+	/* last PT update */
+	struct radeon_fence	*last_pt_update;
 	/* last use of vmid */
 	struct radeon_fence	*last_id_use;
+	/* last time we flushed on each ring */
+	struct radeon_fence	*last_flushes[RADEON_NUM_RINGS];
 };
 
 struct radeon_vm_manager {
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index cb12df784d83..aab06acd1c6c 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -538,7 +538,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		goto out;
 	}
 	radeon_cs_sync_rings(parser);
-	radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
+	radeon_semaphore_sync_to(parser->ib.semaphore, vm->last_pt_update);
 
 	if ((rdev->family >= CHIP_TAHITI) &&
 	    (parser->chunk_const_ib_idx != -1)) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 8d15ce6187da..82479c4da40e 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -187,9 +187,6 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
 	if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
 		return NULL;
 
-	/* we definately need to flush */
-	radeon_fence_unref(&vm->last_flush);
-
 	/* skip over VMID 0, since it is the system VM */
 	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
 		struct radeon_fence *fence = rdev->vm_manager.active[i];
@@ -236,14 +233,14 @@ void radeon_vm_flush(struct radeon_device *rdev,
 		     int ring)
 {
 	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
+	struct radeon_fence **last_flush = &vm->last_flushes[ring];
 
-	/* if we can't remember our last VM flush then flush now! */
-	/* flushing only when needed doesn't work because of a hardware race
-	   condition between the DMA and the GFX engines */
-	if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
+	if (*last_flush != vm->last_pt_update || pd_addr != vm->pd_gpu_addr) {
 		trace_radeon_vm_flush(pd_addr, ring, vm->id);
 		vm->pd_gpu_addr = pd_addr;
 		radeon_ring_vm_flush(rdev, ring, vm);
+		radeon_fence_unref(last_flush);
+		*last_flush = radeon_fence_ref(vm->last_pt_update);
 	}
 }
 
@@ -263,18 +260,11 @@ void radeon_vm_fence(struct radeon_device *rdev,
 		     struct radeon_vm *vm,
 		     struct radeon_fence *fence)
 {
-	radeon_fence_unref(&vm->fence);
-	vm->fence = radeon_fence_ref(fence);
-
 	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
 	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
 
 	radeon_fence_unref(&vm->last_id_use);
 	vm->last_id_use = radeon_fence_ref(fence);
-
-	/* we just flushed the VM, remember that */
-	if (!vm->last_flush)
-		vm->last_flush = radeon_fence_ref(fence);
 }
 
 /**
@@ -704,9 +694,8 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 			radeon_ib_free(rdev, &ib);
 			return r;
 		}
-		radeon_fence_unref(&vm->fence);
-		vm->fence = radeon_fence_ref(ib.fence);
-		radeon_fence_unref(&vm->last_flush);
+		radeon_fence_unref(&vm->last_pt_update);
+		vm->last_pt_update = radeon_fence_ref(ib.fence);
 	}
 	radeon_ib_free(rdev, &ib);
 
@@ -962,16 +951,15 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 	radeon_asic_vm_pad_ib(rdev, &ib);
 	WARN_ON(ib.length_dw > ndw);
 
-	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
+	radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
 		return r;
 	}
-	radeon_fence_unref(&vm->fence);
-	vm->fence = radeon_fence_ref(ib.fence);
+	radeon_fence_unref(&vm->last_pt_update);
+	vm->last_pt_update = radeon_fence_ref(ib.fence);
 	radeon_ib_free(rdev, &ib);
-	radeon_fence_unref(&vm->last_flush);
 
 	return 0;
 }
@@ -1101,8 +1089,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 
 	vm->id = 0;
 	vm->ib_bo_va = NULL;
-	vm->fence = NULL;
-	vm->last_flush = NULL;
+	vm->last_pt_update = NULL;
 	vm->last_id_use = NULL;
 	mutex_init(&vm->mutex);
 	vm->va = RB_ROOT;
@@ -1173,8 +1160,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 
 	radeon_bo_unref(&vm->page_directory);
 
-	radeon_fence_unref(&vm->fence);
-	radeon_fence_unref(&vm->last_flush);
+	radeon_fence_unref(&vm->last_pt_update);
 	radeon_fence_unref(&vm->last_id_use);
 
 	mutex_destroy(&vm->mutex);
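
Taken together, the hunks above replace the per-VM vm->fence / vm->last_flush pair with last_pt_update (the fence of the most recent page-table update) plus a per-ring last_flushes[RADEON_NUM_RINGS] array, so radeon_vm_flush() only emits a flush on a ring that has not yet flushed the latest PT update or whose cached page-directory address is stale. The following is a minimal stand-alone sketch of that decision logic, not driver code: it assumes simplified stand-in types (plain pointers instead of the refcounted struct radeon_fence, NUM_RINGS in place of RADEON_NUM_RINGS, vm_needs_flush()/vm_flush() as illustrative helpers) and omits the actual ring programming.

#include <stdint.h>
#include <stdbool.h>

#define NUM_RINGS 8                    /* stand-in for RADEON_NUM_RINGS */

struct fence { unsigned seq; };        /* stand-in for struct radeon_fence */

struct vm {
        uint64_t     pd_gpu_addr;              /* PD address seen by the last flush */
        struct fence *last_pt_update;          /* fence of the last page-table update */
        struct fence *last_flushes[NUM_RINGS]; /* last flush done on each ring */
};

/* Mirrors the new test in radeon_vm_flush(): a ring needs a VM flush only if
 * it has not yet flushed the most recent PT update, or if the page directory
 * moved since its last flush. */
static bool vm_needs_flush(const struct vm *vm, int ring, uint64_t pd_addr)
{
        return vm->last_flushes[ring] != vm->last_pt_update ||
               pd_addr != vm->pd_gpu_addr;
}

static void vm_flush(struct vm *vm, int ring, uint64_t pd_addr)
{
        if (!vm_needs_flush(vm, ring, pd_addr))
                return;                 /* this ring is already up to date */

        vm->pd_gpu_addr = pd_addr;
        /* ... emit the ring-specific VM flush packets here ... */

        /* remember which PT update this ring has now flushed; the real code
         * does this with radeon_fence_unref()/radeon_fence_ref() */
        vm->last_flushes[ring] = vm->last_pt_update;
}

Because the bookkeeping is per ring, each page-table update fenced by last_pt_update triggers at most one flush on every ring that later uses the VM, instead of the unconditional flush that the removed "|| true" check forced on each submission.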