author    Matthew Brost <matthew.brost@intel.com>  2024-04-24 21:55:03 -0700
committer Matthew Brost <matthew.brost@intel.com>  2024-04-26 12:09:57 -0700
commit    0a34c12449de4b09f74808c6f6c39205ee5071f0 (patch)
tree      547a6773e89313f9724f039dc343de98fe98ef29
parent    75192758d640227b68e4e21de811891219f3d0e2 (diff)
drm/xe: Move migrate to prefetch to op_lock_and_prep function
All non-binding operations in the VM bind IOCTL should be in the lock
and prepare step rather than the execution step. Move prefetch to
conform to this pattern.

v2:
 - Rebase
 - New function names (Oak)
 - Update stale comment (Oak)

Cc: Oak Zeng <oak.zeng@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-4-matthew.brost@intel.com
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 30
1 file changed, 15 insertions(+), 15 deletions(-)
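Before the diff itself: a minimal, self-contained sketch of the split the
commit message describes, with all setup work in a lock-and-prep phase and
only the bind left in the execute phase. Every name below (struct op,
op_lock_and_prep, op_execute, needs_migrate) is invented for illustration
and is not the xe driver's real interface:

#include <stdio.h>

/* Invented stand-in for a single VM bind operation. */
struct op {
	int needs_migrate;
	int region;
};

/*
 * Prep phase: take locks, validate buffers, and (after this patch)
 * migrate for prefetch. This phase may fail and be retried; no
 * fences have been published yet.
 */
static int op_lock_and_prep(struct op *op)
{
	if (op->needs_migrate) {
		/* a real driver would call its migrate helper here */
		printf("migrate bo to region %d\n", op->region);
	}
	return 0;
}

/* Execute phase: nothing but the bind itself. */
static void op_execute(struct op *op)
{
	printf("emit bind for region %d\n", op->region);
}

int main(void)
{
	struct op op = { .needs_migrate = 1, .region = 1 };

	if (!op_lock_and_prep(&op))
		op_execute(&op);
	return 0;
}

Keeping every fallible step in the prep phase means failures can be
unwound before any binding work is committed, which is the pattern this
patch extends to prefetch.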
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d0905d98de8c..e7be99acaff2 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1937,20 +1937,10 @@ static const u32 region_to_mem_type[] = {
 static struct dma_fence *
 xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
-	       struct xe_exec_queue *q, u32 region,
-	       struct xe_sync_entry *syncs, u32 num_syncs,
-	       bool first_op, bool last_op)
+	       struct xe_exec_queue *q, struct xe_sync_entry *syncs,
+	       u32 num_syncs, bool first_op, bool last_op)
 {
 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-	int err;
-
-	xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
-
-	if (!xe_vma_has_no_bo(vma)) {
-		err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
-		if (err)
-			return ERR_PTR(err);
-	}
 
 	if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
@@ -2490,8 +2480,7 @@ static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
 				   op->flags & XE_VMA_OP_LAST);
 		break;
 	case DRM_GPUVA_OP_PREFETCH:
-		fence = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
-				       op->syncs, op->num_syncs,
+		fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs,
 				       op->flags & XE_VMA_OP_FIRST,
 				       op->flags & XE_VMA_OP_LAST);
 		break;
@@ -2722,9 +2711,20 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
 					    false);
 		break;
 	case DRM_GPUVA_OP_PREFETCH:
+	{
+		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+		u32 region = op->prefetch.region;
+
+		xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
+
 		err = vma_lock_and_validate(exec,
-					    gpuva_to_vma(op->base.prefetch.va), true);
+					    gpuva_to_vma(op->base.prefetch.va),
+					    false);
+		if (!err && !xe_vma_has_no_bo(vma))
+			err = xe_bo_migrate(xe_vma_bo(vma),
+					    region_to_mem_type[region]);
 		break;
+	}
 	default:
 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
 	}
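For context on the assert bound in the last hunk: region_to_mem_type is
the array named in the first hunk's context line. At the time of this
commit it reads roughly as follows (quoted from xe_vm.c; the exact
placements may differ between trees):

static const u32 region_to_mem_type[] = {
	XE_PL_TT,
	XE_PL_VRAM0,
	XE_PL_VRAM1,
};

Valid prefetch regions therefore run from 0 to
ARRAY_SIZE(region_to_mem_type) - 1, which is why the moved assert keeps
the strict less-than bound used by the assert it replaces.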