Diffstat (limited to 'drivers/gpu/drm/panthor/panthor_mmu.c')
-rw-r--r--  drivers/gpu/drm/panthor/panthor_mmu.c  26
1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index bbc12728437f..7db2edb3374c 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -826,6 +826,14 @@ void panthor_vm_idle(struct panthor_vm *vm)
mutex_unlock(&ptdev->mmu->as.slots_lock);
}
+u32 panthor_vm_page_size(struct panthor_vm *vm)
+{
+ const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);
+ u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;
+
+ return 1u << pg_shift;
+}
+
static void panthor_vm_stop(struct panthor_vm *vm)
{
drm_sched_stop(&vm->sched, NULL);
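
The helper added above derives the VM's page size from the io-pgtable configuration: ffs() returns the 1-based position of the lowest set bit in pgsize_bitmap, so subtracting one yields the shift of the smallest supported granule, and 1u << pg_shift converts it back to bytes. This is the value the alignment checks in the later hunks compare against instead of the CPU's PAGE_SIZE. A minimal userspace sketch of the same derivation, with illustrative names that are not part of the driver:

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Illustrative only: derive the smallest page size encoded in a
 * pgsize_bitmap-style mask, e.g. 4K | 2M | 1G granules. */
static unsigned int smallest_page_size(unsigned int pgsize_bitmap)
{
	unsigned int pg_shift = ffs(pgsize_bitmap) - 1;

	return 1u << pg_shift;
}

int main(void)
{
	unsigned int bitmap = (1u << 12) | (1u << 21) | (1u << 30);

	printf("%u\n", smallest_page_size(bitmap));	/* prints 4096 */
	return 0;
}
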
@@ -1025,12 +1033,13 @@ int
panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
struct drm_mm_node *va_node)
{
+ ssize_t vm_pgsz = panthor_vm_page_size(vm);
int ret;
- if (!size || (size & ~PAGE_MASK))
+ if (!size || !IS_ALIGNED(size, vm_pgsz))
return -EINVAL;
- if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK))
+ if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz))
return -EINVAL;
mutex_lock(&vm->mm_lock);
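
With this hunk, VA-range validation follows the GPU VM's page granularity rather than the CPU's PAGE_SIZE, which matters when the two differ (for example a 64K CPU page size with 4K GPU pages). A standalone sketch of the check, using the same power-of-two test the kernel's IS_ALIGNED() macro performs; the values are assumed for illustration:

#include <assert.h>

/* Same test the kernel macro performs, reproduced here for illustration. */
#define IS_ALIGNED(x, a)	(((x) & ((__typeof__(x))(a) - 1)) == 0)

int main(void)
{
	unsigned long long vm_pgsz = 4096;	/* assumed VM page size */

	assert(IS_ALIGNED(0x10000ULL, vm_pgsz));	/* accepted */
	assert(!IS_ALIGNED(0x10010ULL, vm_pgsz));	/* would return -EINVAL */
	return 0;
}
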
@@ -1251,9 +1260,17 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
goto err_cleanup;
}
+ /* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
+ * pre-allocated BO if the <BO,VM> association exists. Given we
+ * only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
+ * be called immediately, and we have to hold the VM resv lock when
+ * calling this function.
+ */
+ dma_resv_lock(panthor_vm_resv(vm), NULL);
mutex_lock(&bo->gpuva_list_lock);
op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
mutex_unlock(&bo->gpuva_list_lock);
+ dma_resv_unlock(panthor_vm_resv(vm));
/* If a vm_bo for this <VM,BO> combination exists, it already
* retains a pin ref, and we can release the one we took earlier.
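
For context, the pattern being protected here, hedged as a sketch with assumed field names (vm->base as the drm_gpuvm, bo->base.base as the drm_gem_object), is: preallocate a candidate vm_bo without any lock, then resolve it to the canonical <VM,BO> association under the VM's reservation lock, because losing the race drops the only reference and frees the candidate on the spot:

struct drm_gpuvm_bo *vm_bo;

/* No locks needed for the speculative allocation. */
vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base);
if (!vm_bo)
	return -ENOMEM;

/* Resolving to the canonical vm_bo may put (and destroy) our candidate,
 * so the VM resv lock must be held across the call. */
dma_resv_lock(drm_gpuvm_resv(&vm->base), NULL);
vm_bo = drm_gpuvm_bo_obtain_prealloc(vm_bo);
dma_resv_unlock(drm_gpuvm_resv(&vm->base));
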
@@ -1563,7 +1580,9 @@ panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle)
{
struct panthor_vm *vm;
+ xa_lock(&pool->xa);
vm = panthor_vm_get(xa_load(&pool->xa, handle));
+ xa_unlock(&pool->xa);
return vm;
}
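
Taking the xarray lock across both the lookup and the reference acquisition closes the window in which a concurrent removal from the pool could drop the last reference between xa_load() and panthor_vm_get(). A generic sketch of the lookup-then-get pattern, with illustrative types rather than the driver's:

#include <linux/kref.h>
#include <linux/xarray.h>

struct obj {
	struct kref ref;
};

/* Illustrative: hold the xarray lock around both the load and the ref
 * grab so a racing xa_erase() + put() cannot free the object under us. */
static struct obj *pool_get(struct xarray *xa, unsigned long handle)
{
	struct obj *o;

	xa_lock(xa);
	o = xa_load(xa, handle);
	if (o)
		kref_get(&o->ref);
	xa_unlock(xa);

	return o;
}
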
@@ -2358,11 +2377,12 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
const struct drm_panthor_vm_bind_op *op,
struct panthor_vm_op_ctx *op_ctx)
{
+ ssize_t vm_pgsz = panthor_vm_page_size(vm);
struct drm_gem_object *gem;
int ret;
/* Aligned on page size. */
- if ((op->va | op->size) & ~PAGE_MASK)
+ if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
return -EINVAL;
switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
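
As in panthor_vm_alloc_va(), the bind path now validates against the VM page size; OR-ing op->va and op->size first lets a single IS_ALIGNED() test reject a misalignment in either field. A small sketch of why the OR trick works, with assumed values only:

#include <stdio.h>

int main(void)
{
	unsigned long long va = 0x200000, size = 0x3010, vm_pgsz = 0x1000;

	/* A low bit set in either operand survives the OR, so one test
	 * catches a misalignment in va, size, or both. */
	if ((va | size) & (vm_pgsz - 1))
		printf("-EINVAL: va or size not %llu-byte aligned\n", vm_pgsz);

	return 0;
}
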