author     Ben Skeggs <bskeggs@redhat.com>      2018-07-07 18:29:20 +1000
committer  Jérôme Glisse <jglisse@redhat.com>   2019-01-19 17:21:43 -0500
commit     caec732aff121e883a89b763f447af1a4263f2b0
tree       a3456eba1306cb54ffcb09030be6f3ca818c5b06
parent     168639573e3921da69ea24fd8935d707ff259222
gpu/nouveau/mmu: store mapped flag separately from memory pointer
This will be used to support a privileged client providing PTEs directly.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Tested-by: Jérôme Glisse <jglisse@redhat.com>
 drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h |  1 +
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c     | 15 ++++++++++-----
 2 files changed, 11 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 0a0e064f22e5..752aa4730dab 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -17,6 +17,7 @@ struct nvkm_vma {
 	bool part:1; /* Region was split from an allocated region by map(). */
 	bool user:1; /* Region user-allocated. */
 	bool busy:1; /* Region busy (for temporarily preventing user access). */
+	bool mapped:1; /* Region contains valid pages. */
 	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
 	struct nvkm_tags *tags; /* Compression tag reference. */
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 5274ab0598af..69b61e799fd4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -763,6 +763,7 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
 	new->part = vma->part;
 	new->user = vma->user;
 	new->busy = vma->busy;
+	new->mapped = vma->mapped;
 	list_add(&new->head, &vma->head);
 	return new;
 }
@@ -1112,10 +1113,11 @@ nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 
 	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
 	nvkm_memory_unref(&vma->memory);
+	vma->mapped = false;
 
-	if (!vma->part || ((prev = node(vma, prev)), prev->memory))
+	if (!vma->part || ((prev = node(vma, prev)), prev->mapped))
 		prev = NULL;
-	if (!next->part || next->memory)
+	if (!next->part || next->mapped)
 		next = NULL;
 	nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
 }
@@ -1274,6 +1276,7 @@ nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
 	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
 	nvkm_memory_unref(&vma->memory);
 	vma->memory = nvkm_memory_ref(map->memory);
+	vma->mapped = true;
 	vma->tags = map->tags;
 	return 0;
 }
@@ -1319,14 +1322,16 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 
 	if (vma->mapref || !vma->sparse) {
 		do {
-			const bool map = next->memory != NULL;
+			const bool mem = next->memory != NULL;
+			const bool map = next->mapped;
 			const u8 refd = next->refd;
 			const u64 addr = next->addr;
 			u64 size = next->size;
 
 			/* Merge regions that are in the same state. */
 			while ((next = node(next, next)) && next->part &&
-			       (next->memory != NULL) == map &&
+			       (next->mapped == map) &&
+			       (next->memory != NULL) == mem &&
 			       (next->refd == refd))
 				size += next->size;
 
@@ -1351,7 +1356,7 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 	 */
 	next = vma;
 	do {
-		if (next->memory)
+		if (next->mapped)
 			nvkm_vmm_unmap_region(vmm, next);
 	} while ((next = node(vma, next)) && next->part);
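
Context for the change (not part of the commit): before this patch the VMM code effectively defined "mapped" as vma->memory != NULL. Once a privileged client can provide PTEs directly, a region can hold valid pages without any backing nvkm_memory object, so the two states diverge. The following is a minimal, hypothetical C sketch of that distinction; the struct and helper names are illustrative stand-ins, not nouveau code.

/*
 * Hypothetical sketch: illustrates why "mapped" must be tracked on its
 * own instead of being inferred from "memory != NULL".
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct vma_sketch {
	void *memory;   /* backing object; NULL for a direct PTE mapping */
	bool mapped;    /* region contains valid pages */
};

/* Old-style test: infer the mapped state from the memory pointer. */
static bool is_mapped_old(const struct vma_sketch *vma)
{
	return vma->memory != NULL;
}

/* New-style test: use the explicitly tracked flag. */
static bool is_mapped_new(const struct vma_sketch *vma)
{
	return vma->mapped;
}

int main(void)
{
	/* A region mapped via client-provided PTEs: no memory object. */
	struct vma_sketch direct = { .memory = NULL, .mapped = true };

	printf("old check says mapped: %d\n", is_mapped_old(&direct)); /* 0 */
	printf("new check says mapped: %d\n", is_mapped_new(&direct)); /* 1 */
	return 0;
}

This is the reason the hunks above switch the merge and unmap tests in nvkm_vmm_unmap_region() and nvkm_vmm_put_locked() from next->memory to next->mapped, while nvkm_vmm_put_locked() still compares the memory pointer separately (the new mem flag) so regions with different backing state are not merged.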