author     Jason Gunthorpe <jgg@nvidia.com>   2024-08-29 21:06:10 -0300
committer  Joerg Roedel <jroedel@suse.de>     2024-09-04 11:37:41 +0200
commit     8d00b77a52ef4b2091696ca25753d0ab95e4d839 (patch)
tree       489666e46dc42ca6f868d410cd8f8fa25c561b30 /drivers/iommu/amd
parent     89ffb2c3c2a1d0bff5515fc53f93de86fb6753c0 (diff)
iommu/amd: Move allocation of the top table into v1_alloc_pgtable
All the page table memory should be allocated/freed within the io_pgtable
struct. The v2 path is already doing this; make the v1 path consistent.

It is hard to see, but the free of the root in protection_domain_free() is a
NOP on the success path because v1_free_pgtable() does
amd_iommu_domain_clr_pt_root(). The root memory is already freed because
free_sub_pt() put it on the freelist. The free path in
protection_domain_free() is only used during error unwind of
protection_domain_alloc().

Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/1-v2-831cdc4d00f3+1a315-amd_iopgtbl_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
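For readers tracing the lifetime argument above, the following is a minimal
userspace sketch (plain C, not kernel code) of the ownership rule the patch
establishes. Every name in it (pgtable_alloc(), domain_alloc(), and so on) is
a hypothetical stand-in for v1_alloc_pgtable(), protection_domain_alloc() and
their counterparts; the point it illustrates is that the root page is
allocated and freed entirely inside the page-table object, so the domain-level
error unwind only releases state the domain itself allocated.

/*
 * Minimal userspace analogy (not kernel code) of the ownership rule this
 * patch enforces: the page-table object allocates and frees its own root,
 * so the caller's error unwind never touches it. All names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct pgtable {
	void *root;             /* owned by the pgtable, like pgtable->root */
};

struct domain {
	struct pgtable *pgtbl;  /* NULL until the pgtable layer succeeds */
};

/* Analogue of v1_alloc_pgtable(): the pgtable allocates its own root. */
static struct pgtable *pgtable_alloc(void)
{
	struct pgtable *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->root = calloc(1, 4096);      /* stand-in for a page allocation */
	if (!p->root) {
		free(p);
		return NULL;
	}
	return p;
}

/* Analogue of v1_free_pgtable(): the pgtable frees its own root. */
static void pgtable_free(struct pgtable *p)
{
	if (!p)
		return;
	free(p->root);
	free(p);
}

/* Analogue of protection_domain_free(): no raw root free left here. */
static void domain_free(struct domain *d)
{
	pgtable_free(d->pgtbl);
	free(d);
}

/* Analogue of protection_domain_alloc(): unwind frees only its own state. */
static struct domain *domain_alloc(void)
{
	struct domain *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	d->pgtbl = pgtable_alloc();
	if (!d->pgtbl) {
		free(d);                /* error unwind: no pgtable to free */
		return NULL;
	}
	return d;
}

int main(void)
{
	struct domain *d = domain_alloc();

	if (!d)
		return 1;
	domain_free(d);
	puts("root allocated and freed entirely inside the pgtable object");
	return 0;
}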
Diffstat (limited to 'drivers/iommu/amd')
-rw-r--r--  drivers/iommu/amd/io_pgtable.c    8
-rw-r--r--  drivers/iommu/amd/iommu.c        21
2 files changed, 8 insertions, 21 deletions
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index bfbcec68efb9..03a3b09f0512 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -573,20 +573,24 @@ static void v1_free_pgtable(struct io_pgtable *iop)
 	       pgtable->mode > PAGE_MODE_6_LEVEL);
 	free_sub_pt(pgtable->root, pgtable->mode, &freelist);
+	iommu_put_pages_list(&freelist);
 	/* Update data structure */
 	amd_iommu_domain_clr_pt_root(dom);
 	/* Make changes visible to IOMMUs */
 	amd_iommu_domain_update(dom);
-
-	iommu_put_pages_list(&freelist);
 }
 static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 {
 	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
+	pgtable->root = iommu_alloc_page(GFP_KERNEL);
+	if (!pgtable->root)
+		return NULL;
+	pgtable->mode = PAGE_MODE_3_LEVEL;
+
 	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES;
 	cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
 	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index adb579030e68..70213f94e24c 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -52,8 +52,6 @@
 #define HT_RANGE_START		(0xfd00000000ULL)
 #define HT_RANGE_END		(0xffffffffffULL)
-#define DEFAULT_PGTABLE_LEVEL	PAGE_MODE_3_LEVEL
-
 static DEFINE_SPINLOCK(pd_bitmap_lock);
 LIST_HEAD(ioapic_map);
@@ -2260,30 +2258,15 @@ void protection_domain_free(struct protection_domain *domain)
 	if (domain->iop.pgtbl_cfg.tlb)
 		free_io_pgtable_ops(&domain->iop.iop.ops);
-	if (domain->iop.root)
-		iommu_free_page(domain->iop.root);
-
 	if (domain->id)
 		domain_id_free(domain->id);
 	kfree(domain);
 }
-static int protection_domain_init_v1(struct protection_domain *domain, int mode)
+static int protection_domain_init_v1(struct protection_domain *domain)
 {
-	u64 *pt_root = NULL;
-
-	BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
-
-	if (mode != PAGE_MODE_NONE) {
-		pt_root = iommu_alloc_page(GFP_KERNEL);
-		if (!pt_root)
-			return -ENOMEM;
-	}
-
 	domain->pd_mode = PD_MODE_V1;
-	amd_iommu_domain_set_pgtable(domain, pt_root, mode);
-
 	return 0;
 }
@@ -2336,7 +2319,7 @@ struct protection_domain *protection_domain_alloc(unsigned int type)
 	switch (pgtable) {
 	case AMD_IOMMU_V1:
-		ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL);
+		ret = protection_domain_init_v1(domain);
 		break;
 	case AMD_IOMMU_V2:
 		ret = protection_domain_init_v2(domain);