author     Robin Murphy <robin.murphy@arm.com>       2019-09-18 17:17:50 +0100
committer  Will Deacon <will@kernel.org>             2019-10-01 12:17:39 +0100
commit     ae2b60f34ab21780bc30d01ae976cc7340446bde (patch)
tree       555c9cc946362eadc70a0b5c749cd95314675f71 /drivers/iommu/arm-smmu.c
parent     3370cb6bf64f6896a30eb7ad97721b9598c8fb10 (diff)
iommu/arm-smmu: Move .tlb_sync method to implementation
With the .tlb_sync interface no longer exposed directly to io-pgtable,
strip away the remains of that abstraction layer. Retain the callback
in spirit, though, by transforming it into an implementation override
for the low-level sync routine itself, for which we will have at least
one user.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
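To make the new override concrete, here is a minimal sketch (not part of this patch) of how an implementation could supply its own low-level sync routine. The "foo" names are hypothetical, and the hook's signature is assumed to mirror the smmu->impl->tlb_sync(smmu, page, sync, status) call site added in __arm_smmu_tlb_sync() below; the actual struct arm_smmu_impl definition lives in the driver's private header, which is outside this per-file view.

#include "arm-smmu.h"	/* assumed private header providing struct arm_smmu_impl */

/* Hypothetical vendor override of the low-level TLB sync. */
static void foo_tlb_sync(struct arm_smmu_device *smmu, int page,
			 int sync, int status)
{
	/*
	 * Do whatever quirky pre/post work the hardware needs here, then
	 * issue the sync write and poll the status register much as the
	 * generic path in __arm_smmu_tlb_sync() does.
	 */
}

static const struct arm_smmu_impl foo_impl = {
	.tlb_sync = foo_tlb_sync,
};

Such an implementation would presumably be selected at probe time by the arm-smmu implementation infrastructure introduced earlier in this series, at which point every sync issued by the driver funnels through the override.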
Diffstat (limited to 'drivers/iommu/arm-smmu.c')
-rw-r--r--   drivers/iommu/arm-smmu.c   33
1 file changed, 15 insertions(+), 18 deletions(-)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 4edbe58a303a..25876eb9266d 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -244,6 +244,9 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
 	unsigned int spin_cnt, delay;
 	u32 reg;
 
+	if (smmu->impl && unlikely(smmu->impl->tlb_sync))
+		return smmu->impl->tlb_sync(smmu, page, sync, status);
+
 	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
 	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
 		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
@@ -268,9 +271,8 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
 	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
 }
 
-static void arm_smmu_tlb_sync_context(void *cookie)
+static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
 {
-	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	unsigned long flags;
 
@@ -280,13 +282,6 @@ static void arm_smmu_tlb_sync_context(void *cookie)
 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 }
 
-static void arm_smmu_tlb_sync_vmid(void *cookie)
-{
-	struct arm_smmu_domain *smmu_domain = cookie;
-
-	arm_smmu_tlb_sync_global(smmu_domain->smmu);
-}
-
 static void arm_smmu_tlb_inv_context_s1(void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
@@ -297,7 +292,7 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
 	wmb();
 	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
 			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
-	arm_smmu_tlb_sync_context(cookie);
+	arm_smmu_tlb_sync_context(smmu_domain);
 }
 
 static void arm_smmu_tlb_inv_context_s2(void *cookie)
@@ -439,7 +434,6 @@ static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
 		.tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1,
 		.tlb_add_page	= arm_smmu_tlb_add_page_s1,
 	},
-	.tlb_sync		= arm_smmu_tlb_sync_context,
 };
 
 static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
@@ -449,7 +443,6 @@ static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
 		.tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2,
 		.tlb_add_page	= arm_smmu_tlb_add_page_s2,
 	},
-	.tlb_sync		= arm_smmu_tlb_sync_context,
 };
 
 static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
@@ -459,7 +452,6 @@ static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
 		.tlb_flush_leaf	= arm_smmu_tlb_inv_any_s2_v1,
 		.tlb_add_page	= arm_smmu_tlb_add_page_s2_v1,
 	},
-	.tlb_sync		= arm_smmu_tlb_sync_vmid,
 };
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -1229,11 +1221,16 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-	if (smmu_domain->flush_ops) {
-		arm_smmu_rpm_get(smmu);
-		smmu_domain->flush_ops->tlb_sync(smmu_domain);
-		arm_smmu_rpm_put(smmu);
-	}
+	if (!smmu)
+		return;
+
+	arm_smmu_rpm_get(smmu);
+	if (smmu->version == ARM_SMMU_V2 ||
+	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+		arm_smmu_tlb_sync_context(smmu_domain);
+	else
+		arm_smmu_tlb_sync_global(smmu);
+	arm_smmu_rpm_put(smmu);
 }
 
 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,