From 7cd75787820c3d3bf1ced20d3f2542a018b160c7 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Fri, 6 Nov 2015 17:26:24 +0100 Subject: iommu/s390: Fix sparse warnings Fix these warnings: CHECK drivers/iommu/s390-iommu.c drivers/iommu/s390-iommu.c:52:21: warning: symbol 's390_domain_alloc' was not declared. Should it be static? drivers/iommu/s390-iommu.c:76:6: warning: symbol 's390_domain_free' was not declared. Should it be static? Signed-off-by: Sebastian Ott Signed-off-by: Joerg Roedel --- drivers/iommu/s390-iommu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 471ee36b9c6e..a04d491cf431 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -49,7 +49,7 @@ static bool s390_iommu_capable(enum iommu_cap cap) } } -struct iommu_domain *s390_domain_alloc(unsigned domain_type) +static struct iommu_domain *s390_domain_alloc(unsigned domain_type) { struct s390_domain *s390_domain; @@ -73,7 +73,7 @@ struct iommu_domain *s390_domain_alloc(unsigned domain_type) return &s390_domain->domain; } -void s390_domain_free(struct iommu_domain *domain) +static void s390_domain_free(struct iommu_domain *domain) { struct s390_domain *s390_domain = to_s390_domain(domain); -- cgit v1.2.3 From 43c0ea20de4de8e2d753dff0e76129c4febafffa Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 17 Nov 2015 16:11:37 +0100 Subject: iommu/amd: Correctly set flags for handle_mm_fault call Instead of just checking for a write access, calculate the flags that are passed to handle_mm_fault() more precisely and use the pre-defined macros. Reviewed-by: Jesse Barnes Acked-By: David Woodhouse Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu_v2.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 7caf2fa237f2..a7edbd6851c4 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -513,16 +513,20 @@ static bool access_error(struct vm_area_struct *vma, struct fault *fault) static void do_fault(struct work_struct *work) { struct fault *fault = container_of(work, struct fault, work); - struct mm_struct *mm; struct vm_area_struct *vma; + unsigned int flags = 0; + struct mm_struct *mm; u64 address; - int ret, write; - - write = !!(fault->flags & PPR_FAULT_WRITE); + int ret; mm = fault->state->mm; address = fault->address; + if (fault->flags & PPR_FAULT_USER) + flags |= FAULT_FLAG_USER; + if (fault->flags & PPR_FAULT_WRITE) + flags |= FAULT_FLAG_WRITE; + down_read(&mm->mmap_sem); vma = find_extend_vma(mm, address); if (!vma || address < vma->vm_start) { @@ -539,7 +543,7 @@ static void do_fault(struct work_struct *work) goto out; } - ret = handle_mm_fault(mm, vma, address, write); + ret = handle_mm_fault(mm, vma, address, flags); if (ret & VM_FAULT_ERROR) { /* failed to service fault */ up_read(&mm->mmap_sem); -- cgit v1.2.3 From 492e74594ec6285207f9db02d41c41cad5dbc6ab Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 17 Nov 2015 16:11:38 +0100 Subject: iommu/amd: Cleanup error handling in do_fault() Get rid of the three error paths that look the same and move error handling to a single place.
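As a rough standalone sketch of the single-exit idiom this cleanup applies, before the diff itself: assume failure up front, jump to one label on any error, and keep exactly one unlock site and one error-report site. All names here (service_request, have_region, have_permission) are invented for illustration, not kernel code.

/* Single-exit error handling: ret defaults to failure, every early
 * bail-out goes through the same "out" label, and the unlock and the
 * error report each appear exactly once. */
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int service_request(int have_region, int have_permission)
{
	int ret = -1;			/* assume failure until proven otherwise */

	pthread_mutex_lock(&lock);

	if (!have_region)		/* failed to get a region in range */
		goto out;
	if (!have_permission)		/* wrong permissions */
		goto out;

	ret = 0;			/* the real work would happen here */
out:
	pthread_mutex_unlock(&lock);	/* the only unlock site */
	if (ret)
		fprintf(stderr, "handling fault error\n");
	return ret;
}

int main(void)
{
	service_request(1, 0);		/* exercises the error path */
	return service_request(1, 1);	/* exercises the success path */
}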
Reviewed-by: Jesse Barnes Acked-By: David Woodhouse Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu_v2.c | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index a7edbd6851c4..6a28b745521d 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -514,10 +514,10 @@ static void do_fault(struct work_struct *work) { struct fault *fault = container_of(work, struct fault, work); struct vm_area_struct *vma; + int ret = VM_FAULT_ERROR; unsigned int flags = 0; struct mm_struct *mm; u64 address; - int ret; mm = fault->state->mm; address = fault->address; @@ -529,31 +529,23 @@ static void do_fault(struct work_struct *work) down_read(&mm->mmap_sem); vma = find_extend_vma(mm, address); - if (!vma || address < vma->vm_start) { + if (!vma || address < vma->vm_start) /* failed to get a vma in the right range */ - up_read(&mm->mmap_sem); - handle_fault_error(fault); goto out; - } /* Check if we have the right permissions on the vma */ - if (access_error(vma, fault)) { - up_read(&mm->mmap_sem); - handle_fault_error(fault); + if (access_error(vma, fault)) goto out; - } ret = handle_mm_fault(mm, vma, address, flags); - if (ret & VM_FAULT_ERROR) { - /* failed to service fault */ - up_read(&mm->mmap_sem); - handle_fault_error(fault); - goto out; - } +out: up_read(&mm->mmap_sem); -out: + if (ret & VM_FAULT_ERROR) + /* failed to service fault */ + handle_fault_error(fault); + finish_pri_tag(fault->dev_state, fault->state, fault->tag); put_pasid_state(fault->state); -- cgit v1.2.3 From 759ce23b62c4ba1aa467fe5eb12bc9f95af606fe Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sun, 29 Nov 2015 23:02:50 +0100 Subject: iommu/amd: Constify mmu_notifier_ops structures This mmu_notifier_ops structure is never modified, so declare it as const, like the other mmu_notifier_ops structures. Done with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu_v2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 6a28b745521d..c865737326e1 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -432,7 +432,7 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm) unbind_pasid(pasid_state); } -static struct mmu_notifier_ops iommu_mn = { +static const struct mmu_notifier_ops iommu_mn = { .release = mn_release, .clear_flush_young = mn_clear_flush_young, .invalidate_page = mn_invalidate_page, -- cgit v1.2.3 From e7479a1907dbb9761cef2ad0c94c6d3ba03d3013 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Wed, 2 Dec 2015 17:26:01 +0100 Subject: iommu/msm: Use platform_register/unregister_drivers() These new helpers simplify implementing multi-driver modules and properly handle failure to register one driver by unregistering all previously registered drivers. 
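The diff below replaces the msm driver's hand-rolled rollback with these helpers. As a rough illustration of the pattern they encapsulate (register everything, unwind in reverse on the first failure), here is a standalone sketch; register_one() and unregister_one() are made-up stand-ins for the real driver-core calls.

/* Register-all-or-unwind-in-reverse: on the first failure, everything
 * registered so far is unregistered, newest first. */
#include <stdio.h>

struct drv { const char *name; };

static int register_one(struct drv *d)
{
	printf("register %s\n", d->name);
	return 0;			/* a real call could fail */
}

static void unregister_one(struct drv *d)
{
	printf("unregister %s\n", d->name);
}

static int register_all(struct drv *drivers[], int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = register_one(drivers[i]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)		/* undo in reverse order */
		unregister_one(drivers[i]);
	return err;
}

int main(void)
{
	struct drv iommu = { "iommu" }, ctx = { "iommu-ctx" };
	struct drv *drivers[] = { &iommu, &ctx };

	return register_all(drivers, 2);
}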
Signed-off-by: Thierry Reding Signed-off-by: Joerg Roedel --- drivers/iommu/msm_iommu_dev.c | 25 +++++++------------------ 1 file changed, 7 insertions(+), 18 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c index b6d01f97e537..4b09e815accf 100644 --- a/drivers/iommu/msm_iommu_dev.c +++ b/drivers/iommu/msm_iommu_dev.c @@ -359,30 +359,19 @@ static struct platform_driver msm_iommu_ctx_driver = { .remove = msm_iommu_ctx_remove, }; +static struct platform_driver * const drivers[] = { + &msm_iommu_driver, + &msm_iommu_ctx_driver, +}; + static int __init msm_iommu_driver_init(void) { - int ret; - ret = platform_driver_register(&msm_iommu_driver); - if (ret != 0) { - pr_err("Failed to register IOMMU driver\n"); - goto error; - } - - ret = platform_driver_register(&msm_iommu_ctx_driver); - if (ret != 0) { - platform_driver_unregister(&msm_iommu_driver); - pr_err("Failed to register IOMMU context driver\n"); - goto error; - } - -error: - return ret; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } static void __exit msm_iommu_driver_exit(void) { - platform_driver_unregister(&msm_iommu_ctx_driver); - platform_driver_unregister(&msm_iommu_driver); + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } subsys_initcall(msm_iommu_driver_init); -- cgit v1.2.3 From ae50dc4874c5b73dfefeb1d5a9c1adf2c41576fd Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 4 Dec 2015 16:58:56 +0100 Subject: iommu/shmobile: Remove unused Renesas IPMMU/IPMMUI driver As of commit 44d88c754e57a6d9 ("ARM: shmobile: Remove legacy SoC code for R-Mobile A1"), the Renesas IPMMU/IPMMUI driver is no longer used. In theory it could still be used on SH-Mobile AG5 and R-Mobile A1 SoCs, but that requires adding DT support to the driver, which is not planned. Remove the driver, it can be resurrected from git history when needed. Signed-off-by: Geert Uytterhoeven Acked-by: Simon Horman Signed-off-by: Joerg Roedel --- drivers/iommu/Kconfig | 75 -------- drivers/iommu/Makefile | 2 - drivers/iommu/shmobile-iommu.c | 402 ----------------------------------------- drivers/iommu/shmobile-ipmmu.c | 129 ------------- drivers/iommu/shmobile-ipmmu.h | 34 ---- 5 files changed, 642 deletions(-) delete mode 100644 drivers/iommu/shmobile-iommu.c delete mode 100644 drivers/iommu/shmobile-ipmmu.c delete mode 100644 drivers/iommu/shmobile-ipmmu.h (limited to 'drivers/iommu') diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index b9094e9da537..a1e75cba18e0 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -263,81 +263,6 @@ config EXYNOS_IOMMU_DEBUG Say N unless you need kernel log message for IOMMU debugging. -config SHMOBILE_IPMMU - bool - -config SHMOBILE_IPMMU_TLB - bool - -config SHMOBILE_IOMMU - bool "IOMMU for Renesas IPMMU/IPMMUI" - default n - depends on ARM && MMU - depends on ARCH_SHMOBILE || COMPILE_TEST - select IOMMU_API - select ARM_DMA_USE_IOMMU - select SHMOBILE_IPMMU - select SHMOBILE_IPMMU_TLB - help - Support for Renesas IPMMU/IPMMUI. This option enables - remapping of DMA memory accesses from all of the IP blocks - on the ICB. - - Warning: Drivers (including userspace drivers of UIO - devices) of the IP blocks on the ICB *must* use addresses - allocated from the IPMMU (iova) for DMA with this option - enabled. - - If unsure, say N. 
- -choice - prompt "IPMMU/IPMMUI address space size" - default SHMOBILE_IOMMU_ADDRSIZE_2048MB - depends on SHMOBILE_IOMMU - help - This option sets IPMMU/IPMMUI address space size by - adjusting the 1st level page table size. The page table size - is calculated as follows: - - page table size = number of page table entries * 4 bytes - number of page table entries = address space size / 1 MiB - - For example, when the address space size is 2048 MiB, the - 1st level page table size is 8192 bytes. - - config SHMOBILE_IOMMU_ADDRSIZE_2048MB - bool "2 GiB" - - config SHMOBILE_IOMMU_ADDRSIZE_1024MB - bool "1 GiB" - - config SHMOBILE_IOMMU_ADDRSIZE_512MB - bool "512 MiB" - - config SHMOBILE_IOMMU_ADDRSIZE_256MB - bool "256 MiB" - - config SHMOBILE_IOMMU_ADDRSIZE_128MB - bool "128 MiB" - - config SHMOBILE_IOMMU_ADDRSIZE_64MB - bool "64 MiB" - - config SHMOBILE_IOMMU_ADDRSIZE_32MB - bool "32 MiB" - -endchoice - -config SHMOBILE_IOMMU_L1SIZE - int - default 8192 if SHMOBILE_IOMMU_ADDRSIZE_2048MB - default 4096 if SHMOBILE_IOMMU_ADDRSIZE_1024MB - default 2048 if SHMOBILE_IOMMU_ADDRSIZE_512MB - default 1024 if SHMOBILE_IOMMU_ADDRSIZE_256MB - default 512 if SHMOBILE_IOMMU_ADDRSIZE_128MB - default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB - default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB - config IPMMU_VMSA bool "Renesas VMSA-compatible IPMMU" depends on ARM_LPAE diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 68faca02225d..42fc0c25cf1a 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -22,7 +22,5 @@ obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o -obj-$(CONFIG_SHMOBILE_IOMMU) += shmobile-iommu.o -obj-$(CONFIG_SHMOBILE_IPMMU) += shmobile-ipmmu.o obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o obj-$(CONFIG_S390_IOMMU) += s390-iommu.o diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c deleted file mode 100644 index a0287519a1d4..000000000000 --- a/drivers/iommu/shmobile-iommu.c +++ /dev/null @@ -1,402 +0,0 @@ -/* - * IOMMU for IPMMU/IPMMUI - * Copyright (C) 2012 Hideki EIRAKU - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include "shmobile-ipmmu.h" - -#define L1_SIZE CONFIG_SHMOBILE_IOMMU_L1SIZE -#define L1_LEN (L1_SIZE / 4) -#define L1_ALIGN L1_SIZE -#define L2_SIZE SZ_1K -#define L2_LEN (L2_SIZE / 4) -#define L2_ALIGN L2_SIZE - -struct shmobile_iommu_domain_pgtable { - uint32_t *pgtable; - dma_addr_t handle; -}; - -struct shmobile_iommu_archdata { - struct list_head attached_list; - struct dma_iommu_mapping *iommu_mapping; - spinlock_t attach_lock; - struct shmobile_iommu_domain *attached; - int num_attached_devices; - struct shmobile_ipmmu *ipmmu; -}; - -struct shmobile_iommu_domain { - struct shmobile_iommu_domain_pgtable l1, l2[L1_LEN]; - spinlock_t map_lock; - spinlock_t attached_list_lock; - struct list_head attached_list; - struct iommu_domain domain; -}; - -static struct shmobile_iommu_archdata *ipmmu_archdata; -static struct kmem_cache *l1cache, *l2cache; - -static struct shmobile_iommu_domain *to_sh_domain(struct iommu_domain *dom) -{ - return container_of(dom, struct shmobile_iommu_domain, domain); -} - -static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable, - struct kmem_cache *cache, size_t size) -{ - pgtable->pgtable = kmem_cache_zalloc(cache, GFP_ATOMIC); - if (!pgtable->pgtable) - return -ENOMEM; - pgtable->handle = dma_map_single(NULL, pgtable->pgtable, size, - DMA_TO_DEVICE); - return 0; -} - -static void pgtable_free(struct shmobile_iommu_domain_pgtable *pgtable, - struct kmem_cache *cache, size_t size) -{ - dma_unmap_single(NULL, pgtable->handle, size, DMA_TO_DEVICE); - kmem_cache_free(cache, pgtable->pgtable); -} - -static uint32_t pgtable_read(struct shmobile_iommu_domain_pgtable *pgtable, - unsigned int index) -{ - return pgtable->pgtable[index]; -} - -static void pgtable_write(struct shmobile_iommu_domain_pgtable *pgtable, - unsigned int index, unsigned int count, uint32_t val) -{ - unsigned int i; - - for (i = 0; i < count; i++) - pgtable->pgtable[index + i] = val; - dma_sync_single_for_device(NULL, pgtable->handle + index * sizeof(val), - sizeof(val) * count, DMA_TO_DEVICE); -} - -static struct iommu_domain *shmobile_iommu_domain_alloc(unsigned type) -{ - struct shmobile_iommu_domain *sh_domain; - int i, ret; - - if (type != IOMMU_DOMAIN_UNMANAGED) - return NULL; - - sh_domain = kzalloc(sizeof(*sh_domain), GFP_KERNEL); - if (!sh_domain) - return NULL; - ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE); - if (ret < 0) { - kfree(sh_domain); - return NULL; - } - for (i = 0; i < L1_LEN; i++) - sh_domain->l2[i].pgtable = NULL; - spin_lock_init(&sh_domain->map_lock); - spin_lock_init(&sh_domain->attached_list_lock); - INIT_LIST_HEAD(&sh_domain->attached_list); - return &sh_domain->domain; -} - -static void shmobile_iommu_domain_free(struct iommu_domain *domain) -{ - struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); - int i; - - for (i = 0; i < L1_LEN; i++) { - if (sh_domain->l2[i].pgtable) - pgtable_free(&sh_domain->l2[i], l2cache, L2_SIZE); - } - pgtable_free(&sh_domain->l1, l1cache, L1_SIZE); - kfree(sh_domain); -} - -static int shmobile_iommu_attach_device(struct iommu_domain *domain, - struct device *dev) -{ - struct shmobile_iommu_archdata *archdata = dev->archdata.iommu; - struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); - int ret = -EBUSY; - - if (!archdata) - return -ENODEV; - spin_lock(&sh_domain->attached_list_lock); - spin_lock(&archdata->attach_lock); - if (archdata->attached != sh_domain) { - if (archdata->attached) - goto err; - 
ipmmu_tlb_set(archdata->ipmmu, sh_domain->l1.handle, L1_SIZE, - 0); - ipmmu_tlb_flush(archdata->ipmmu); - archdata->attached = sh_domain; - archdata->num_attached_devices = 0; - list_add(&archdata->attached_list, &sh_domain->attached_list); - } - archdata->num_attached_devices++; - ret = 0; -err: - spin_unlock(&archdata->attach_lock); - spin_unlock(&sh_domain->attached_list_lock); - return ret; -} - -static void shmobile_iommu_detach_device(struct iommu_domain *domain, - struct device *dev) -{ - struct shmobile_iommu_archdata *archdata = dev->archdata.iommu; - struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); - - if (!archdata) - return; - spin_lock(&sh_domain->attached_list_lock); - spin_lock(&archdata->attach_lock); - archdata->num_attached_devices--; - if (!archdata->num_attached_devices) { - ipmmu_tlb_set(archdata->ipmmu, 0, 0, 0); - ipmmu_tlb_flush(archdata->ipmmu); - archdata->attached = NULL; - list_del(&archdata->attached_list); - } - spin_unlock(&archdata->attach_lock); - spin_unlock(&sh_domain->attached_list_lock); -} - -static void domain_tlb_flush(struct shmobile_iommu_domain *sh_domain) -{ - struct shmobile_iommu_archdata *archdata; - - spin_lock(&sh_domain->attached_list_lock); - list_for_each_entry(archdata, &sh_domain->attached_list, attached_list) - ipmmu_tlb_flush(archdata->ipmmu); - spin_unlock(&sh_domain->attached_list_lock); -} - -static int l2alloc(struct shmobile_iommu_domain *sh_domain, - unsigned int l1index) -{ - int ret; - - if (!sh_domain->l2[l1index].pgtable) { - ret = pgtable_alloc(&sh_domain->l2[l1index], l2cache, L2_SIZE); - if (ret < 0) - return ret; - } - pgtable_write(&sh_domain->l1, l1index, 1, - sh_domain->l2[l1index].handle | 0x1); - return 0; -} - -static void l2realfree(struct shmobile_iommu_domain_pgtable *l2) -{ - if (l2->pgtable) - pgtable_free(l2, l2cache, L2_SIZE); -} - -static void l2free(struct shmobile_iommu_domain *sh_domain, - unsigned int l1index, - struct shmobile_iommu_domain_pgtable *l2) -{ - pgtable_write(&sh_domain->l1, l1index, 1, 0); - if (sh_domain->l2[l1index].pgtable) { - *l2 = sh_domain->l2[l1index]; - sh_domain->l2[l1index].pgtable = NULL; - } -} - -static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) -{ - struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL }; - struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); - unsigned int l1index, l2index; - int ret; - - l1index = iova >> 20; - switch (size) { - case SZ_4K: - l2index = (iova >> 12) & 0xff; - spin_lock(&sh_domain->map_lock); - ret = l2alloc(sh_domain, l1index); - if (!ret) - pgtable_write(&sh_domain->l2[l1index], l2index, 1, - paddr | 0xff2); - spin_unlock(&sh_domain->map_lock); - break; - case SZ_64K: - l2index = (iova >> 12) & 0xf0; - spin_lock(&sh_domain->map_lock); - ret = l2alloc(sh_domain, l1index); - if (!ret) - pgtable_write(&sh_domain->l2[l1index], l2index, 0x10, - paddr | 0xff1); - spin_unlock(&sh_domain->map_lock); - break; - case SZ_1M: - spin_lock(&sh_domain->map_lock); - l2free(sh_domain, l1index, &l2); - pgtable_write(&sh_domain->l1, l1index, 1, paddr | 0xc02); - spin_unlock(&sh_domain->map_lock); - ret = 0; - break; - default: - ret = -EINVAL; - } - if (!ret) - domain_tlb_flush(sh_domain); - l2realfree(&l2); - return ret; -} - -static size_t shmobile_iommu_unmap(struct iommu_domain *domain, - unsigned long iova, size_t size) -{ - struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL }; - struct shmobile_iommu_domain *sh_domain = 
to_sh_domain(domain); - unsigned int l1index, l2index; - uint32_t l2entry = 0; - size_t ret = 0; - - l1index = iova >> 20; - if (!(iova & 0xfffff) && size >= SZ_1M) { - spin_lock(&sh_domain->map_lock); - l2free(sh_domain, l1index, &l2); - spin_unlock(&sh_domain->map_lock); - ret = SZ_1M; - goto done; - } - l2index = (iova >> 12) & 0xff; - spin_lock(&sh_domain->map_lock); - if (sh_domain->l2[l1index].pgtable) - l2entry = pgtable_read(&sh_domain->l2[l1index], l2index); - switch (l2entry & 3) { - case 1: - if (l2index & 0xf) - break; - pgtable_write(&sh_domain->l2[l1index], l2index, 0x10, 0); - ret = SZ_64K; - break; - case 2: - pgtable_write(&sh_domain->l2[l1index], l2index, 1, 0); - ret = SZ_4K; - break; - } - spin_unlock(&sh_domain->map_lock); -done: - if (ret) - domain_tlb_flush(sh_domain); - l2realfree(&l2); - return ret; -} - -static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain, - dma_addr_t iova) -{ - struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); - uint32_t l1entry = 0, l2entry = 0; - unsigned int l1index, l2index; - - l1index = iova >> 20; - l2index = (iova >> 12) & 0xff; - spin_lock(&sh_domain->map_lock); - if (sh_domain->l2[l1index].pgtable) - l2entry = pgtable_read(&sh_domain->l2[l1index], l2index); - else - l1entry = pgtable_read(&sh_domain->l1, l1index); - spin_unlock(&sh_domain->map_lock); - switch (l2entry & 3) { - case 1: - return (l2entry & ~0xffff) | (iova & 0xffff); - case 2: - return (l2entry & ~0xfff) | (iova & 0xfff); - default: - if ((l1entry & 3) == 2) - return (l1entry & ~0xfffff) | (iova & 0xfffff); - return 0; - } -} - -static int find_dev_name(struct shmobile_ipmmu *ipmmu, const char *dev_name) -{ - unsigned int i, n = ipmmu->num_dev_names; - - for (i = 0; i < n; i++) { - if (strcmp(ipmmu->dev_names[i], dev_name) == 0) - return 1; - } - return 0; -} - -static int shmobile_iommu_add_device(struct device *dev) -{ - struct shmobile_iommu_archdata *archdata = ipmmu_archdata; - struct dma_iommu_mapping *mapping; - - if (!find_dev_name(archdata->ipmmu, dev_name(dev))) - return 0; - mapping = archdata->iommu_mapping; - if (!mapping) { - mapping = arm_iommu_create_mapping(&platform_bus_type, 0, - L1_LEN << 20); - if (IS_ERR(mapping)) - return PTR_ERR(mapping); - archdata->iommu_mapping = mapping; - } - dev->archdata.iommu = archdata; - if (arm_iommu_attach_device(dev, mapping)) - pr_err("arm_iommu_attach_device failed\n"); - return 0; -} - -static const struct iommu_ops shmobile_iommu_ops = { - .domain_alloc = shmobile_iommu_domain_alloc, - .domain_free = shmobile_iommu_domain_free, - .attach_dev = shmobile_iommu_attach_device, - .detach_dev = shmobile_iommu_detach_device, - .map = shmobile_iommu_map, - .unmap = shmobile_iommu_unmap, - .map_sg = default_iommu_map_sg, - .iova_to_phys = shmobile_iommu_iova_to_phys, - .add_device = shmobile_iommu_add_device, - .pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K, -}; - -int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu) -{ - static struct shmobile_iommu_archdata *archdata; - - l1cache = kmem_cache_create("shmobile-iommu-pgtable1", L1_SIZE, - L1_ALIGN, SLAB_HWCACHE_ALIGN, NULL); - if (!l1cache) - return -ENOMEM; - l2cache = kmem_cache_create("shmobile-iommu-pgtable2", L2_SIZE, - L2_ALIGN, SLAB_HWCACHE_ALIGN, NULL); - if (!l2cache) { - kmem_cache_destroy(l1cache); - return -ENOMEM; - } - archdata = kzalloc(sizeof(*archdata), GFP_KERNEL); - if (!archdata) { - kmem_cache_destroy(l1cache); - kmem_cache_destroy(l2cache); - return -ENOMEM; - } - spin_lock_init(&archdata->attach_lock); - archdata->ipmmu = 
ipmmu; - ipmmu_archdata = archdata; - bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops); - return 0; -} diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c deleted file mode 100644 index 951651a9746b..000000000000 --- a/drivers/iommu/shmobile-ipmmu.c +++ /dev/null @@ -1,129 +0,0 @@ -/* - * IPMMU/IPMMUI - * Copyright (C) 2012 Hideki EIRAKU - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - */ - -#include -#include -#include -#include -#include -#include -#include "shmobile-ipmmu.h" - -#define IMCTR1 0x000 -#define IMCTR2 0x004 -#define IMASID 0x010 -#define IMTTBR 0x014 -#define IMTTBCR 0x018 - -#define IMCTR1_TLBEN (1 << 0) -#define IMCTR1_FLUSH (1 << 1) - -static void ipmmu_reg_write(struct shmobile_ipmmu *ipmmu, unsigned long reg_off, - unsigned long data) -{ - iowrite32(data, ipmmu->ipmmu_base + reg_off); -} - -void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu) -{ - if (!ipmmu) - return; - - spin_lock(&ipmmu->flush_lock); - if (ipmmu->tlb_enabled) - ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN); - else - ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH); - spin_unlock(&ipmmu->flush_lock); -} - -void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size, - int asid) -{ - if (!ipmmu) - return; - - spin_lock(&ipmmu->flush_lock); - switch (size) { - default: - ipmmu->tlb_enabled = 0; - break; - case 0x2000: - ipmmu_reg_write(ipmmu, IMTTBCR, 1); - ipmmu->tlb_enabled = 1; - break; - case 0x1000: - ipmmu_reg_write(ipmmu, IMTTBCR, 2); - ipmmu->tlb_enabled = 1; - break; - case 0x800: - ipmmu_reg_write(ipmmu, IMTTBCR, 3); - ipmmu->tlb_enabled = 1; - break; - case 0x400: - ipmmu_reg_write(ipmmu, IMTTBCR, 4); - ipmmu->tlb_enabled = 1; - break; - case 0x200: - ipmmu_reg_write(ipmmu, IMTTBCR, 5); - ipmmu->tlb_enabled = 1; - break; - case 0x100: - ipmmu_reg_write(ipmmu, IMTTBCR, 6); - ipmmu->tlb_enabled = 1; - break; - case 0x80: - ipmmu_reg_write(ipmmu, IMTTBCR, 7); - ipmmu->tlb_enabled = 1; - break; - } - ipmmu_reg_write(ipmmu, IMTTBR, phys); - ipmmu_reg_write(ipmmu, IMASID, asid); - spin_unlock(&ipmmu->flush_lock); -} - -static int ipmmu_probe(struct platform_device *pdev) -{ - struct shmobile_ipmmu *ipmmu; - struct resource *res; - struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data; - - ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL); - if (!ipmmu) { - dev_err(&pdev->dev, "cannot allocate device data\n"); - return -ENOMEM; - } - spin_lock_init(&ipmmu->flush_lock); - ipmmu->dev = &pdev->dev; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ipmmu->ipmmu_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(ipmmu->ipmmu_base)) - return PTR_ERR(ipmmu->ipmmu_base); - - ipmmu->dev_names = pdata->dev_names; - ipmmu->num_dev_names = pdata->num_dev_names; - platform_set_drvdata(pdev, ipmmu); - ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */ - ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */ - return ipmmu_iommu_init(ipmmu); -} - -static struct platform_driver ipmmu_driver = { - .probe = ipmmu_probe, - .driver = { - .name = "ipmmu", - }, -}; - -static int __init ipmmu_init(void) -{ - return platform_driver_register(&ipmmu_driver); -} -subsys_initcall(ipmmu_init); diff --git a/drivers/iommu/shmobile-ipmmu.h b/drivers/iommu/shmobile-ipmmu.h deleted file mode 100644 index 9524743ca1fb..000000000000 --- a/drivers/iommu/shmobile-ipmmu.h 
+++ /dev/null @@ -1,34 +0,0 @@ -/* shmobile-ipmmu.h - * - * Copyright (C) 2012 Hideki EIRAKU - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - */ - -#ifndef __SHMOBILE_IPMMU_H__ -#define __SHMOBILE_IPMMU_H__ - -struct shmobile_ipmmu { - struct device *dev; - void __iomem *ipmmu_base; - int tlb_enabled; - spinlock_t flush_lock; - const char * const *dev_names; - unsigned int num_dev_names; -}; - -#ifdef CONFIG_SHMOBILE_IPMMU_TLB -void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu); -void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size, - int asid); -int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu); -#else -static inline int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu) -{ - return -EINVAL; -} -#endif - -#endif /* __SHMOBILE_IPMMU_H__ */ -- cgit v1.2.3 From 89df3a96baeaf5d565183e9e9fc35c9974c20d68 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 29 Oct 2015 13:48:56 +0000 Subject: iommu/arm-smmu: Remove #define for non-existent PRIQ_0_OF field PRIQ_0_OF has been removed from the SMMUv3 architecture, so remove its corresponding (and unused) #define from the driver. Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 4e5118a4cd30..e0032c098b32 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -378,7 +378,6 @@ #define PRIQ_0_SID_MASK 0xffffffffUL #define PRIQ_0_SSID_SHIFT 32 #define PRIQ_0_SSID_MASK 0xfffffUL -#define PRIQ_0_OF (1UL << 57) #define PRIQ_0_PERM_PRIV (1UL << 58) #define PRIQ_0_PERM_EXEC (1UL << 59) #define PRIQ_0_PERM_READ (1UL << 60) -- cgit v1.2.3 From 04fa26c71be5d7cf1c63f23f6345dad209f361d7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 30 Oct 2015 18:12:41 +0000 Subject: iommu/arm-smmu: Convert DMA buffer allocations to the managed API The ARM SMMUv3 driver uses dma_{alloc,free}_coherent to manage its queues and configuration data structures. This patch converts the driver to the managed (dmam_*) API, so that resources are freed automatically on device teardown. This greatly simplifies the failure paths and allows us to remove a bunch of handcrafted freeing code. 
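Before the diff, a toy model of the devres idea behind the dmam_* calls: every allocation is recorded on a per-device list, and a single teardown pass releases them all in reverse order, which is why the hand-written freeing helpers below can simply be deleted. The struct and function names in this sketch are invented for illustration.

/* Managed allocation: each buffer is tracked on the device, so error
 * paths and remove() never free anything by hand. */
#include <stdlib.h>

struct devres { struct devres *next; void *buf; };
struct toy_device { struct devres *res; };

static void *managed_alloc(struct toy_device *dev, size_t size)
{
	struct devres *dr = malloc(sizeof(*dr));

	if (!dr)
		return NULL;
	dr->buf = calloc(1, size);	/* zeroed, like __GFP_ZERO */
	if (!dr->buf) {
		free(dr);
		return NULL;
	}
	dr->next = dev->res;		/* LIFO: released newest-first */
	dev->res = dr;
	return dr->buf;
}

static void device_teardown(struct toy_device *dev)
{
	while (dev->res) {
		struct devres *dr = dev->res;

		dev->res = dr->next;
		free(dr->buf);
		free(dr);
	}
}

int main(void)
{
	struct toy_device dev = { 0 };

	managed_alloc(&dev, 64);	/* e.g. a queue */
	managed_alloc(&dev, 128);	/* e.g. a stream table */
	device_teardown(&dev);		/* frees both, no per-buffer code */
	return 0;
}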
Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 132 +++++++------------------------------------- 1 file changed, 21 insertions(+), 111 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index e0032c098b32..86480480895d 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1122,8 +1122,8 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS]; desc->span = STRTAB_SPLIT + 1; - desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma, - GFP_KERNEL); + desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma, + GFP_KERNEL | __GFP_ZERO); if (!desc->l2ptr) { dev_err(smmu->dev, "failed to allocate l2 stream table for SID %u\n", @@ -1428,10 +1428,10 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; if (cfg->cdptr) { - dma_free_coherent(smmu_domain->smmu->dev, - CTXDESC_CD_DWORDS << 3, - cfg->cdptr, - cfg->cdptr_dma); + dmam_free_coherent(smmu_domain->smmu->dev, + CTXDESC_CD_DWORDS << 3, + cfg->cdptr, + cfg->cdptr_dma); arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid); } @@ -1456,8 +1456,9 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain, if (IS_ERR_VALUE(asid)) return asid; - cfg->cdptr = dma_zalloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3, - &cfg->cdptr_dma, GFP_KERNEL); + cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3, + &cfg->cdptr_dma, + GFP_KERNEL | __GFP_ZERO); if (!cfg->cdptr) { dev_warn(smmu->dev, "failed to allocate context descriptor\n"); ret = -ENOMEM; @@ -1936,7 +1937,7 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, { size_t qsz = ((1 << q->max_n_shift) * dwords) << 3; - q->base = dma_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL); + q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL); if (!q->base) { dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n", qsz); @@ -1956,23 +1957,6 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, return 0; } -static void arm_smmu_free_one_queue(struct arm_smmu_device *smmu, - struct arm_smmu_queue *q) -{ - size_t qsz = ((1 << q->max_n_shift) * q->ent_dwords) << 3; - - dma_free_coherent(smmu->dev, qsz, q->base, q->base_dma); -} - -static void arm_smmu_free_queues(struct arm_smmu_device *smmu) -{ - arm_smmu_free_one_queue(smmu, &smmu->cmdq.q); - arm_smmu_free_one_queue(smmu, &smmu->evtq.q); - - if (smmu->features & ARM_SMMU_FEAT_PRI) - arm_smmu_free_one_queue(smmu, &smmu->priq.q); -} - static int arm_smmu_init_queues(struct arm_smmu_device *smmu) { int ret; @@ -1982,49 +1966,20 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu) ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD, ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS); if (ret) - goto out; + return ret; /* evtq */ ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD, ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS); if (ret) - goto out_free_cmdq; + return ret; /* priq */ if (!(smmu->features & ARM_SMMU_FEAT_PRI)) return 0; - ret = arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD, - ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS); - if (ret) - goto out_free_evtq; - - return 0; - -out_free_evtq: - arm_smmu_free_one_queue(smmu, &smmu->evtq.q); -out_free_cmdq: - arm_smmu_free_one_queue(smmu, &smmu->cmdq.q); -out: - return ret; -} - -static void 
arm_smmu_free_l2_strtab(struct arm_smmu_device *smmu) -{ - int i; - size_t size; - struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; - - size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3); - for (i = 0; i < cfg->num_l1_ents; ++i) { - struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[i]; - - if (!desc->l2ptr) - continue; - - dma_free_coherent(smmu->dev, size, desc->l2ptr, - desc->l2ptr_dma); - } + return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD, + ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS); } static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu) @@ -2053,7 +2008,6 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) void *strtab; u64 reg; u32 size, l1size; - int ret; struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; /* @@ -2076,8 +2030,8 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) size, smmu->sid_bits); l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); - strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma, - GFP_KERNEL); + strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma, + GFP_KERNEL | __GFP_ZERO); if (!strtab) { dev_err(smmu->dev, "failed to allocate l1 stream table (%u bytes)\n", @@ -2094,13 +2048,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) << STRTAB_BASE_CFG_SPLIT_SHIFT; cfg->strtab_base_cfg = reg; - ret = arm_smmu_init_l1_strtab(smmu); - if (ret) - dma_free_coherent(smmu->dev, - l1size, - strtab, - cfg->strtab_dma); - return ret; + return arm_smmu_init_l1_strtab(smmu); } static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu) @@ -2111,8 +2059,8 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu) struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3); - strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma, - GFP_KERNEL); + strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma, + GFP_KERNEL | __GFP_ZERO); if (!strtab) { dev_err(smmu->dev, "failed to allocate linear stream table (%u bytes)\n", @@ -2156,21 +2104,6 @@ static int arm_smmu_init_strtab(struct arm_smmu_device *smmu) return 0; } -static void arm_smmu_free_strtab(struct arm_smmu_device *smmu) -{ - struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; - u32 size = cfg->num_l1_ents; - - if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { - arm_smmu_free_l2_strtab(smmu); - size *= STRTAB_L1_DESC_DWORDS << 3; - } else { - size *= STRTAB_STE_DWORDS * 3; - } - - dma_free_coherent(smmu->dev, size, cfg->strtab, cfg->strtab_dma); -} - static int arm_smmu_init_structures(struct arm_smmu_device *smmu) { int ret; @@ -2179,21 +2112,7 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu) if (ret) return ret; - ret = arm_smmu_init_strtab(smmu); - if (ret) - goto out_free_queues; - - return 0; - -out_free_queues: - arm_smmu_free_queues(smmu); - return ret; -} - -static void arm_smmu_free_structures(struct arm_smmu_device *smmu) -{ - arm_smmu_free_strtab(smmu); - arm_smmu_free_queues(smmu); + return arm_smmu_init_strtab(smmu); } static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val, @@ -2698,15 +2617,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) platform_set_drvdata(pdev, smmu); /* Reset the device */ - ret = arm_smmu_device_reset(smmu); - if (ret) - goto out_free_structures; - - return 0; - -out_free_structures: - arm_smmu_free_structures(smmu); - return ret; + return arm_smmu_device_reset(smmu); } static int 
arm_smmu_device_remove(struct platform_device *pdev) @@ -2714,7 +2625,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev) { struct arm_smmu_device *smmu = platform_get_drvdata(pdev); arm_smmu_device_disable(smmu); - arm_smmu_free_structures(smmu); return 0; } -- cgit v1.2.3 From 44830b0cbdba29789f2a569d08dbaa3d1605c94c Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 6 Nov 2015 18:32:41 +0100 Subject: iommu/arm-smmu: Delete an unnecessary check before free_io_pgtable_ops() The free_io_pgtable_ops() function tests whether its argument is NULL and then returns immediately. Thus the test around the call is not needed. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 47dc7a793f5c..1ce4b85d5216 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -945,9 +945,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) free_irq(irq, domain); } - if (smmu_domain->pgtbl_ops) - free_io_pgtable_ops(smmu_domain->pgtbl_ops); - + free_io_pgtable_ops(smmu_domain->pgtbl_ops); __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); } -- cgit v1.2.3 From a0eacd89e35e55aad284cc2e6865bf2dcf7037ba Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 18 Nov 2015 18:15:51 +0000 Subject: iommu/arm-smmu: Use incoming shareability attributes in bypass mode When we initialise a bypass STE, we memset the structure to zero and set the Valid and Config fields to indicate that the stream should bypass the SMMU. Unfortunately, this results in an SHCFG field of 0 which means that the shareability of any incoming transactions is overridden with non-shareable, leading to potential coherence problems down the line. This patch fixes the issue by initialising bypass STEs to use the incoming shareability attributes. When translation is in effect at either stage 1 or stage 2, the shareability is determined by the page tables. Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 86480480895d..2e3e235f509c 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -253,6 +253,9 @@ #define STRTAB_STE_1_STRW_EL2 2UL #define STRTAB_STE_1_STRW_SHIFT 30 +#define STRTAB_STE_1_SHCFG_INCOMING 1UL +#define STRTAB_STE_1_SHCFG_SHIFT 44 + #define STRTAB_STE_2_S2VMID_SHIFT 0 #define STRTAB_STE_2_S2VMID_MASK 0xffffUL #define STRTAB_STE_2_VTCR_SHIFT 32 @@ -1041,6 +1044,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT : STRTAB_STE_0_CFG_BYPASS; dst[0] = cpu_to_le64(val); + dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING + << STRTAB_STE_1_SHCFG_SHIFT); dst[2] = 0; /* Nuke the VMID */ if (ste_live) arm_smmu_sync_ste_for_sid(smmu, sid); -- cgit v1.2.3 From 9a4a9d8c34bc0b0102e8a9dd67ee3910b0bfaeb4 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Fri, 20 Nov 2015 16:56:18 +0800 Subject: iommu/arm-smmu: Correct group reference count The basic flow for adding a device: arm_smmu_add_device |->iommu_group_get_for_dev |->iommu_group_get return group; (1) |->ops->device_group : Init/increase reference count to/by 1. |->iommu_group_add_device : Increase reference count by 1.
return group (2) |->return 0; Since we are adding one device, the flow is (2) and the group reference count will be increased by 2. So, we need to add iommu_group_put at the end of arm_smmu_add_device to decrease the count by 1. Also take the failure path into consideration when we fail to add a device. Signed-off-by: Peng Fan Cc: Will Deacon Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 18 +++++++++++------- drivers/iommu/arm-smmu.c | 1 + 2 files changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 2e3e235f509c..3ea4d576bf08 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1809,13 +1809,13 @@ static int arm_smmu_add_device(struct device *dev) smmu = arm_smmu_get_for_pci_dev(pdev); if (!smmu) { ret = -ENOENT; - goto out_put_group; + goto out_remove_dev; } smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL); if (!smmu_group) { ret = -ENOMEM; - goto out_put_group; + goto out_remove_dev; } smmu_group->ste.valid = true; @@ -1831,20 +1831,20 @@ static int arm_smmu_add_device(struct device *dev) for (i = 0; i < smmu_group->num_sids; ++i) { /* If we already know about this SID, then we're done */ if (smmu_group->sids[i] == sid) - return 0; + goto out_put_group; } /* Check the SID is in range of the SMMU and our stream table */ if (!arm_smmu_sid_in_range(smmu, sid)) { ret = -ERANGE; - goto out_put_group; + goto out_remove_dev; } /* Ensure l2 strtab is initialised */ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { ret = arm_smmu_init_l2_strtab(smmu, sid); if (ret) - goto out_put_group; + goto out_remove_dev; } /* Resize the SID array for the group */ @@ -1854,15 +1854,19 @@ static int arm_smmu_add_device(struct device *dev) if (!sids) { smmu_group->num_sids--; ret = -ENOMEM; - goto out_put_group; + goto out_remove_dev; } /* Add the new SID */ sids[smmu_group->num_sids - 1] = sid; smmu_group->sids = sids; - return 0; out_put_group: + iommu_group_put(group); + return 0; + +out_remove_dev: + iommu_group_remove_device(dev); iommu_group_put(group); return ret; } diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 1ce4b85d5216..6ed169bcb39d 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1355,6 +1355,7 @@ static int arm_smmu_add_device(struct device *dev) if (IS_ERR(group)) return PTR_ERR(group); + iommu_group_put(group); return 0; } -- cgit v1.2.3 From a0d5c04c6053d8c47cca37384ae472f6b2ee0dee Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 4 Dec 2015 12:00:29 +0000 Subject: iommu/arm-smmu: Handle unknown CERROR values gracefully Whilst the architecture only defines a few of the possible CERROR values, we should handle unknown values gracefully rather than go out of bounds trying to print out an error description. Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 3ea4d576bf08..4c5ef4e5da98 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -857,15 +857,17 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) }; dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons, - cerror_str[idx]); + idx < ARRAY_SIZE(cerror_str) ?
cerror_str[idx] : "Unknown"); switch (idx) { - case CMDQ_ERR_CERROR_ILL_IDX: - break; case CMDQ_ERR_CERROR_ABT_IDX: dev_err(smmu->dev, "retrying command fetch\n"); case CMDQ_ERR_CERROR_NONE_IDX: return; + case CMDQ_ERR_CERROR_ILL_IDX: + /* Fallthrough */ + default: + break; } /* -- cgit v1.2.3 From 2eb97c78613082f308c0b39366c034cb589b8ee9 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 4 Dec 2015 17:52:58 +0000 Subject: iommu/io-pgtable-arm: Avoid dereferencing bogus PTEs In the case of corrupted page tables, or when an invalid size is given, __arm_lpae_unmap() may recurse beyond the maximum number of levels. Unfortunately the detection of this error condition only happens *after* calculating a nonsense offset from something which might not be a valid table pointer and dereferencing that to see if it is a valid PTE. Make things a little more robust by checking the level is valid before doing anything which depends on it being so. Reviewed-by: Laurent Pinchart Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 7df97777662d..366a354c689d 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -486,11 +486,13 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, void *cookie = data->iop.cookie; size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data); + /* Something went horribly wrong and we ran out of page table */ + if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) + return 0; + ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); pte = *ptep; - - /* Something went horribly wrong and we ran out of page table */ - if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS))) + if (WARN_ON(!pte)) return 0; /* If the size matches this level, we're in the right place */ -- cgit v1.2.3 From 06c610e8f32ba2fe41d57e1718611c2ec5024878 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 7 Dec 2015 18:18:53 +0000 Subject: iommu/io-pgtable: Indicate granule for TLB maintenance IOMMU hardware with range-based TLB maintenance commands can work happily with the iova and size arguments passed via the tlb_add_flush callback, but for IOMMUs which require separate commands per entry in the range, it is not straightforward to infer the necessary granularity when it comes to issuing the actual commands. Add an additional argument indicating the granularity for the benefit of drivers needing to know, and update the ARM LPAE code appropriately (for non-leaf invalidations we currently just assume the worst-case page granularity rather than walking the table to check). 
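A standalone sketch of what the new argument enables for per-entry hardware; issue_inv_cmd() is a placeholder for a real driver command, and the loop assumes size is a nonzero multiple of granule. The same stepping loop appears in the "Invalidate TLBs properly" patch further down.

/* One invalidation command per granule-sized entry in the range
 * [iova, iova + size). Range-based hardware could do this in one
 * operation; per-entry hardware needs the granule to know the step. */
#include <stdio.h>
#include <stddef.h>

static void issue_inv_cmd(unsigned long iova)
{
	printf("invalidate entry at 0x%lx\n", iova);
}

static void tlb_inv_range(unsigned long iova, size_t size, size_t granule)
{
	do {
		issue_inv_cmd(iova);
		iova += granule;
	} while (size -= granule);
}

int main(void)
{
	tlb_inv_range(0x100000, 0x4000, 0x1000);	/* four commands */
	return 0;
}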
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 2 +- drivers/iommu/arm-smmu.c | 2 +- drivers/iommu/io-pgtable-arm.c | 27 +++++++++++++++------------ drivers/iommu/io-pgtable.h | 4 ++-- drivers/iommu/ipmmu-vmsa.c | 4 ++-- 5 files changed, 21 insertions(+), 18 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 4c5ef4e5da98..735ad2c58dd8 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1341,7 +1341,7 @@ static void arm_smmu_tlb_inv_context(void *cookie) } static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, - bool leaf, void *cookie) + size_t granule, bool leaf, void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 6ed169bcb39d..7e04bf5640ae 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -582,7 +582,7 @@ static void arm_smmu_tlb_inv_context(void *cookie) } static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, - bool leaf, void *cookie) + size_t granule, bool leaf, void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 366a354c689d..7a5c772f7be2 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -58,8 +58,10 @@ ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ * (d)->bits_per_level) + (d)->pg_shift) +#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift) + #define ARM_LPAE_PAGES_PER_PGD(d) \ - DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift) + DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d)) /* * Calculate the index at level l used to map virtual address a using the @@ -169,7 +171,7 @@ /* IOPTE accessors */ #define iopte_deref(pte,d) \ (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \ - & ~((1ULL << (d)->pg_shift) - 1))) + & ~(ARM_LPAE_GRANULE(d) - 1ULL))) #define iopte_type(pte,l) \ (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK) @@ -326,7 +328,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, /* Grab a pointer to the next level */ pte = *ptep; if (!pte) { - cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift, + cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data), GFP_ATOMIC, cfg); if (!cptep) return -ENOMEM; @@ -412,7 +414,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, if (lvl == ARM_LPAE_START_LVL(data)) table_size = data->pgd_size; else - table_size = 1UL << data->pg_shift; + table_size = ARM_LPAE_GRANULE(data); start = ptep; end = (void *)ptep + table_size; @@ -473,7 +475,7 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, __arm_lpae_set_pte(ptep, table, cfg); iova &= ~(blk_size - 1); - cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie); + cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie); return size; } @@ -501,12 +503,13 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, if (!iopte_leaf(pte, lvl)) { /* Also flush any partial walks */ - tlb->tlb_add_flush(iova, size, false, cookie); + tlb->tlb_add_flush(iova, size, ARM_LPAE_GRANULE(data), + false, cookie); tlb->tlb_sync(cookie); ptep = iopte_deref(pte, data); __arm_lpae_free_pgtable(data, lvl + 1, ptep); } else { - tlb->tlb_add_flush(iova, size, true, cookie); + 
tlb->tlb_add_flush(iova, size, size, true, cookie); } return size; @@ -572,7 +575,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, return 0; found_translation: - iova &= ((1 << data->pg_shift) - 1); + iova &= (ARM_LPAE_GRANULE(data) - 1); return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova; } @@ -670,7 +673,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); - switch (1 << data->pg_shift) { + switch (ARM_LPAE_GRANULE(data)) { case SZ_4K: reg |= ARM_LPAE_TCR_TG0_4K; break; @@ -771,7 +774,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) sl = ARM_LPAE_START_LVL(data); - switch (1 << data->pg_shift) { + switch (ARM_LPAE_GRANULE(data)) { case SZ_4K: reg |= ARM_LPAE_TCR_TG0_4K; sl++; /* SL0 format is different for 4K granule size */ @@ -891,8 +894,8 @@ static void dummy_tlb_flush_all(void *cookie) WARN_ON(cookie != cfg_cookie); } -static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf, - void *cookie) +static void dummy_tlb_add_flush(unsigned long iova, size_t size, + size_t granule, bool leaf, void *cookie) { WARN_ON(cookie != cfg_cookie); WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h index ac9e2341a633..2e18469afe3c 100644 --- a/drivers/iommu/io-pgtable.h +++ b/drivers/iommu/io-pgtable.h @@ -26,8 +26,8 @@ enum io_pgtable_fmt { */ struct iommu_gather_ops { void (*tlb_flush_all)(void *cookie); - void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf, - void *cookie); + void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule, + bool leaf, void *cookie); void (*tlb_sync)(void *cookie); }; diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 8cf605fa9946..5b1166d407c4 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -277,8 +277,8 @@ static void ipmmu_tlb_flush_all(void *cookie) ipmmu_tlb_invalidate(domain); } -static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf, - void *cookie) +static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, + size_t granule, bool leaf, void *cookie) { /* The hardware doesn't support selective TLB flush. */ } -- cgit v1.2.3 From 75df1386557c25188bd2383bbe8dd14a5ac81c06 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 7 Dec 2015 18:18:52 +0000 Subject: iommu/arm-smmu: Invalidate TLBs properly When invalidating an IOVA range potentially spanning multiple pages, such as when removing an entire intermediate-level table, we currently only issue an invalidation for the first IOVA of that range. Since the architecture specifies that address-based TLB maintenance operations target a single entry, an SMMU could feasibly retain live entries for subsequent pages within that unmapped range, which is not good. Make sure we hit every possible entry by iterating over the whole range at the granularity provided by the pagetable implementation. Signed-off-by: Robin Murphy [will: added missing semicolons...] 
Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 5 ++++- drivers/iommu/arm-smmu.c | 16 +++++++++++++--- 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 735ad2c58dd8..4991e79465ee 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1360,7 +1360,10 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; } - arm_smmu_cmdq_issue_cmd(smmu, &cmd); + do { + arm_smmu_cmdq_issue_cmd(smmu, &cmd); + cmd.tlbi.addr += granule; + } while (size -= granule); } static struct iommu_gather_ops arm_smmu_gather_ops = { diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 7e04bf5640ae..59ee4b8a3236 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -597,12 +597,18 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) { iova &= ~12UL; iova |= ARM_SMMU_CB_ASID(cfg); - writel_relaxed(iova, reg); + do { + writel_relaxed(iova, reg); + iova += granule; + } while (size -= granule); #ifdef CONFIG_64BIT } else { iova >>= 12; iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48; - writeq_relaxed(iova, reg); + do { + writeq_relaxed(iova, reg); + iova += granule >> 12; + } while (size -= granule); #endif } #ifdef CONFIG_64BIT @@ -610,7 +616,11 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2; - writeq_relaxed(iova >> 12, reg); + iova >>= 12; + do { + writeq_relaxed(iova, reg); + iova += granule >> 12; + } while (size -= granule); #endif } else { reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID; -- cgit v1.2.3 From fdc38967633ec23b3b24dfc487dfb7b90d1a0215 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 4 Dec 2015 17:53:01 +0000 Subject: iommu/io-pgtable: Make io_pgtable_ops_to_pgtable() macro common There is no need to keep a useful accessor for a public structure hidden away in a private implementation. Move it out alongside the structure definition so that other implementations may reuse it. Acked-by: Laurent Pinchart Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 3 --- drivers/iommu/io-pgtable.h | 2 ++ 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 7a5c772f7be2..937ba23e48d7 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -38,9 +38,6 @@ #define io_pgtable_to_data(x) \ container_of((x), struct arm_lpae_io_pgtable, iop) -#define io_pgtable_ops_to_pgtable(x) \ - container_of((x), struct io_pgtable, ops) - #define io_pgtable_ops_to_data(x) \ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h index 2e18469afe3c..36673c83de58 100644 --- a/drivers/iommu/io-pgtable.h +++ b/drivers/iommu/io-pgtable.h @@ -131,6 +131,8 @@ struct io_pgtable { struct io_pgtable_ops ops; }; +#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops) + /** * struct io_pgtable_init_fns - Alloc/free a set of page tables for a * particular format. 
-- cgit v1.2.3 From 324ba1082323a51a3ad282c20e3d3b11845cf030 Mon Sep 17 00:00:00 2001 From: Prem Mallappa Date: Mon, 14 Dec 2015 22:01:14 +0530 Subject: iommu/arm-smmu: Fix write to GERRORN register When acknowledging global errors, the GERRORN register should be written with the original GERROR value so that active errors are toggled. This patch fixes the driver to write the original GERROR value to GERRORN, instead of an active error mask. Signed-off-by: Prem Mallappa [will: reworked use of active bits and fixed commit log] Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 4991e79465ee..488f763877d2 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1256,50 +1256,50 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu); static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev) { - u32 gerror, gerrorn; + u32 gerror, gerrorn, active; struct arm_smmu_device *smmu = dev; gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR); gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN); - gerror ^= gerrorn; - if (!(gerror & GERROR_ERR_MASK)) + active = gerror ^ gerrorn; + if (!(active & GERROR_ERR_MASK)) return IRQ_NONE; /* No errors pending */ dev_warn(smmu->dev, "unexpected global error reported (0x%08x), this could be serious\n", - gerror); + active); - if (gerror & GERROR_SFM_ERR) { + if (active & GERROR_SFM_ERR) { dev_err(smmu->dev, "device has entered Service Failure Mode!\n"); arm_smmu_device_disable(smmu); } - if (gerror & GERROR_MSI_GERROR_ABT_ERR) + if (active & GERROR_MSI_GERROR_ABT_ERR) dev_warn(smmu->dev, "GERROR MSI write aborted\n"); - if (gerror & GERROR_MSI_PRIQ_ABT_ERR) { + if (active & GERROR_MSI_PRIQ_ABT_ERR) { dev_warn(smmu->dev, "PRIQ MSI write aborted\n"); arm_smmu_priq_handler(irq, smmu->dev); } - if (gerror & GERROR_MSI_EVTQ_ABT_ERR) { + if (active & GERROR_MSI_EVTQ_ABT_ERR) { dev_warn(smmu->dev, "EVTQ MSI write aborted\n"); arm_smmu_evtq_handler(irq, smmu->dev); } - if (gerror & GERROR_MSI_CMDQ_ABT_ERR) { + if (active & GERROR_MSI_CMDQ_ABT_ERR) { dev_warn(smmu->dev, "CMDQ MSI write aborted\n"); arm_smmu_cmdq_sync_handler(irq, smmu->dev); } - if (gerror & GERROR_PRIQ_ABT_ERR) + if (active & GERROR_PRIQ_ABT_ERR) dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n"); - if (gerror & GERROR_EVTQ_ABT_ERR) + if (active & GERROR_EVTQ_ABT_ERR) dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n"); - if (gerror & GERROR_CMDQ_ERR) + if (active & GERROR_CMDQ_ERR) arm_smmu_cmdq_skip_err(smmu); writel(gerror, smmu->base + ARM_SMMU_GERRORN); -- cgit v1.2.3 From 6380be0535fd60c0a346ec0ae447f0f6c9e3ea83 Mon Sep 17 00:00:00 2001 From: Prem Mallappa Date: Mon, 14 Dec 2015 22:01:23 +0530 Subject: iommu/arm-smmu: Use STE.S1STALLD only when supported It is ILLEGAL to set STE.S1STALLD to 1 if stage 1 is enabled and either the stall or terminate models are not supported. This patch fixes the STALLD check and ensures that we don't set STALLD in the STE when it is not supported.
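A rough standalone model of the two halves of this fix, ahead of the diff: decode the IDR0 stall-model field (both the "stall" and "force" encodings imply stall support) and only then OR the S1STALLD bit into the STE word. The shift, mask and bit positions below are placeholders rather than the architectural encodings.

/* Feature-gated bit setting: the STALLD bit is only legal, and only
 * written, when the decoded feature set says stalls are supported. */
#include <stdint.h>
#include <stdio.h>

#define STALL_MODEL_SHIFT 24		/* placeholder field position */
#define STALL_MODEL_MASK  0x3u
#define STALL_MODEL_STALL 0u		/* stall model supported */
#define STALL_MODEL_FORCE 2u		/* stalls forced, also supported */
#define FEAT_STALLS       (1u << 0)
#define STE_S1STALLD      (1ull << 27)	/* placeholder bit position */

static unsigned int decode_stall_features(uint32_t idr0)
{
	switch ((idr0 >> STALL_MODEL_SHIFT) & STALL_MODEL_MASK) {
	case STALL_MODEL_STALL:
		/* Fallthrough */
	case STALL_MODEL_FORCE:
		return FEAT_STALLS;
	}
	return 0;	/* no stall support: setting S1STALLD is illegal */
}

static uint64_t build_ste_word1(unsigned int features)
{
	uint64_t val = 0;		/* other STE fields elided */

	if (features & FEAT_STALLS)	/* gate the bit on the feature */
		val |= STE_S1STALLD;
	return val;
}

int main(void)
{
	uint32_t idr0 = STALL_MODEL_FORCE << STALL_MODEL_SHIFT;

	printf("0x%llx\n",
	       (unsigned long long)build_ste_word1(decode_stall_features(idr0)));
	return 0;
}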
Signed-off-by: Prem Mallappa [will: consistently use IDR0_STALL_MODEL_* prefix] Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 488f763877d2..20875341c865 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -40,7 +40,10 @@ #define IDR0_ST_LVL_SHIFT 27 #define IDR0_ST_LVL_MASK 0x3 #define IDR0_ST_LVL_2LVL (1 << IDR0_ST_LVL_SHIFT) -#define IDR0_STALL_MODEL (3 << 24) +#define IDR0_STALL_MODEL_SHIFT 24 +#define IDR0_STALL_MODEL_MASK 0x3 +#define IDR0_STALL_MODEL_STALL (0 << IDR0_STALL_MODEL_SHIFT) +#define IDR0_STALL_MODEL_FORCE (2 << IDR0_STALL_MODEL_SHIFT) #define IDR0_TTENDIAN_SHIFT 21 #define IDR0_TTENDIAN_MASK 0x3 #define IDR0_TTENDIAN_LE (2 << IDR0_TTENDIAN_SHIFT) @@ -1062,12 +1065,14 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, STRTAB_STE_1_S1C_CACHE_WBRA << STRTAB_STE_1_S1COR_SHIFT | STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT | - STRTAB_STE_1_S1STALLD | #ifdef CONFIG_PCI_ATS STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT | #endif STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT); + if (smmu->features & ARM_SMMU_FEAT_STALLS) + dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); + val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK << STRTAB_STE_0_S1CTXPTR_SHIFT) | STRTAB_STE_0_CFG_S1_TRANS; @@ -2464,8 +2469,12 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu) dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n", coherent ? "true" : "false"); - if (reg & IDR0_STALL_MODEL) + switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) { + case IDR0_STALL_MODEL_STALL: + /* Fallthrough */ + case IDR0_STALL_MODEL_FORCE: smmu->features |= ARM_SMMU_FEAT_STALLS; + } if (reg & IDR0_S1P) smmu->features |= ARM_SMMU_FEAT_TRANS_S1; -- cgit v1.2.3 From 12c2ab09571e8aae3a87da2a4a452632a5fac1e5 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 15 Dec 2015 16:08:12 +0000 Subject: iommu/io-pgtable-arm: Ensure we free the final level on teardown When tearing down page tables, we return early for the final level since we know that we won't have any table pointers to follow. Unfortunately, this also means that we forget to free the final level, so we end up leaking memory. Fix the issue by always freeing the current level, but just don't bother to iterate over the ptes if we're at the final level. 
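The resulting teardown shape, as a standalone sketch (entry_is_table(), table_ptr() and free_table_page() are hypothetical helpers assumed only for this illustration; the real code walks arm_lpae_iopte arrays):

#include <linux/types.h>

/* Hypothetical helpers, assumed to exist for this sketch. */
static bool entry_is_table(u64 pte);
static u64 *table_ptr(u64 pte);
static void free_table_page(u64 *table);

/*
 * Sketch: free one page-table level. The table page itself is always
 * freed; only non-final levels are scanned for child tables, because
 * the final level can only hold leaf entries.
 */
static void free_level(u64 *table, int lvl, int max_lvl, size_t nents)
{
	size_t i;

	if (lvl != max_lvl) {
		for (i = 0; i < nents; i++) {
			if (entry_is_table(table[i]))
				free_level(table_ptr(table[i]),
					   lvl + 1, max_lvl, nents);
		}
	}

	free_table_page(table);	/* the leak fix: never skip this */
}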
Cc: Reported-by: Zhang Bo Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 937ba23e48d7..8bbcbfe7695c 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -404,17 +404,18 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, arm_lpae_iopte *start, *end; unsigned long table_size; - /* Only leaf entries at the last level */ - if (lvl == ARM_LPAE_MAX_LEVELS - 1) - return; - if (lvl == ARM_LPAE_START_LVL(data)) table_size = data->pgd_size; else table_size = ARM_LPAE_GRANULE(data); start = ptep; - end = (void *)ptep + table_size; + + /* Only leaf entries at the last level */ + if (lvl == ARM_LPAE_MAX_LEVELS - 1) + end = ptep; + else + end = (void *)ptep + table_size; while (ptep != end) { arm_lpae_iopte pte = *ptep++; -- cgit v1.2.3 From a7fb668fd88c979e790f0c324a1a6d749a8c5a60 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 12:50:54 +0100 Subject: iommu/amd: Warn only once on unexpected pte value This prevents possible flooding of the kernel log. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 8b2be1e7714f..3cdfac6024a5 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2328,7 +2328,7 @@ static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom, else if (direction == DMA_BIDIRECTIONAL) __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW; - WARN_ON(*pte); + WARN_ON_ONCE(*pte); *pte = __pte; @@ -2357,7 +2357,7 @@ static void dma_ops_domain_unmap(struct dma_ops_domain *dom, pte += PM_LEVEL_INDEX(0, address); - WARN_ON(!*pte); + WARN_ON_ONCE(!*pte); *pte = 0ULL; } -- cgit v1.2.3 From 007b74bab2739539bbc49bb9e3f022159dc2c279 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 12:53:54 +0100 Subject: iommu/amd: Move 'struct dma_ops_domain' definition to amd_iommu.c It is only used in this file anyway, so keep it there. Same with 'struct aperture_range'. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 40 ++++++++++++++++++++++++++++++++++++++++ drivers/iommu/amd_iommu_types.h | 40 ---------------------------------------- 2 files changed, 40 insertions(+), 40 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 3cdfac6024a5..9ce51eb1c33a 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -114,6 +114,46 @@ struct kmem_cache *amd_iommu_irq_cache; static void update_domain(struct protection_domain *domain); static int protection_domain_init(struct protection_domain *domain); +/* + * For dynamic growth the aperture size is split into ranges of 128MB of + * DMA address space each. This struct represents one such range. + */ +struct aperture_range { + + /* address allocation bitmap */ + unsigned long *bitmap; + + /* + * Array of PTE pages for the aperture. In this array we save all the + * leaf pages of the domain page table used for the aperture. This way + * we don't need to walk the page table to find a specific PTE. We can + * just calculate its address in constant time. 
+ */ + u64 *pte_pages[64]; + + unsigned long offset; +}; + +/* + * Data container for a dma_ops specific protection domain + */ +struct dma_ops_domain { + /* generic protection domain information */ + struct protection_domain domain; + + /* size of the aperture for the mappings */ + unsigned long aperture_size; + + /* address we start to search for free addresses */ + unsigned long next_address; + + /* address space relevant data */ + struct aperture_range *aperture[APERTURE_MAX_RANGES]; + + /* This will be set to true when TLB needs to be flushed */ + bool need_flush; +}; + /**************************************************************************** * * Helper functions diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index b08cf57bf455..9d32b20a5e9a 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -424,46 +424,6 @@ struct protection_domain { void *priv; /* private data */ }; -/* - * For dynamic growth the aperture size is split into ranges of 128MB of - * DMA address space each. This struct represents one such range. - */ -struct aperture_range { - - /* address allocation bitmap */ - unsigned long *bitmap; - - /* - * Array of PTE pages for the aperture. In this array we save all the - * leaf pages of the domain page table used for the aperture. This way - * we don't need to walk the page table to find a specific PTE. We can - * just calculate its address in constant time. - */ - u64 *pte_pages[64]; - - unsigned long offset; -}; - -/* - * Data container for a dma_ops specific protection domain - */ -struct dma_ops_domain { - /* generic protection domain information */ - struct protection_domain domain; - - /* size of the aperture for the mappings */ - unsigned long aperture_size; - - /* address we start to search for free addresses */ - unsigned long next_address; - - /* address space relevant data */ - struct aperture_range *aperture[APERTURE_MAX_RANGES]; - - /* This will be set to true when TLB needs to be flushed */ - bool need_flush; -}; - /* * Structure where we save information about one hardware AMD IOMMU in the * system. -- cgit v1.2.3 From 08c5fb938e05314b48fc12e697003e91d43c3c9d Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 13:04:49 +0100 Subject: iommu/amd: Introduce bitmap_lock in struct aperture_range This lock only protects the address allocation bitmap in one aperture. 
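The intended locking rule, sketched (iommu_area_alloc() and the bitmap_lock field are the real names; the fragment is illustrative only, not a complete function):

/*
 * Sketch: every access to one aperture's allocation bitmap is done
 * under that aperture's bitmap_lock, so searches in different
 * apertures no longer serialize against each other.
 */
spin_lock_irqsave(&range->bitmap_lock, flags);
address = iommu_area_alloc(range->bitmap, limit, next_bit, pages,
			   offset, boundary_size, align_mask);
spin_unlock_irqrestore(&range->bitmap_lock, flags);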
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 9ce51eb1c33a..8ff33314a668 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -120,6 +120,8 @@ static int protection_domain_init(struct protection_domain *domain); */ struct aperture_range { + spinlock_t bitmap_lock; + /* address allocation bitmap */ unsigned long *bitmap; @@ -1436,6 +1438,8 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, dma_dom->aperture[index]->offset = dma_dom->aperture_size; + spin_lock_init(&dma_dom->aperture[index]->bitmap_lock); + if (populate) { unsigned long address = dma_dom->aperture_size; int i, num_ptes = APERTURE_RANGE_PAGES / 512; @@ -1527,6 +1531,7 @@ static unsigned long dma_ops_area_alloc(struct device *dev, unsigned long boundary_size, mask; unsigned long address = -1; unsigned long limit; + unsigned long flags; next_bit >>= PAGE_SHIFT; @@ -1544,9 +1549,11 @@ static unsigned long dma_ops_area_alloc(struct device *dev, limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset, dma_mask >> PAGE_SHIFT); + spin_lock_irqsave(&dom->aperture[i]->bitmap_lock, flags); address = iommu_area_alloc(dom->aperture[i]->bitmap, limit, next_bit, pages, 0, boundary_size, align_mask); + spin_unlock_irqrestore(&dom->aperture[i]->bitmap_lock, flags); if (address != -1) { address = dom->aperture[i]->offset + (address << PAGE_SHIFT); @@ -1602,6 +1609,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom, { unsigned i = address >> APERTURE_RANGE_SHIFT; struct aperture_range *range = dom->aperture[i]; + unsigned long flags; BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL); @@ -1615,7 +1623,9 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom, address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT; + spin_lock_irqsave(&range->bitmap_lock, flags); bitmap_clear(range->bitmap, address, pages); + spin_unlock_irqrestore(&range->bitmap_lock, flags); } -- cgit v1.2.3 From 53b3b65aa5befe9e96e8f8708a76208190a07e14 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 13:14:52 +0100 Subject: iommu/amd: Flush IOMMU TLB on __map_single error path On this path some PTEs have already been made present and could, in theory, have made it into the IOMMU TLB. Flush the addresses out on the error path to make sure no stale entries remain. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 8ff33314a668..42c0a81ba9b3 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2493,6 +2493,8 @@ out_unmap: dma_ops_domain_unmap(dma_dom, start); } + domain_flush_pages(&dma_dom->domain, address, size); + dma_ops_free_addresses(dma_dom, address, pages); return DMA_ERROR_CODE; -- cgit v1.2.3 From 84b3a0bc88534d9e49d1642957f64db61a3aa5c4 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 13:23:59 +0100 Subject: iommu/amd: Flush the IOMMU TLB before the addresses are freed This allows keeping the bitmap_lock for only a very short period of time.
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 42c0a81ba9b3..69021ec79da9 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2527,14 +2527,14 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, start += PAGE_SIZE; } - SUB_STATS_COUNTER(alloced_io_mem, size); - - dma_ops_free_addresses(dma_dom, dma_addr, pages); - if (amd_iommu_unmap_flush || dma_dom->need_flush) { domain_flush_pages(&dma_dom->domain, flush_addr, size); dma_dom->need_flush = false; } + + SUB_STATS_COUNTER(alloced_io_mem, size); + + dma_ops_free_addresses(dma_dom, dma_addr, pages); } /* -- cgit v1.2.3 From b57c3c802e51e83620b739759c8bba829e231b57 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 15:40:38 +0100 Subject: iommu/amd: Pass correct shift to iommu_area_alloc() The page-offset of the aperture must be passed instead of 0. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 69021ec79da9..1d1ef374a5a8 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1551,7 +1551,7 @@ static unsigned long dma_ops_area_alloc(struct device *dev, spin_lock_irqsave(&dom->aperture[i]->bitmap_lock, flags); address = iommu_area_alloc(dom->aperture[i]->bitmap, - limit, next_bit, pages, 0, + limit, next_bit, pages, offset, boundary_size, align_mask); spin_unlock_irqrestore(&dom->aperture[i]->bitmap_lock, flags); if (address != -1) { -- cgit v1.2.3 From a0f51447f4accd8f0f9420d8e617dd4e371504d7 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 16:20:09 +0100 Subject: iommu/amd: Add dma_ops_aperture_alloc() function Make this a wrapper around iommu_area_alloc() for now and add more logic to this function later on.
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 1d1ef374a5a8..be0e81aff3bc 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1518,6 +1518,28 @@ out_free: return -ENOMEM; } +static dma_addr_t dma_ops_aperture_alloc(struct aperture_range *range, + unsigned long pages, + unsigned long next_bit, + unsigned long dma_mask, + unsigned long boundary_size, + unsigned long align_mask) +{ + unsigned long offset, limit, flags; + dma_addr_t address; + + offset = range->offset >> PAGE_SHIFT; + limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset, + dma_mask >> PAGE_SHIFT); + + spin_lock_irqsave(&range->bitmap_lock, flags); + address = iommu_area_alloc(range->bitmap, limit, next_bit, pages, + offset, boundary_size, align_mask); + spin_unlock_irqrestore(&range->bitmap_lock, flags); + + return address; +} + static unsigned long dma_ops_area_alloc(struct device *dev, struct dma_ops_domain *dom, unsigned int pages, @@ -1530,8 +1552,6 @@ static unsigned long dma_ops_area_alloc(struct device *dev, int i = start >> APERTURE_RANGE_SHIFT; unsigned long boundary_size, mask; unsigned long address = -1; - unsigned long limit; - unsigned long flags; next_bit >>= PAGE_SHIFT; @@ -1541,19 +1561,12 @@ static unsigned long dma_ops_area_alloc(struct device *dev, 1UL << (BITS_PER_LONG - PAGE_SHIFT); for (;i < max_index; ++i) { - unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT; - if (dom->aperture[i]->offset >= dma_mask) break; - limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset, - dma_mask >> PAGE_SHIFT); - - spin_lock_irqsave(&dom->aperture[i]->bitmap_lock, flags); - address = iommu_area_alloc(dom->aperture[i]->bitmap, - limit, next_bit, pages, offset, - boundary_size, align_mask); - spin_unlock_irqrestore(&dom->aperture[i]->bitmap_lock, flags); + address = dma_ops_aperture_alloc(dom->aperture[i], pages, + next_bit, dma_mask, + boundary_size, align_mask); if (address != -1) { address = dom->aperture[i]->offset + (address << PAGE_SHIFT); -- cgit v1.2.3 From ae62d49c7a9303de868f4925d020719d00686411 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 16:28:45 +0100 Subject: iommu/amd: Move aperture_range.offset to another cache-line Moving it before the pte_pages array puts it into the same cache-line as the spin-lock and the bitmap array pointer. This should save a cache-miss. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index be0e81aff3bc..2a22515f5a8b 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -124,6 +124,7 @@ struct aperture_range { /* address allocation bitmap */ unsigned long *bitmap; + unsigned long offset; /* * Array of PTE pages for the aperture. In this array we save all the * leaf pages of the domain page table used for the aperture. This way * we don't need to walk the page table to find a specific PTE. We can * just calculate its address in constant time. */ u64 *pte_pages[64]; - - unsigned long offset; }; /* -- cgit v1.2.3 From 60e6a7cb44edf7f7daef6ca00e1c84f85bdf8084 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 16:53:17 +0100 Subject: iommu/amd: Retry address allocation within one aperture Instead of skipping to the next aperture, first try again in the current one.
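Distilled into a standalone sketch (a hypothetical function built on the kernel's bitmap_find_next_zero_area(); the driver itself uses iommu_area_alloc()): keep a per-range hint, search upward from it first, and retry exactly once from bit 0 before giving up:

#include <linux/bitmap.h>

/* Sketch: first-fit search with a start hint and a single wraparound. */
static long alloc_with_hint(unsigned long *bitmap, unsigned long size,
			    unsigned long *hint, unsigned int pages)
{
	unsigned long addr;

	addr = bitmap_find_next_zero_area(bitmap, size, *hint, pages, 0);
	if (addr >= size)	/* nothing free above the hint, wrap once */
		addr = bitmap_find_next_zero_area(bitmap, size, 0, pages, 0);
	if (addr >= size)	/* the range is genuinely full */
		return -1;

	bitmap_set(bitmap, addr, pages);
	*hint = addr + pages;
	return addr;
}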
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 2a22515f5a8b..58d7d82f005a 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -125,6 +125,7 @@ struct aperture_range { /* address allocation bitmap */ unsigned long *bitmap; unsigned long offset; + unsigned long next_bit; /* * Array of PTE pages for the aperture. In this array we save all the @@ -1519,7 +1520,6 @@ out_free: static dma_addr_t dma_ops_aperture_alloc(struct aperture_range *range, unsigned long pages, - unsigned long next_bit, unsigned long dma_mask, unsigned long boundary_size, unsigned long align_mask) @@ -1532,8 +1532,17 @@ static dma_addr_t dma_ops_aperture_alloc(struct aperture_range *range, dma_mask >> PAGE_SHIFT); spin_lock_irqsave(&range->bitmap_lock, flags); - address = iommu_area_alloc(range->bitmap, limit, next_bit, pages, - offset, boundary_size, align_mask); + address = iommu_area_alloc(range->bitmap, limit, range->next_bit, + pages, offset, boundary_size, align_mask); + if (address == -1) + /* Nothing found, retry one time */ + address = iommu_area_alloc(range->bitmap, limit, + 0, pages, offset, boundary_size, + align_mask); + + if (address != -1) + range->next_bit = address + pages; + spin_unlock_irqrestore(&range->bitmap_lock, flags); return address; @@ -1546,14 +1555,11 @@ static unsigned long dma_ops_area_alloc(struct device *dev, u64 dma_mask, unsigned long start) { - unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE; int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT; int i = start >> APERTURE_RANGE_SHIFT; - unsigned long boundary_size, mask; + unsigned long next_bit, boundary_size, mask; unsigned long address = -1; - next_bit >>= PAGE_SHIFT; - mask = dma_get_seg_boundary(dev); boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT : @@ -1563,9 +1569,11 @@ static unsigned long dma_ops_area_alloc(struct device *dev, if (dom->aperture[i]->offset >= dma_mask) break; + next_bit = dom->aperture[i]->next_bit; + address = dma_ops_aperture_alloc(dom->aperture[i], pages, - next_bit, dma_mask, - boundary_size, align_mask); + dma_mask, boundary_size, + align_mask); if (address != -1) { address = dom->aperture[i]->offset + (address << PAGE_SHIFT); @@ -1573,7 +1581,8 @@ static unsigned long dma_ops_area_alloc(struct device *dev, break; } - next_bit = 0; + if (next_bit > dom->aperture[i]->next_bit) + dom->need_flush = true; } return address; -- cgit v1.2.3 From ccb50e03da72d943897e4629ec959c5ea0588ad8 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 17:49:34 +0100 Subject: iommu/amd: Flush iommu tlb in dma_ops_aperture_alloc() Since the allocator wraparound happens in this function now, flush the iommu tlb there too. 
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 58d7d82f005a..eb11996e310f 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1518,7 +1518,8 @@ out_free: return -ENOMEM; } -static dma_addr_t dma_ops_aperture_alloc(struct aperture_range *range, +static dma_addr_t dma_ops_aperture_alloc(struct dma_ops_domain *dom, + struct aperture_range *range, unsigned long pages, unsigned long dma_mask, unsigned long boundary_size, @@ -1526,6 +1527,7 @@ static dma_addr_t dma_ops_aperture_alloc(struct aperture_range *range, { unsigned long offset, limit, flags; dma_addr_t address; + bool flush = false; offset = range->offset >> PAGE_SHIFT; limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset, @@ -1534,17 +1536,24 @@ static dma_addr_t dma_ops_aperture_alloc(struct aperture_range *range, spin_lock_irqsave(&range->bitmap_lock, flags); address = iommu_area_alloc(range->bitmap, limit, range->next_bit, pages, offset, boundary_size, align_mask); - if (address == -1) + if (address == -1) { /* Nothing found, retry one time */ address = iommu_area_alloc(range->bitmap, limit, 0, pages, offset, boundary_size, align_mask); + flush = true; + } if (address != -1) range->next_bit = address + pages; spin_unlock_irqrestore(&range->bitmap_lock, flags); + if (flush) { + domain_flush_tlb(&dom->domain); + domain_flush_complete(&dom->domain); + } + return address; } @@ -1566,12 +1575,14 @@ static unsigned long dma_ops_area_alloc(struct device *dev, 1UL << (BITS_PER_LONG - PAGE_SHIFT); for (;i < max_index; ++i) { - if (dom->aperture[i]->offset >= dma_mask) + struct aperture_range *range = dom->aperture[i]; + + if (range->offset >= dma_mask) break; - next_bit = dom->aperture[i]->next_bit; + next_bit = range->next_bit; - address = dma_ops_aperture_alloc(dom->aperture[i], pages, + address = dma_ops_aperture_alloc(dom, dom->aperture[i], pages, dma_mask, boundary_size, align_mask); if (address != -1) { -- cgit v1.2.3 From 05ab49e0056a702b98345a548cc888be0ba9ddf8 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 17:58:26 +0100 Subject: iommu/amd: Remove 'start' parameter from dma_ops_area_alloc The parameter is not needed because the value is part of the already passed-in struct dma_ops_domain.
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index eb11996e310f..2962c62a8377 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1561,11 +1561,10 @@ static unsigned long dma_ops_area_alloc(struct device *dev, struct dma_ops_domain *dom, unsigned int pages, unsigned long align_mask, - u64 dma_mask, - unsigned long start) + u64 dma_mask) { int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT; - int i = start >> APERTURE_RANGE_SHIFT; + int i = dom->next_address >> APERTURE_RANGE_SHIFT; unsigned long next_bit, boundary_size, mask; unsigned long address = -1; @@ -1612,13 +1611,12 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev, dom->need_flush = true; #endif - address = dma_ops_area_alloc(dev, dom, pages, align_mask, - dma_mask, dom->next_address); + address = dma_ops_area_alloc(dev, dom, pages, align_mask, dma_mask); if (address == -1) { dom->next_address = 0; address = dma_ops_area_alloc(dev, dom, pages, align_mask, - dma_mask, 0); + dma_mask); dom->need_flush = true; } -- cgit v1.2.3 From ebaecb423bfa0f88487aa98238c89fd3df9734dc Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 18:11:32 +0100 Subject: iommu/amd: Rename dma_ops_domain->next_address to next_index It points to the next aperture index to allocate from. We don't need the full address anymore because this is now tracked in struct aperture_range. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 2962c62a8377..a26cd76588cd 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -146,8 +146,8 @@ struct dma_ops_domain { /* size of the aperture for the mappings */ unsigned long aperture_size; - /* address we start to search for free addresses */ - unsigned long next_address; + /* aperture index we start searching for free addresses */ + unsigned long next_index; /* address space relevant data */ struct aperture_range *aperture[APERTURE_MAX_RANGES]; @@ -1564,9 +1564,9 @@ static unsigned long dma_ops_area_alloc(struct device *dev, u64 dma_mask) { int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT; - int i = dom->next_address >> APERTURE_RANGE_SHIFT; unsigned long next_bit, boundary_size, mask; unsigned long address = -1; + int i = dom->next_index; mask = dma_get_seg_boundary(dev); @@ -1587,7 +1587,7 @@ static unsigned long dma_ops_area_alloc(struct device *dev, if (address != -1) { address = dom->aperture[i]->offset + (address << PAGE_SHIFT); - dom->next_address = address + (pages << PAGE_SHIFT); + dom->next_index = i; break; } @@ -1607,14 +1607,14 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev, unsigned long address; #ifdef CONFIG_IOMMU_STRESS - dom->next_address = 0; + dom->next_index = 0; dom->need_flush = true; #endif address = dma_ops_area_alloc(dev, dom, pages, align_mask, dma_mask); if (address == -1) { - dom->next_address = 0; + dom->next_index = 0; address = dma_ops_area_alloc(dev, dom, pages, align_mask, dma_mask); dom->need_flush = true; @@ -1648,7 +1648,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom, return; #endif - if (address >= dom->next_address) + if ((address >> APERTURE_RANGE_SHIFT) >= dom->next_index) dom->need_flush = true; 
address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT; @@ -1884,7 +1884,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void) * a valid dma-address. So we can use 0 as error value */ dma_dom->aperture[0]->bitmap[0] = 1; - dma_dom->next_address = 0; + dma_dom->next_index = 0; return dma_dom; @@ -2477,15 +2477,15 @@ retry: address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask, dma_mask); if (unlikely(address == DMA_ERROR_CODE)) { + if (alloc_new_range(dma_dom, false, GFP_ATOMIC)) + goto out; + /* - * setting next_address here will let the address + * setting next_index here will let the address * allocator only scan the new allocated range in the * first run. This is a small optimization. */ - dma_dom->next_address = dma_dom->aperture_size; - - if (alloc_new_range(dma_dom, false, GFP_ATOMIC)) - goto out; + dma_dom->next_index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; /* * aperture was successfully enlarged by 128 MB, try -- cgit v1.2.3 From d41ab09896dcfc517a7aa050b5c8563b5682a71d Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 18:20:03 +0100 Subject: iommu/amd: Flush iommu tlb in dma_ops_free_addresses Instead of setting need_flush, do the flush directly in dma_ops_free_addresses. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index a26cd76588cd..62a407947b02 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1648,8 +1648,10 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom, return; #endif - if ((address >> APERTURE_RANGE_SHIFT) >= dom->next_index) - dom->need_flush = true; + if (address + pages > range->next_bit) { + domain_flush_tlb(&dom->domain); + domain_flush_complete(&dom->domain); + } address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT; -- cgit v1.2.3 From 2a87442c5b9858cbfc43eb17da4331551d578d25 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 18:34:47 +0100 Subject: iommu/amd: Iterate over all aperture ranges in dma_ops_area_alloc This way we don't need to care about the next_index wrapping around in dma_ops_alloc_addresses. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 62a407947b02..faf51a066e98 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1563,35 +1563,36 @@ static unsigned long dma_ops_area_alloc(struct device *dev, unsigned long align_mask, u64 dma_mask) { - int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT; unsigned long next_bit, boundary_size, mask; unsigned long address = -1; - int i = dom->next_index; + int start = dom->next_index; + int i; mask = dma_get_seg_boundary(dev); boundary_size = mask + 1 ? 
ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT : 1UL << (BITS_PER_LONG - PAGE_SHIFT); - for (;i < max_index; ++i) { - struct aperture_range *range = dom->aperture[i]; + for (i = 0; i < APERTURE_MAX_RANGES; ++i) { + struct aperture_range *range; + + range = dom->aperture[(start + i) % APERTURE_MAX_RANGES]; - if (range->offset >= dma_mask) - break; + if (!range || range->offset >= dma_mask) + continue; next_bit = range->next_bit; - address = dma_ops_aperture_alloc(dom, dom->aperture[i], pages, + address = dma_ops_aperture_alloc(dom, range, pages, dma_mask, boundary_size, align_mask); if (address != -1) { - address = dom->aperture[i]->offset + - (address << PAGE_SHIFT); + address = range->offset + (address << PAGE_SHIFT); dom->next_index = i; break; } - if (next_bit > dom->aperture[i]->next_bit) + if (next_bit > range->next_bit) dom->need_flush = true; } @@ -1613,13 +1614,6 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev, address = dma_ops_area_alloc(dev, dom, pages, align_mask, dma_mask); - if (address == -1) { - dom->next_index = 0; - address = dma_ops_area_alloc(dev, dom, pages, align_mask, - dma_mask); - dom->need_flush = true; - } - if (unlikely(address == -1)) address = DMA_ERROR_CODE; -- cgit v1.2.3 From ab7032bb9c37f9d36ade2267a01a6edf8f2d41d7 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 18:47:11 +0100 Subject: iommu/amd: Remove need_flush from struct dma_ops_domain The flushing of iommu tlbs is now done on a per-range basis. So there is no need anymore for domain-wide flush tracking. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 30 ++++++------------------------ 1 file changed, 6 insertions(+), 24 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index faf51a066e98..39a2048a6cd2 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -151,9 +151,6 @@ struct dma_ops_domain { /* address space relevant data */ struct aperture_range *aperture[APERTURE_MAX_RANGES]; - - /* This will be set to true when TLB needs to be flushed */ - bool need_flush; }; /**************************************************************************** @@ -1563,7 +1560,7 @@ static unsigned long dma_ops_area_alloc(struct device *dev, unsigned long align_mask, u64 dma_mask) { - unsigned long next_bit, boundary_size, mask; + unsigned long boundary_size, mask; unsigned long address = -1; int start = dom->next_index; int i; @@ -1581,8 +1578,6 @@ static unsigned long dma_ops_area_alloc(struct device *dev, if (!range || range->offset >= dma_mask) continue; - next_bit = range->next_bit; - address = dma_ops_aperture_alloc(dom, range, pages, dma_mask, boundary_size, align_mask); @@ -1591,9 +1586,6 @@ static unsigned long dma_ops_area_alloc(struct device *dev, dom->next_index = i; break; } - - if (next_bit > range->next_bit) - dom->need_flush = true; } return address; @@ -1609,7 +1601,6 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev, #ifdef CONFIG_IOMMU_STRESS dom->next_index = 0; - dom->need_flush = true; #endif address = dma_ops_area_alloc(dev, dom, pages, align_mask, dma_mask); @@ -1642,7 +1633,8 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom, return; #endif - if (address + pages > range->next_bit) { + if (amd_iommu_unmap_flush || + (address + pages > range->next_bit)) { domain_flush_tlb(&dom->domain); domain_flush_complete(&dom->domain); } @@ -1868,8 +1860,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void) if (!dma_dom->domain.pt_root) goto 
free_dma_dom; - dma_dom->need_flush = false; - add_domain_to_list(&dma_dom->domain); if (alloc_new_range(dma_dom, true, GFP_KERNEL)) @@ -2503,11 +2493,10 @@ retry: ADD_STATS_COUNTER(alloced_io_mem, size); - if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { - domain_flush_tlb(&dma_dom->domain); - dma_dom->need_flush = false; - } else if (unlikely(amd_iommu_np_cache)) + if (unlikely(amd_iommu_np_cache)) { domain_flush_pages(&dma_dom->domain, address, size); + domain_flush_complete(&dma_dom->domain); + } out: return address; @@ -2519,8 +2508,6 @@ out_unmap: dma_ops_domain_unmap(dma_dom, start); } - domain_flush_pages(&dma_dom->domain, address, size); - dma_ops_free_addresses(dma_dom, address, pages); return DMA_ERROR_CODE; @@ -2553,11 +2540,6 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, start += PAGE_SIZE; } - if (amd_iommu_unmap_flush || dma_dom->need_flush) { - domain_flush_pages(&dma_dom->domain, flush_addr, size); - dma_dom->need_flush = false; - } - SUB_STATS_COUNTER(alloced_io_mem, size); dma_ops_free_addresses(dma_dom, dma_addr, pages); -- cgit v1.2.3 From 4eeca8c5e72fad752eba9efc293c924d65faa86e Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 22 Dec 2015 12:15:35 +0100 Subject: iommu/amd: Optimize dma_ops_free_addresses Don't flush the iommu tlb when we free something behind the current next_bit pointer. Update the next_bit pointer instead and let the flush happen on the next wraparound in the allocation path. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 39a2048a6cd2..c657e48f0aed 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1633,8 +1633,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom, return; #endif - if (amd_iommu_unmap_flush || - (address + pages > range->next_bit)) { + if (amd_iommu_unmap_flush) { domain_flush_tlb(&dom->domain); domain_flush_complete(&dom->domain); } @@ -1642,6 +1641,8 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom, address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT; spin_lock_irqsave(&range->bitmap_lock, flags); + if (address + pages > range->next_bit) + range->next_bit = address + pages; bitmap_clear(range->bitmap, address, pages); spin_unlock_irqrestore(&range->bitmap_lock, flags); -- cgit v1.2.3 From 266a3bd28f9842bac54f934df8dc9834799efbff Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 18:54:24 +0100 Subject: iommu/amd: Allocate new aperture ranges in dma_ops_alloc_addresses It really belongs there and not in __map_single. 
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index c657e48f0aed..4c926dadb281 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1597,13 +1597,19 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev, unsigned long align_mask, u64 dma_mask) { - unsigned long address; + unsigned long address = -1; #ifdef CONFIG_IOMMU_STRESS dom->next_index = 0; #endif - address = dma_ops_area_alloc(dev, dom, pages, align_mask, dma_mask); + while (address == -1) { + address = dma_ops_area_alloc(dev, dom, pages, + align_mask, dma_mask); + + if (address == -1 && alloc_new_range(dom, true, GFP_ATOMIC)) + break; + } if (unlikely(address == -1)) address = DMA_ERROR_CODE; @@ -2460,26 +2466,11 @@ static dma_addr_t __map_single(struct device *dev, if (align) align_mask = (1UL << get_order(size)) - 1; -retry: address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask, dma_mask); - if (unlikely(address == DMA_ERROR_CODE)) { - if (alloc_new_range(dma_dom, false, GFP_ATOMIC)) - goto out; - - /* - * setting next_index here will let the address - * allocator only scan the new allocated range in the - * first run. This is a small optimization. - */ - dma_dom->next_index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; - /* - * aperture was successfully enlarged by 128 MB, try - * allocation again - */ - goto retry; - } + if (address == DMA_ERROR_CODE) + goto out; start = address; for (i = 0; i < pages; ++i) { -- cgit v1.2.3 From 7bfa5bd2708d096c79fc2c9f32de478ade7a013f Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 19:07:50 +0100 Subject: iommu/amd: Build io page-tables with cmpxchg64 This allows building up the page-tables without holding any locks. As a consequence it removes the need to pre-populate dma_ops page-tables. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 4c926dadb281..ecdd3f7dfb89 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1206,11 +1206,21 @@ static u64 *alloc_pte(struct protection_domain *domain, end_lvl = PAGE_SIZE_LEVEL(page_size); while (level > end_lvl) { - if (!IOMMU_PTE_PRESENT(*pte)) { + u64 __pte, __npte; + + __pte = *pte; + + if (!IOMMU_PTE_PRESENT(__pte)) { page = (u64 *)get_zeroed_page(gfp); if (!page) return NULL; - *pte = PM_LEVEL_PDE(level, virt_to_phys(page)); + + __npte = PM_LEVEL_PDE(level, virt_to_phys(page)); + + if (cmpxchg64(pte, __pte, __npte)) { + free_page((unsigned long)page); + continue; + } } /* No level skipping support yet */ @@ -1607,7 +1617,7 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev, address = dma_ops_area_alloc(dev, dom, pages, align_mask, dma_mask); - if (address == -1 && alloc_new_range(dom, true, GFP_ATOMIC)) + if (address == -1 && alloc_new_range(dom, false, GFP_ATOMIC)) break; } -- cgit v1.2.3 From a73c1566655df0631de5070695e7a56cd3896fdc Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 19:25:56 +0100 Subject: iommu/amd: Initialize new aperture range before making it visible Make sure the aperture range is fully initialized before it is visible to the address allocator.
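The publication rule being enforced, condensed from the diff that follows (error handling elided; not a complete function):

/*
 * Sketch: initialize the new range completely, then publish the
 * pointer as the very last step, so a concurrent allocator walking
 * dom->aperture[] can never observe a half-initialized range.
 */
range = kzalloc(sizeof(*range), gfp);
range->bitmap = (void *)get_zeroed_page(gfp);
range->offset = dma_dom->aperture_size;
spin_lock_init(&range->bitmap_lock);

spin_lock_irqsave(&range->bitmap_lock, flags);
dma_dom->aperture[index] = range;	/* publish last */
dma_dom->aperture_size += APERTURE_RANGE_SIZE;
spin_unlock_irqrestore(&range->bitmap_lock, flags);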
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index ecdd3f7dfb89..11ee8855a2e0 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1425,8 +1425,10 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, bool populate, gfp_t gfp) { int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; - struct amd_iommu *iommu; unsigned long i, old_size, pte_pgsize; + struct aperture_range *range; + struct amd_iommu *iommu; + unsigned long flags; #ifdef CONFIG_IOMMU_STRESS populate = false; @@ -1435,17 +1437,17 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, if (index >= APERTURE_MAX_RANGES) return -ENOMEM; - dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp); - if (!dma_dom->aperture[index]) + range = kzalloc(sizeof(struct aperture_range), gfp); + if (!range) return -ENOMEM; - dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp); - if (!dma_dom->aperture[index]->bitmap) + range->bitmap = (void *)get_zeroed_page(gfp); + if (!range->bitmap) goto out_free; - dma_dom->aperture[index]->offset = dma_dom->aperture_size; + range->offset = dma_dom->aperture_size; - spin_lock_init(&dma_dom->aperture[index]->bitmap_lock); + spin_lock_init(&range->bitmap_lock); if (populate) { unsigned long address = dma_dom->aperture_size; @@ -1458,14 +1460,18 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, if (!pte) goto out_free; - dma_dom->aperture[index]->pte_pages[i] = pte_page; + range->pte_pages[i] = pte_page; address += APERTURE_RANGE_SIZE / 64; } } - old_size = dma_dom->aperture_size; - dma_dom->aperture_size += APERTURE_RANGE_SIZE; + /* First take the bitmap_lock and then publish the range */ + spin_lock_irqsave(&range->bitmap_lock, flags); + + old_size = dma_dom->aperture_size; + dma_dom->aperture[index] = range; + dma_dom->aperture_size += APERTURE_RANGE_SIZE; /* Reserve address range used for MSI messages */ if (old_size < MSI_ADDR_BASE_LO && @@ -1512,15 +1518,16 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, update_domain(&dma_dom->domain); + spin_unlock_irqrestore(&range->bitmap_lock, flags); + return 0; out_free: update_domain(&dma_dom->domain); - free_page((unsigned long)dma_dom->aperture[index]->bitmap); + free_page((unsigned long)range->bitmap); - kfree(dma_dom->aperture[index]); - dma_dom->aperture[index] = NULL; + kfree(range); return -ENOMEM; } -- cgit v1.2.3 From 92d420ec028d24f18ce194149bd0a0a7c42029a3 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 21 Dec 2015 19:31:33 +0100 Subject: iommu/amd: Relax locking in dma_ops path Remove the long holding times of the domain->lock and rely on the bitmap_lock instead. 
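In sketch form, the map path goes from coarse-grained to fine-grained locking (condensed; the full removals are in the diff below):

/*
 * Before: every map/unmap was serialized on the per-domain lock.
 *
 *	spin_lock_irqsave(&domain->lock, flags);
 *	addr = __map_single(dev, domain->priv, paddr, size, dir,
 *			    false, dma_mask);
 *	domain_flush_complete(domain);
 *	spin_unlock_irqrestore(&domain->lock, flags);
 *
 * After: __map_single() runs without the domain lock; only the short
 * bitmap_lock sections inside the address allocator serialize
 * concurrent callers.
 */
addr = __map_single(dev, domain->priv, paddr, size, dir, false, dma_mask);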
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 70 ++++++++--------------------------------------- 1 file changed, 11 insertions(+), 59 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 11ee8855a2e0..e98a466faabf 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1466,8 +1466,10 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, } } + spin_lock_irqsave(&dma_dom->domain.lock, flags); + /* First take the bitmap_lock and then publish the range */ - spin_lock_irqsave(&range->bitmap_lock, flags); + spin_lock(&range->bitmap_lock); old_size = dma_dom->aperture_size; dma_dom->aperture[index] = range; @@ -1518,7 +1520,9 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, update_domain(&dma_dom->domain); - spin_unlock_irqrestore(&range->bitmap_lock, flags); + spin_unlock(&range->bitmap_lock); + + spin_unlock_irqrestore(&dma_dom->domain.lock, flags); return 0; @@ -2562,11 +2566,9 @@ static dma_addr_t map_page(struct device *dev, struct page *page, enum dma_data_direction dir, struct dma_attrs *attrs) { - unsigned long flags; + phys_addr_t paddr = page_to_phys(page) + offset; struct protection_domain *domain; - dma_addr_t addr; u64 dma_mask; - phys_addr_t paddr = page_to_phys(page) + offset; INC_STATS_COUNTER(cnt_map_single); @@ -2578,19 +2580,8 @@ static dma_addr_t map_page(struct device *dev, struct page *page, dma_mask = *dev->dma_mask; - spin_lock_irqsave(&domain->lock, flags); - - addr = __map_single(dev, domain->priv, paddr, size, dir, false, + return __map_single(dev, domain->priv, paddr, size, dir, false, dma_mask); - if (addr == DMA_ERROR_CODE) - goto out; - - domain_flush_complete(domain); - -out: - spin_unlock_irqrestore(&domain->lock, flags); - - return addr; } /* @@ -2599,7 +2590,6 @@ out: static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - unsigned long flags; struct protection_domain *domain; INC_STATS_COUNTER(cnt_unmap_single); @@ -2608,13 +2598,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, if (IS_ERR(domain)) return; - spin_lock_irqsave(&domain->lock, flags); - __unmap_single(domain->priv, dma_addr, size, dir); - - domain_flush_complete(domain); - - spin_unlock_irqrestore(&domain->lock, flags); } /* @@ -2625,7 +2609,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { - unsigned long flags; struct protection_domain *domain; int i; struct scatterlist *s; @@ -2641,8 +2624,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, dma_mask = *dev->dma_mask; - spin_lock_irqsave(&domain->lock, flags); - for_each_sg(sglist, s, nelems, i) { paddr = sg_phys(s); @@ -2657,12 +2638,8 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, goto unmap; } - domain_flush_complete(domain); - -out: - spin_unlock_irqrestore(&domain->lock, flags); - return mapped_elems; + unmap: for_each_sg(sglist, s, mapped_elems, i) { if (s->dma_address) @@ -2671,9 +2648,7 @@ unmap: s->dma_address = s->dma_length = 0; } - mapped_elems = 0; - - goto out; + return 0; } /* @@ -2684,7 +2659,6 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { - unsigned long flags; struct protection_domain *domain; struct scatterlist *s; int i; @@ -2695,17 +2669,11 @@ static void unmap_sg(struct device 
*dev, struct scatterlist *sglist, if (IS_ERR(domain)) return; - spin_lock_irqsave(&domain->lock, flags); - for_each_sg(sglist, s, nelems, i) { __unmap_single(domain->priv, s->dma_address, s->dma_length, dir); s->dma_address = s->dma_length = 0; } - - domain_flush_complete(domain); - - spin_unlock_irqrestore(&domain->lock, flags); } /* @@ -2717,7 +2685,6 @@ static void *alloc_coherent(struct device *dev, size_t size, { u64 dma_mask = dev->coherent_dma_mask; struct protection_domain *domain; - unsigned long flags; struct page *page; INC_STATS_COUNTER(cnt_alloc_coherent); @@ -2749,19 +2716,11 @@ static void *alloc_coherent(struct device *dev, size_t size, if (!dma_mask) dma_mask = *dev->dma_mask; - spin_lock_irqsave(&domain->lock, flags); - *dma_addr = __map_single(dev, domain->priv, page_to_phys(page), size, DMA_BIDIRECTIONAL, true, dma_mask); - if (*dma_addr == DMA_ERROR_CODE) { - spin_unlock_irqrestore(&domain->lock, flags); + if (*dma_addr == DMA_ERROR_CODE) goto out_free; - } - - domain_flush_complete(domain); - - spin_unlock_irqrestore(&domain->lock, flags); return page_address(page); @@ -2781,7 +2740,6 @@ static void free_coherent(struct device *dev, size_t size, struct dma_attrs *attrs) { struct protection_domain *domain; - unsigned long flags; struct page *page; INC_STATS_COUNTER(cnt_free_coherent); @@ -2793,14 +2751,8 @@ static void free_coherent(struct device *dev, size_t size, if (IS_ERR(domain)) goto free_mem; - spin_lock_irqsave(&domain->lock, flags); - __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); - domain_flush_complete(domain); - - spin_unlock_irqrestore(&domain->lock, flags); - free_mem: if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) __free_pages(page, get_order(size)); -- cgit v1.2.3 From 5f6bed50050625f1b76fa2c0dfd0ce887cb0420a Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 22 Dec 2015 13:34:22 +0100 Subject: iommu/amd: Make dma_ops_domain->next_index percpu Make this pointer percpu so that we start searching for new addresses in the range where we last stopped, which has a higher probability of still being in the cache. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 39 +++++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index e98a466faabf..84c7da17895f 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -147,7 +148,7 @@ struct dma_ops_domain { unsigned long aperture_size; /* aperture index we start searching for free addresses */ - unsigned long next_index; + u32 __percpu *next_index; /* address space relevant data */ struct aperture_range *aperture[APERTURE_MAX_RANGES]; @@ -1583,18 +1584,30 @@ static unsigned long dma_ops_area_alloc(struct device *dev, { unsigned long boundary_size, mask; unsigned long address = -1; - int start = dom->next_index; - int i; + u32 start, i; + + preempt_disable(); mask = dma_get_seg_boundary(dev); + start = this_cpu_read(*dom->next_index); + + /* Sanity check - is it really necessary? */ + if (unlikely(start > APERTURE_MAX_RANGES)) { + start = 0; + this_cpu_write(*dom->next_index, 0); + } + boundary_size = mask + 1 ?
ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT : 1UL << (BITS_PER_LONG - PAGE_SHIFT); for (i = 0; i < APERTURE_MAX_RANGES; ++i) { struct aperture_range *range; + int index; + + index = (start + i) % APERTURE_MAX_RANGES; - range = dom->aperture[(start + i) % APERTURE_MAX_RANGES]; + range = dom->aperture[index]; if (!range || range->offset >= dma_mask) continue; @@ -1604,11 +1617,13 @@ static unsigned long dma_ops_area_alloc(struct device *dev, align_mask); if (address != -1) { address = range->offset + (address << PAGE_SHIFT); - dom->next_index = i; + this_cpu_write(*dom->next_index, index); break; } } + preempt_enable(); + return address; } @@ -1620,10 +1635,6 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev, { unsigned long address = -1; -#ifdef CONFIG_IOMMU_STRESS - dom->next_index = 0; -#endif - while (address == -1) { address = dma_ops_area_alloc(dev, dom, pages, align_mask, dma_mask); @@ -1851,6 +1862,8 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) if (!dom) return; + free_percpu(dom->next_index); + del_domain_from_list(&dom->domain); free_pagetable(&dom->domain); @@ -1873,6 +1886,7 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) static struct dma_ops_domain *dma_ops_domain_alloc(void) { struct dma_ops_domain *dma_dom; + int cpu; dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL); if (!dma_dom) @@ -1881,6 +1895,10 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void) if (protection_domain_init(&dma_dom->domain)) goto free_dma_dom; + dma_dom->next_index = alloc_percpu(u32); + if (!dma_dom->next_index) + goto free_dma_dom; + dma_dom->domain.mode = PAGE_MODE_2_LEVEL; dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); dma_dom->domain.flags = PD_DMA_OPS_MASK; @@ -1898,8 +1916,9 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void) * a valid dma-address. So we can use 0 as error value */ dma_dom->aperture[0]->bitmap[0] = 1; - dma_dom->next_index = 0; + for_each_possible_cpu(cpu) + *per_cpu_ptr(dma_dom->next_index, cpu) = 0; return dma_dom; -- cgit v1.2.3 From 7b5e25b84eba25e78bce736a5c99649f7ff0f5c2 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 22 Dec 2015 13:38:12 +0100 Subject: iommu/amd: Use trylock to acquire bitmap_lock First search for a non-contended aperture with trylock before spinning.
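The idiom, distilled into a standalone sketch (struct my_range and try_range() are hypothetical; the driver threads a bool through dma_ops_aperture_alloc() instead):

#include <linux/spinlock.h>

struct my_range {
	spinlock_t lock;
	/* ... allocator state ... */
};

/* Hypothetical per-range allocation attempt, assumed for this sketch. */
static long try_range(struct my_range *range, unsigned int pages);

/*
 * Sketch: scan all ranges with trylock first so that an uncontended
 * range is preferred; only when every range was busy or full, scan
 * again and be willing to spin on the locks.
 */
static long alloc_avoiding_contention(struct my_range *ranges, int nranges,
				      unsigned int pages)
{
	unsigned long flags;
	bool first = true;
	long addr;
	int i;

again:
	for (i = 0; i < nranges; i++) {
		if (first) {
			if (!spin_trylock_irqsave(&ranges[i].lock, flags))
				continue;	/* contended, skip for now */
		} else {
			spin_lock_irqsave(&ranges[i].lock, flags);
		}

		addr = try_range(&ranges[i], pages);
		spin_unlock_irqrestore(&ranges[i].lock, flags);

		if (addr != -1)
			return addr;
	}

	if (first) {
		first = false;
		goto again;
	}

	return -1;
}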
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 84c7da17895f..eed355c5211d 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1542,7 +1542,8 @@ static dma_addr_t dma_ops_aperture_alloc(struct dma_ops_domain *dom, unsigned long pages, unsigned long dma_mask, unsigned long boundary_size, - unsigned long align_mask) + unsigned long align_mask, + bool trylock) { unsigned long offset, limit, flags; dma_addr_t address; @@ -1552,7 +1553,13 @@ static dma_addr_t dma_ops_aperture_alloc(struct dma_ops_domain *dom, limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset, dma_mask >> PAGE_SHIFT); - spin_lock_irqsave(&range->bitmap_lock, flags); + if (trylock) { + if (!spin_trylock_irqsave(&range->bitmap_lock, flags)) + return -1; + } else { + spin_lock_irqsave(&range->bitmap_lock, flags); + } + address = iommu_area_alloc(range->bitmap, limit, range->next_bit, pages, offset, boundary_size, align_mask); if (address == -1) { @@ -1584,12 +1591,14 @@ static unsigned long dma_ops_area_alloc(struct device *dev, { unsigned long boundary_size, mask; unsigned long address = -1; + bool first = true; u32 start, i; preempt_disable(); mask = dma_get_seg_boundary(dev); +again: start = this_cpu_read(*dom->next_index); /* Sanity check - is it really necessary? */ if (unlikely(start > APERTURE_MAX_RANGES)) { start = 0; this_cpu_write(*dom->next_index, 0); } @@ -1614,7 +1623,7 @@ static unsigned long dma_ops_area_alloc(struct device *dev, address = dma_ops_aperture_alloc(dom, range, pages, dma_mask, boundary_size, - align_mask); + align_mask, first); if (address != -1) { address = range->offset + (address << PAGE_SHIFT); this_cpu_write(*dom->next_index, index); break; } } + if (address == -1 && first) { + first = false; + goto again; + } + preempt_enable(); return address; -- cgit v1.2.3 From a639a8eecf2be962b5dcd38dedd8b6bd10c2354b Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 22 Dec 2015 16:06:49 +0100 Subject: iommu/amd: Preallocate dma_ops apertures based on dma_mask Preallocate between 4 and 8 apertures when a device gets its dma_mask. With more apertures we reduce the lock contention of the domain lock significantly. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 60 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 53 insertions(+), 7 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index eed355c5211d..6f6502d9fd67 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1892,6 +1892,23 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) kfree(dom); } +static int dma_ops_domain_alloc_apertures(struct dma_ops_domain *dma_dom, + int max_apertures) +{ + int ret, i, apertures; + + apertures = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; + ret = 0; + + for (i = apertures; i < max_apertures; ++i) { + ret = alloc_new_range(dma_dom, false, GFP_KERNEL); + if (ret) + break; + } + + return ret; +} + /* * Allocates a new protection domain usable for the dma_ops functions.
* It also initializes the page table and the address allocator data @@ -2800,14 +2817,43 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask) return check_device(dev); } +static int set_dma_mask(struct device *dev, u64 mask) +{ + struct protection_domain *domain; + int max_apertures = 1; + + domain = get_domain(dev); + if (IS_ERR(domain)) + return PTR_ERR(domain); + + if (mask == DMA_BIT_MASK(64)) + max_apertures = 8; + else if (mask > DMA_BIT_MASK(32)) + max_apertures = 4; + + /* + * To prevent lock contention it doesn't make sense to allocate more + * apertures than online cpus + */ + if (max_apertures > num_online_cpus()) + max_apertures = num_online_cpus(); + + if (dma_ops_domain_alloc_apertures(domain->priv, max_apertures)) + dev_err(dev, "Can't allocate %d iommu apertures\n", + max_apertures); + + return 0; +} + static struct dma_map_ops amd_iommu_dma_ops = { - .alloc = alloc_coherent, - .free = free_coherent, - .map_page = map_page, - .unmap_page = unmap_page, - .map_sg = map_sg, - .unmap_sg = unmap_sg, - .dma_supported = amd_iommu_dma_supported, + .alloc = alloc_coherent, + .free = free_coherent, + .map_page = map_page, + .unmap_page = unmap_page, + .map_sg = map_sg, + .unmap_sg = unmap_sg, + .dma_supported = amd_iommu_dma_supported, + .set_dma_mask = set_dma_mask, }; int __init amd_iommu_init_api(void) -- cgit v1.2.3 From 1fb260bc003d7c6196fc0ee3a169a2f6495d17fe Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 7 Jan 2016 12:36:06 +0300 Subject: iommu/amd: Remove an unneeded condition get_device_id() returns an unsigned short device id. It never fails and it never returns a negative value, so we can remove this condition. Signed-off-by: Dan Carpenter Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 6f6502d9fd67..539b0dea8034 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3857,11 +3857,9 @@ static struct irq_domain *get_irq_domain(struct irq_alloc_info *info) case X86_IRQ_ALLOC_TYPE_MSI: case X86_IRQ_ALLOC_TYPE_MSIX: devid = get_device_id(&info->msi_dev->dev); - if (devid >= 0) { - iommu = amd_iommu_rlookup_table[devid]; - if (iommu) - return iommu->msi_domain; - } + iommu = amd_iommu_rlookup_table[devid]; + if (iommu) + return iommu->msi_domain; break; default: break; -- cgit v1.2.3 From 592033790e8276f7038efb480871598648464a01 Mon Sep 17 00:00:00 2001 From: Nicholas Krause Date: Mon, 4 Jan 2016 18:27:57 -0500 Subject: iommu/vt-d: Check the return value of iommu_device_create() This adds the proper check to alloc_iommu() to make sure that the call to iommu_device_create() has completed successfully; if not, the error code is returned to the caller after the previously allocated resources have been freed.
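For reference, the idiom involved, as a generic sketch (iommu_device_create() returns an ERR_PTR()-encoded pointer on failure rather than NULL, so a plain NULL check would miss the error):

#include <linux/err.h>

/* Sketch: check an ERR_PTR-returning call and recover the errno. */
iommu->iommu_dev = iommu_device_create(NULL, iommu, intel_iommu_groups,
				       "%s", iommu->name);
if (IS_ERR(iommu->iommu_dev)) {
	err = PTR_ERR(iommu->iommu_dev);	/* e.g. -ENOMEM */
	goto err_unmap;				/* unwind earlier setup */
}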
Signed-off-by: Nicholas Krause Signed-off-by: Joerg Roedel --- drivers/iommu/dmar.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 80e3c176008e..add177a37f00 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1070,6 +1070,12 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) intel_iommu_groups, "%s", iommu->name); + if (IS_ERR(iommu->iommu_dev)) { + drhd->iommu = NULL; + err = PTR_ERR(iommu->iommu_dev); + goto err_unmap; + } + return 0; err_unmap: -- cgit v1.2.3 From bc8474549e94efddb5d5791af0a015d13115b67b Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 7 Jan 2016 12:16:51 +0100 Subject: iommu/vt-d: Fix up error handling in alloc_iommu Only check for error when iommu->iommu_dev has been assigned and only assign drhd->iommu when the function can't fail anymore. Signed-off-by: Joerg Roedel --- drivers/iommu/dmar.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index add177a37f00..62a400c5ba06 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1063,19 +1063,19 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) raw_spin_lock_init(&iommu->register_lock); - drhd->iommu = iommu; - - if (intel_iommu_enabled) + if (intel_iommu_enabled) { iommu->iommu_dev = iommu_device_create(NULL, iommu, intel_iommu_groups, "%s", iommu->name); - if (IS_ERR(iommu->iommu_dev)) { - drhd->iommu = NULL; - err = PTR_ERR(iommu->iommu_dev); - goto err_unmap; + if (IS_ERR(iommu->iommu_dev)) { + err = PTR_ERR(iommu->iommu_dev); + goto err_unmap; + } } + drhd->iommu = iommu; + return 0; err_unmap: -- cgit v1.2.3