From a5604f260e68a45510ef773ce36ce301d1f6d102 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 28 Jul 2016 02:09:53 +0000 Subject: iommu/amd: Fix non static symbol warning Fixes the following sparse warning: drivers/iommu/amd_iommu.c:106:1: warning: symbol '__pcpu_scope_flush_queue' was not declared. Should it be static? Signed-off-by: Wei Yongjun Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 96de97a46079..bf5ec0c8f95e 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -103,7 +103,7 @@ struct flush_queue { struct flush_queue_entry *entries; }; -DEFINE_PER_CPU(struct flush_queue, flush_queue); +static DEFINE_PER_CPU(struct flush_queue, flush_queue); static atomic_t queue_timer_on; static struct timer_list queue_timer; -- cgit v1.2.3 From 517abe495df7279f3a45ae01ed709e07d384309e Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 28 Jul 2016 02:10:26 +0000 Subject: iommu/amd: Fix error return code in irq_remapping_alloc() Fix to return a negative error code from the alloc_irq_index() error handling case instead of 0, as done elsewhere in this function. Signed-off-by: Wei Yongjun Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index bf5ec0c8f95e..2bcaca53aba5 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3886,6 +3886,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, } if (index < 0) { pr_warn("Failed to allocate IRTE\n"); + ret = index; goto out_free_parent; } -- cgit v1.2.3 From 423595e89ddf9b657812a33932a5e2ad1bb9dd54 Mon Sep 17 00:00:00 2001 From: Amitoj Kaur Chawla Date: Mon, 1 Aug 2016 11:48:38 +0530 Subject: iommu/exynos: Fix error handling for of_platform_device_create of_platform_device_create returns NULL on error so an IS_ERR test is incorrect here and a NULL check is required. The Coccinelle semantic patch used to make this change is as follows: @@ expression e; @@ e = of_platform_device_create(...); if( - IS_ERR(e) + !e ) { <+... return - PTR_ERR(e) + -ENODEV ; ...+> } Signed-off-by: Amitoj Kaur Chawla Signed-off-by: Joerg Roedel --- drivers/iommu/exynos-iommu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 33dcc29ec200..30808e91b775 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1345,8 +1345,8 @@ static int __init exynos_iommu_of_setup(struct device_node *np) exynos_iommu_init(); pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); + if (!pdev) + return -ENODEV; /* * use the first registered sysmmu device for performing -- cgit v1.2.3 From 395adae45095e55ee90e688e9021d79dd6ffaefe Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Mon, 22 Aug 2016 21:11:31 +0200 Subject: iommu/amd: Remove AMD_IOMMU_STATS Commit e85e8f69cedb ("iommu/amd: Remove statistics code") removed that configuration. 
Also remove the function definition (suggested by Joerg Roedel).

Signed-off-by: Fabian Frederick
Signed-off-by: Joerg Roedel
---
 arch/x86/configs/x86_64_defconfig | 1 -
 drivers/iommu/amd_iommu_proto.h   | 6 ------
 2 files changed, 7 deletions(-)

(limited to 'drivers/iommu')

diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index d28bdabcc87e..7ef4a099defc 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -255,7 +255,6 @@ CONFIG_RTC_CLASS=y
 CONFIG_DMADEVICES=y
 CONFIG_EEEPC_LAPTOP=y
 CONFIG_AMD_IOMMU=y
-CONFIG_AMD_IOMMU_STATS=y
 CONFIG_INTEL_IOMMU=y
 # CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
 CONFIG_EFI_VARS=y
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 0bd9eb374462..a1f3346736e5 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -78,12 +78,6 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
 				  int status, int tag);
 
-#ifndef CONFIG_AMD_IOMMU_STATS
-
-static inline void amd_iommu_stats_init(void) { }
-
-#endif /* !CONFIG_AMD_IOMMU_STATS */
-
 static inline bool is_rd890_iommu(struct pci_dev *pdev)
 {
 	return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
-- cgit v1.2.3


From b1e2afca635bb01f93ecd8112c265b980f1abc08 Mon Sep 17 00:00:00 2001
From: Shawn Lin
Date: Wed, 24 Aug 2016 10:23:51 +0800
Subject: iommu/ipmmu-vmsa: Fix wrong error handle of ipmmu_add_device

Let's fix the error handling of ipmmu_add_device() when it fails to
find utlbs; otherwise we risk a memory leak.

Signed-off-by: Shawn Lin
Signed-off-by: Joerg Roedel
---
 drivers/iommu/ipmmu-vmsa.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 2fdbac67a77f..ace331da6459 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -636,7 +636,7 @@ static int ipmmu_add_device(struct device *dev)
 	spin_unlock(&ipmmu_devices_lock);
 
 	if (ret < 0)
-		return -ENODEV;
+		goto error;
 
 	for (i = 0; i < num_utlbs; ++i) {
 		if (utlbs[i] >= mmu->num_utlbs) {
-- cgit v1.2.3


From 76208356a0ab357a1fb9f43bedb7fd1046ad8ece Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Thu, 25 Aug 2016 14:25:12 +0200
Subject: iommu/vt-d: Split up get_domain_for_dev function

Split out the search for an already existing domain and the context
mapping of the device to the new domain. This allows possible RMRR
regions to be mapped into the domain before it is context-mapped.
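What the split buys us is that allocation and context mapping become
independently callable steps, so a caller can set the domain up in
between. A minimal sketch of the intended usage (error handling elided;
the real helpers are in the diff below):

	/*
	 * Sketch only: find_or_alloc_domain() returns a domain that is
	 * not yet visible to the device; set_domain_for_dev() is what
	 * finally context-maps it.
	 */
	struct dmar_domain *domain = find_or_alloc_domain(dev, gaw);

	if (domain) {
		/* ... populate the domain, e.g. map RMRR regions ... */
		domain = set_domain_for_dev(dev, domain);
	}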
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c | 76 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 55 insertions(+), 21 deletions(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ebb5bf3ddbd9..bcdbe9de7560 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2452,20 +2452,15 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
 	return 0;
 }
 
-/* domain is initialized */
-static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
+static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
 {
 	struct device_domain_info *info = NULL;
-	struct dmar_domain *domain, *tmp;
+	struct dmar_domain *domain = NULL;
 	struct intel_iommu *iommu;
 	u16 req_id, dma_alias;
 	unsigned long flags;
 	u8 bus, devfn;
 
-	domain = find_domain(dev);
-	if (domain)
-		return domain;
-
 	iommu = device_to_iommu(dev, &bus, &devfn);
 	if (!iommu)
 		return NULL;
@@ -2487,9 +2482,9 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
 		}
 		spin_unlock_irqrestore(&device_domain_lock, flags);
 
-		/* DMA alias already has a domain, uses it */
+		/* DMA alias already has a domain, use it */
 		if (info)
-			goto found_domain;
+			goto out;
 	}
 
 	/* Allocate and initialize new domain for the device */
@@ -2501,28 +2496,67 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
 		return NULL;
 	}
 
-	/* register PCI DMA alias device */
-	if (dev_is_pci(dev) && req_id != dma_alias) {
-		tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
-					       dma_alias & 0xff, NULL, domain);
+out:
 
-		if (!tmp || tmp != domain) {
-			domain_exit(domain);
-			domain = tmp;
-		}
+	return domain;
+}
 
-		if (!domain)
-			return NULL;
+static struct dmar_domain *set_domain_for_dev(struct device *dev,
+					      struct dmar_domain *domain)
+{
+	struct intel_iommu *iommu;
+	struct dmar_domain *tmp;
+	u16 req_id, dma_alias;
+	u8 bus, devfn;
+
+	iommu = device_to_iommu(dev, &bus, &devfn);
+	if (!iommu)
+		return NULL;
+
+	req_id = ((u16)bus << 8) | devfn;
+
+	if (dev_is_pci(dev)) {
+		struct pci_dev *pdev = to_pci_dev(dev);
+
+		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
+
+		/* register PCI DMA alias device */
+		if (req_id != dma_alias) {
+			tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
+					dma_alias & 0xff, NULL, domain);
+
+			if (!tmp || tmp != domain)
+				return tmp;
+		}
 	}
 
-found_domain:
 	tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
+	if (!tmp || tmp != domain)
+		return tmp;
+
+	return domain;
+}
 
-	if (!tmp || tmp != domain) {
+static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
+{
+	struct dmar_domain *domain, *tmp;
+
+	domain = find_domain(dev);
+	if (domain)
+		goto out;
+
+	domain = find_or_alloc_domain(dev, gaw);
+	if (!domain)
+		goto out;
+
+	tmp = set_domain_for_dev(dev, domain);
+	if (!tmp || domain != tmp) {
 		domain_exit(domain);
 		domain = tmp;
 	}
 
+out:
+
 	return domain;
 }
-- cgit v1.2.3


From 1c5ebba95b486f1ea0e17d76dd8f6d7f1a8d1e89 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Thu, 25 Aug 2016 13:52:51 +0200
Subject: iommu/vt-d: Make sure RMRRs are mapped before domain goes public

When a domain is allocated through the get_valid_domain_for_dev path,
it will be context-mapped before the RMRR regions are mapped in the
page-table. This opens a short time window in which device accesses to
these regions fail, causing DMAR faults.

Fix this by mapping the RMRR regions before the domain is
context-mapped.
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bcdbe9de7560..a4407eabf0e6 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3428,17 +3428,18 @@ static unsigned long intel_alloc_iova(struct device *dev,
 
 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
 {
+	struct dmar_domain *domain, *tmp;
 	struct dmar_rmrr_unit *rmrr;
-	struct dmar_domain *domain;
 	struct device *i_dev;
 	int i, ret;
 
-	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-	if (!domain) {
-		pr_err("Allocating domain for %s failed\n",
-		       dev_name(dev));
-		return NULL;
-	}
+	domain = find_domain(dev);
+	if (domain)
+		goto out;
+
+	domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+	if (!domain)
+		goto out;
 
 	/* We have a new domain - setup possible RMRRs for the device */
 	rcu_read_lock();
@@ -3457,6 +3458,18 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
 	}
 	rcu_read_unlock();
 
+	tmp = set_domain_for_dev(dev, domain);
+	if (!tmp || domain != tmp) {
+		domain_exit(domain);
+		domain = tmp;
+	}
+
+out:
+
+	if (!domain)
+		pr_err("Allocating domain for %s failed\n", dev_name(dev));
+
 	return domain;
 }
-- cgit v1.2.3


From e2d42311ffc9a5014eb129d02eb0e7bc791430f0 Mon Sep 17 00:00:00 2001
From: Peng Fan
Date: Wed, 13 Jul 2016 10:13:08 +0800
Subject: iommu/arm-smmu: Drop devm_free_irq when driver detach

There is no need to call devm_free_irq when the driver detaches:
devres_release_all, which is called after 'drv->remove', will release
all managed resources.

Signed-off-by: Peng Fan
Reviewed-by: Robin Murphy
Cc: Will Deacon
Signed-off-by: Will Deacon
---
 drivers/iommu/arm-smmu.c | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 2db74ebc3240..069b2ea89113 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1998,7 +1998,6 @@ out_put_masters:
 
 static int arm_smmu_device_remove(struct platform_device *pdev)
 {
-	int i;
 	struct device *dev = &pdev->dev;
 	struct arm_smmu_device *curr, *smmu = NULL;
 	struct rb_node *node;
@@ -2025,9 +2024,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
 		dev_err(dev, "removing device with active domains!\n");
 
-	for (i = 0; i < smmu->num_global_irqs; ++i)
-		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
-
 	/* Turn the thing off */
 	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 	return 0;
-- cgit v1.2.3


From b4163fb3333cf2279f5bfa2bb4d2d93aa66a3eac Mon Sep 17 00:00:00 2001
From: Jean-Philippe Brucker
Date: Mon, 22 Aug 2016 14:42:24 +0100
Subject: iommu/arm-smmu: Fix event queues synchronization

SMMUv3 only sends interrupts for event queues (EVTQ and PRIQ) when they
transition from empty to non-empty. At the moment, if the SMMU adds new
items to a queue before the event thread has finished consuming a
previous batch, the driver ignores any new items. The queue is then
stuck in non-empty state and all subsequent events will be lost.

As an example, consider the following flow, where (P, C) is the SMMU
view of producer/consumer indices, and (p, c) the driver view.

                                                P C | p c
 1. SMMU appends a PPR to the PRI queue,        1 0 | 0 0
    sends an MSI
 2. PRIQ handler is called.                     1 0 | 1 0
 3. SMMU appends a PPR to the PRI queue.        2 0 | 1 0
 4.
PRIQ thread removes the first element. 2 1 | 1 1 5. PRIQ thread believes that the queue is empty, goes into idle indefinitely. To avoid this, always synchronize the producer index and drain the queue once before leaving an event handler. In order to prevent races on the local producer index, move all event queue handling into the threads. Signed-off-by: Jean-Philippe Brucker Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 150 +++++++++++++++++++------------------------- 1 file changed, 66 insertions(+), 84 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 641e88761319..d156c1e610d6 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1161,36 +1161,66 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) struct arm_smmu_queue *q = &smmu->evtq.q; u64 evt[EVTQ_ENT_DWORDS]; - while (!queue_remove_raw(q, evt)) { - u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK; + do { + while (!queue_remove_raw(q, evt)) { + u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK; - dev_info(smmu->dev, "event 0x%02x received:\n", id); - for (i = 0; i < ARRAY_SIZE(evt); ++i) - dev_info(smmu->dev, "\t0x%016llx\n", - (unsigned long long)evt[i]); - } + dev_info(smmu->dev, "event 0x%02x received:\n", id); + for (i = 0; i < ARRAY_SIZE(evt); ++i) + dev_info(smmu->dev, "\t0x%016llx\n", + (unsigned long long)evt[i]); + + } + + /* + * Not much we can do on overflow, so scream and pretend we're + * trying harder. + */ + if (queue_sync_prod(q) == -EOVERFLOW) + dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); + } while (!queue_empty(q)); /* Sync our overflow flag, as we believe we're up to speed */ q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); return IRQ_HANDLED; } -static irqreturn_t arm_smmu_evtq_handler(int irq, void *dev) -{ - irqreturn_t ret = IRQ_WAKE_THREAD; - struct arm_smmu_device *smmu = dev; - struct arm_smmu_queue *q = &smmu->evtq.q; - - /* - * Not much we can do on overflow, so scream and pretend we're - * trying harder. - */ - if (queue_sync_prod(q) == -EOVERFLOW) - dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); - else if (queue_empty(q)) - ret = IRQ_NONE; +static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt) +{ + u32 sid, ssid; + u16 grpid; + bool ssv, last; + + sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK; + ssv = evt[0] & PRIQ_0_SSID_V; + ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0; + last = evt[0] & PRIQ_0_PRG_LAST; + grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK; + + dev_info(smmu->dev, "unexpected PRI request received:\n"); + dev_info(smmu->dev, + "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n", + sid, ssid, grpid, last ? "L" : "", + evt[0] & PRIQ_0_PERM_PRIV ? "" : "un", + evt[0] & PRIQ_0_PERM_READ ? "R" : "", + evt[0] & PRIQ_0_PERM_WRITE ? "W" : "", + evt[0] & PRIQ_0_PERM_EXEC ? 
"X" : "", + evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT); + + if (last) { + struct arm_smmu_cmdq_ent cmd = { + .opcode = CMDQ_OP_PRI_RESP, + .substream_valid = ssv, + .pri = { + .sid = sid, + .ssid = ssid, + .grpid = grpid, + .resp = PRI_RESP_DENY, + }, + }; - return ret; + arm_smmu_cmdq_issue_cmd(smmu, &cmd); + } } static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) @@ -1199,63 +1229,19 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) struct arm_smmu_queue *q = &smmu->priq.q; u64 evt[PRIQ_ENT_DWORDS]; - while (!queue_remove_raw(q, evt)) { - u32 sid, ssid; - u16 grpid; - bool ssv, last; - - sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK; - ssv = evt[0] & PRIQ_0_SSID_V; - ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0; - last = evt[0] & PRIQ_0_PRG_LAST; - grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK; - - dev_info(smmu->dev, "unexpected PRI request received:\n"); - dev_info(smmu->dev, - "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n", - sid, ssid, grpid, last ? "L" : "", - evt[0] & PRIQ_0_PERM_PRIV ? "" : "un", - evt[0] & PRIQ_0_PERM_READ ? "R" : "", - evt[0] & PRIQ_0_PERM_WRITE ? "W" : "", - evt[0] & PRIQ_0_PERM_EXEC ? "X" : "", - evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT); - - if (last) { - struct arm_smmu_cmdq_ent cmd = { - .opcode = CMDQ_OP_PRI_RESP, - .substream_valid = ssv, - .pri = { - .sid = sid, - .ssid = ssid, - .grpid = grpid, - .resp = PRI_RESP_DENY, - }, - }; + do { + while (!queue_remove_raw(q, evt)) + arm_smmu_handle_ppr(smmu, evt); - arm_smmu_cmdq_issue_cmd(smmu, &cmd); - } - } + if (queue_sync_prod(q) == -EOVERFLOW) + dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); + } while (!queue_empty(q)); /* Sync our overflow flag, as we believe we're up to speed */ q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); return IRQ_HANDLED; } -static irqreturn_t arm_smmu_priq_handler(int irq, void *dev) -{ - irqreturn_t ret = IRQ_WAKE_THREAD; - struct arm_smmu_device *smmu = dev; - struct arm_smmu_queue *q = &smmu->priq.q; - - /* PRIQ overflow indicates a programming error */ - if (queue_sync_prod(q) == -EOVERFLOW) - dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); - else if (queue_empty(q)) - ret = IRQ_NONE; - - return ret; -} - static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev) { /* We don't actually use CMD_SYNC interrupts for anything */ @@ -1288,15 +1274,11 @@ static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev) if (active & GERROR_MSI_GERROR_ABT_ERR) dev_warn(smmu->dev, "GERROR MSI write aborted\n"); - if (active & GERROR_MSI_PRIQ_ABT_ERR) { + if (active & GERROR_MSI_PRIQ_ABT_ERR) dev_warn(smmu->dev, "PRIQ MSI write aborted\n"); - arm_smmu_priq_handler(irq, smmu->dev); - } - if (active & GERROR_MSI_EVTQ_ABT_ERR) { + if (active & GERROR_MSI_EVTQ_ABT_ERR) dev_warn(smmu->dev, "EVTQ MSI write aborted\n"); - arm_smmu_evtq_handler(irq, smmu->dev); - } if (active & GERROR_MSI_CMDQ_ABT_ERR) { dev_warn(smmu->dev, "CMDQ MSI write aborted\n"); @@ -2235,10 +2217,10 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) /* Request interrupt lines */ irq = smmu->evtq.q.irq; if (irq) { - ret = devm_request_threaded_irq(smmu->dev, irq, - arm_smmu_evtq_handler, + ret = devm_request_threaded_irq(smmu->dev, irq, NULL, arm_smmu_evtq_thread, - 0, "arm-smmu-v3-evtq", smmu); + IRQF_ONESHOT, + "arm-smmu-v3-evtq", smmu); if (ret < 0) dev_warn(smmu->dev, "failed to enable evtq irq\n"); } @@ -2263,10 +2245,10 @@ static int 
arm_smmu_setup_irqs(struct arm_smmu_device *smmu) if (smmu->features & ARM_SMMU_FEAT_PRI) { irq = smmu->priq.q.irq; if (irq) { - ret = devm_request_threaded_irq(smmu->dev, irq, - arm_smmu_priq_handler, + ret = devm_request_threaded_irq(smmu->dev, irq, NULL, arm_smmu_priq_thread, - 0, "arm-smmu-v3-priq", + IRQF_ONESHOT, + "arm-smmu-v3-priq", smmu); if (ret < 0) dev_warn(smmu->dev, -- cgit v1.2.3 From 6070529bebd26e0a80d9b9653a6f53275086603f Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 11 Aug 2016 17:44:06 +0100 Subject: iommu/arm-smmu: Support v7s context format Fill in the last bits of machinery required to drive a stage 1 context bank in v7 short descriptor format. By default we'll prefer to use it only when the CPUs are also using the same format, such that we're guaranteed that everything will be strictly 32-bit. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 69 +++++++++++++++++++++++++++++++++--------------- 1 file changed, 47 insertions(+), 22 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 069b2ea89113..4b1c87e947fd 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -217,6 +217,7 @@ #define ARM_SMMU_CB_TTBR0 0x20 #define ARM_SMMU_CB_TTBR1 0x28 #define ARM_SMMU_CB_TTBCR 0x30 +#define ARM_SMMU_CB_CONTEXTIDR 0x34 #define ARM_SMMU_CB_S1_MAIR0 0x38 #define ARM_SMMU_CB_S1_MAIR1 0x3c #define ARM_SMMU_CB_PAR 0x50 @@ -239,7 +240,6 @@ #define SCTLR_AFE (1 << 2) #define SCTLR_TRE (1 << 1) #define SCTLR_M (1 << 0) -#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE) #define ARM_MMU500_ACTLR_CPRE (1 << 1) @@ -738,7 +738,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev) static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, struct io_pgtable_cfg *pgtbl_cfg) { - u32 reg; + u32 reg, reg2; u64 reg64; bool stage1; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; @@ -781,14 +781,22 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, /* TTBRs */ if (stage1) { - reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; - - reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT; - writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); - - reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; - reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT; - writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1); + u16 asid = ARM_SMMU_CB_ASID(smmu, cfg); + + if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { + reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0]; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0); + reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1]; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1); + writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR); + } else { + reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; + reg64 |= (u64)asid << TTBRn_ASID_SHIFT; + writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); + reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; + reg64 |= (u64)asid << TTBRn_ASID_SHIFT; + writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1); + } } else { reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); @@ -796,28 +804,36 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, /* TTBCR */ if (stage1) { - reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); - if (smmu->version > ARM_SMMU_V1) { - reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; - reg |= TTBCR2_SEP_UPSTREAM; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); + if (cfg->fmt == 
ARM_SMMU_CTX_FMT_AARCH32_S) { + reg = pgtbl_cfg->arm_v7s_cfg.tcr; + reg2 = 0; + } else { + reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; + reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; + reg2 |= TTBCR2_SEP_UPSTREAM; } + if (smmu->version > ARM_SMMU_V1) + writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2); } else { reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); } + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); /* MAIRs (stage-1 only) */ if (stage1) { - reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; + if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { + reg = pgtbl_cfg->arm_v7s_cfg.prrr; + reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr; + } else { + reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; + reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; + } writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); - reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1); + writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1); } /* SCTLR */ - reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; + reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M; if (stage1) reg |= SCTLR_S1_ASIDPNE; #ifdef __BIG_ENDIAN @@ -880,6 +896,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, */ if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L) cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L; + if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) && + !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) && + (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) && + (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)) + cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S; if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) && (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K | ARM_SMMU_FEAT_FMT_AARCH64_16K | @@ -899,10 +920,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, oas = smmu->ipa_size; if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) { fmt = ARM_64_LPAE_S1; - } else { + } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) { fmt = ARM_32_LPAE_S1; ias = min(ias, 32UL); oas = min(oas, 40UL); + } else { + fmt = ARM_V7S; + ias = min(ias, 32UL); + oas = min(oas, 32UL); } break; case ARM_SMMU_DOMAIN_NESTED: -- cgit v1.2.3 From bcfced1580c40662d1c095899af9d0dd3ed9e7bc Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Mon, 5 Sep 2016 14:09:53 +0100 Subject: iommu/arm-smmu: Fix polling of command queue When the SMMUv3 driver attempts to send a command, it adds an entry to the command queue. This is a circular buffer, where both the producer and consumer have a wrap bit. When producer.index == consumer.index and producer.wrap == consumer.wrap, the list is empty. When producer.index == consumer.index and producer.wrap != consumer.wrap, the list is full. If the list is full when the driver needs to add a command, it waits for the SMMU to consume one command, and advance the consumer pointer. The problem is that we currently rely on "X before Y" operation to know if entries have been consumed, which is a bit fiddly since it only makes sense when the distance between X and Y is less than or equal to the size of the queue. At the moment when the list is full, we use "Consumer before Producer + 1", which is out of range and returns a value opposite to what we expect: when the queue transitions to not full, we stay in the polling loop and time out, printing an error. Given that the actual bug was difficult to determine, simplify the polling logic by relying exclusively on queue_full and queue_empty, that don't have this range constraint. 
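For reference, those two predicates compare index and wrap bit of both
pointers together, essentially as follows (a sketch based on the
driver's Q_IDX()/Q_WRP() helpers):

	static bool queue_empty(struct arm_smmu_queue *q)
	{
		/* same index, same wrap bit: nothing left to consume */
		return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
		       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
	}

	static bool queue_full(struct arm_smmu_queue *q)
	{
		/* same index, opposite wrap bits: producer has lapped */
		return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
		       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
	}

Both tests are valid for any producer/consumer distance, which is why
no "within queue size" precondition is needed.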
Polling the queue is now straightforward: * When we want to add a command and the list is full, wait until it isn't full and retry. * After adding a sync, wait for the list to be empty before returning. Suggested-by: Will Deacon Signed-off-by: Jean-Philippe Brucker Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 28 +++++++++------------------- 1 file changed, 9 insertions(+), 19 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index d156c1e610d6..c040e246bc59 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -713,19 +713,15 @@ static void queue_inc_prod(struct arm_smmu_queue *q) writel(q->prod, q->prod_reg); } -static bool __queue_cons_before(struct arm_smmu_queue *q, u32 until) -{ - if (Q_WRP(q, q->cons) == Q_WRP(q, until)) - return Q_IDX(q, q->cons) < Q_IDX(q, until); - - return Q_IDX(q, q->cons) >= Q_IDX(q, until); -} - -static int queue_poll_cons(struct arm_smmu_queue *q, u32 until, bool wfe) +/* + * Wait for the SMMU to consume items. If drain is true, wait until the queue + * is empty. Otherwise, wait until there is at least one free slot. + */ +static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe) { ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US); - while (queue_sync_cons(q), __queue_cons_before(q, until)) { + while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) { if (ktime_compare(ktime_get(), timeout) > 0) return -ETIMEDOUT; @@ -896,7 +892,6 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, struct arm_smmu_cmdq_ent *ent) { - u32 until; u64 cmd[CMDQ_ENT_DWORDS]; bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); struct arm_smmu_queue *q = &smmu->cmdq.q; @@ -908,17 +903,12 @@ static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, } spin_lock(&smmu->cmdq.lock); - while (until = q->prod + 1, queue_insert_raw(q, cmd) == -ENOSPC) { - /* - * Keep the queue locked, otherwise the producer could wrap - * twice and we could see a future consumer pointer that looks - * like it's behind us. - */ - if (queue_poll_cons(q, until, wfe)) + while (queue_insert_raw(q, cmd) == -ENOSPC) { + if (queue_poll_cons(q, false, wfe)) dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); } - if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, until, wfe)) + if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe)) dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n"); spin_unlock(&smmu->cmdq.lock); } -- cgit v1.2.3 From 8ded2909e2c6c34b70ebb45480f6ce68c64fbddc Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 9 Sep 2016 14:33:59 +0100 Subject: iommu/arm-smmu: Disable interrupts whilst holding the cmdq lock The cmdq lock is taken whenever we issue commands into the command queue, which can occur in IRQ context (as a result of unmap) or in process context (as a result of a threaded IRQ handler or device probe). This can lead to a theoretical deadlock if the interrupt handler performing the unmap hits whilst the lock is taken, so explicitly use the {irqsave,irqrestore} spin_lock accessors for the cmdq lock. 
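The deadlock being defended against is the classic one for a lock
shared with hard-IRQ context; schematically (an illustration, not
driver code):

	/* process context, interrupts still enabled */
	spin_lock(&smmu->cmdq.lock);
	/* ... IRQ arrives on this CPU: unmap -> issue command ... */
	spin_lock(&smmu->cmdq.lock);	/* spins forever: the holder
					 * can never run to unlock */

Disabling local interrupts across the critical section with
spin_lock_irqsave() closes that window.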
Tested-by: Jean-Philippe Brucker Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index c040e246bc59..5db6931c715c 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -893,6 +893,7 @@ static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, struct arm_smmu_cmdq_ent *ent) { u64 cmd[CMDQ_ENT_DWORDS]; + unsigned long flags; bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); struct arm_smmu_queue *q = &smmu->cmdq.q; @@ -902,7 +903,7 @@ static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, return; } - spin_lock(&smmu->cmdq.lock); + spin_lock_irqsave(&smmu->cmdq.lock, flags); while (queue_insert_raw(q, cmd) == -ENOSPC) { if (queue_poll_cons(q, false, wfe)) dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); @@ -910,7 +911,7 @@ static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe)) dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n"); - spin_unlock(&smmu->cmdq.lock); + spin_unlock_irqrestore(&smmu->cmdq.lock, flags); } /* Context descriptor manipulation functions */ -- cgit v1.2.3 From b996444cf35e736621855e73f9d0762bd49f41f2 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 12 Sep 2016 17:13:41 +0100 Subject: iommu/of: Handle iommu-map property for PCI Now that we have a way to pick up the RID translation and target IOMMU, hook up of_iommu_configure() to bring PCI devices into the of_xlate mechanism and allow them IOMMU-backed DMA ops without the need for driver-specific handling. Reviewed-by: Will Deacon Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/of_iommu.c | 46 +++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 41 insertions(+), 5 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 57f23eaaa2f9..19e1e8f2f871 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -22,6 +22,7 @@ #include #include #include +#include #include static const struct of_device_id __iommu_of_table_sentinel @@ -134,6 +135,45 @@ const struct iommu_ops *of_iommu_get_ops(struct device_node *np) return ops; } +static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) +{ + struct of_phandle_args *iommu_spec = data; + + iommu_spec->args[0] = alias; + return iommu_spec->np == pdev->bus->dev.of_node; +} + +static const struct iommu_ops +*of_pci_iommu_configure(struct pci_dev *pdev, struct device_node *bridge_np) +{ + const struct iommu_ops *ops; + struct of_phandle_args iommu_spec; + + /* + * Start by tracing the RID alias down the PCI topology as + * far as the host bridge whose OF node we have... + * (we're not even attempting to handle multi-alias devices yet) + */ + iommu_spec.args_count = 1; + iommu_spec.np = bridge_np; + pci_for_each_dma_alias(pdev, __get_pci_rid, &iommu_spec); + /* + * ...then find out what that becomes once it escapes the PCI + * bus into the system beyond, and which IOMMU it ends up at. 
+ */
+	iommu_spec.np = NULL;
+	if (of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map",
+			   "iommu-map-mask", &iommu_spec.np, iommu_spec.args))
+		return NULL;
+
+	ops = of_iommu_get_ops(iommu_spec.np);
+	if (!ops || !ops->of_xlate || ops->of_xlate(&pdev->dev, &iommu_spec))
+		ops = NULL;
+
+	of_node_put(iommu_spec.np);
+	return ops;
+}
+
 const struct iommu_ops *of_iommu_configure(struct device *dev,
 					   struct device_node *master_np)
 {
@@ -142,12 +182,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
 	const struct iommu_ops *ops = NULL;
 	int idx = 0;
 
-	/*
-	 * We can't do much for PCI devices without knowing how
-	 * device IDs are wired up from the PCI bus to the IOMMU.
-	 */
 	if (dev_is_pci(dev))
-		return NULL;
+		return of_pci_iommu_configure(to_pci_dev(dev), master_np);
 
 	/*
 	 * We don't currently walk up the tree looking for a parent IOMMU.
-- cgit v1.2.3


From 57f98d2f61e191ef9d06863c9ce3f8621f3671ef Mon Sep 17 00:00:00 2001
From: Robin Murphy
Date: Tue, 13 Sep 2016 10:54:14 +0100
Subject: iommu: Introduce iommu_fwspec

Introduce a common structure to hold the per-device firmware data that
most IOMMU drivers need to keep track of. This enables us to configure
much of that data from common firmware code, and consolidate a lot of
the equivalent implementations, device look-up tables, etc., which are
currently strewn across IOMMU drivers.

This will also enable us to address the outstanding "multiple IOMMUs
on the platform bus" problem by tweaking IOMMU API calls to prefer
dev->fwspec->ops before falling back to dev->bus->iommu_ops, and thus
gracefully handle those troublesome systems which we currently cannot.

As the first user, hook up the OF IOMMU configuration mechanism. The
driver-defined nature of DT cells means that we still need the drivers
to translate and add the IDs themselves, but future users such as the
much less free-form ACPI IORT will be much simpler and self-contained.

CC: Greg Kroah-Hartman
Suggested-by: Will Deacon
Signed-off-by: Robin Murphy
Signed-off-by: Will Deacon
---
 drivers/iommu/iommu.c    | 58 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/iommu/of_iommu.c |  8 +++++--
 include/linux/device.h   |  3 +++
 include/linux/iommu.h    | 39 ++++++++++++++++++++++++++++
 4 files changed, 106 insertions(+), 2 deletions(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b06d93594436..9a2f1960873b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include
 #include
 
 static struct kset *iommu_group_kset;
@@ -1613,3 +1614,60 @@ out:
 	return ret;
 }
+
+int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
+		      const struct iommu_ops *ops)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+	if (fwspec)
+		return ops == fwspec->ops ?
0 : -EINVAL; + + fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL); + if (!fwspec) + return -ENOMEM; + + of_node_get(to_of_node(iommu_fwnode)); + fwspec->iommu_fwnode = iommu_fwnode; + fwspec->ops = ops; + dev->iommu_fwspec = fwspec; + return 0; +} +EXPORT_SYMBOL_GPL(iommu_fwspec_init); + +void iommu_fwspec_free(struct device *dev) +{ + struct iommu_fwspec *fwspec = dev->iommu_fwspec; + + if (fwspec) { + fwnode_handle_put(fwspec->iommu_fwnode); + kfree(fwspec); + dev->iommu_fwspec = NULL; + } +} +EXPORT_SYMBOL_GPL(iommu_fwspec_free); + +int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) +{ + struct iommu_fwspec *fwspec = dev->iommu_fwspec; + size_t size; + int i; + + if (!fwspec) + return -EINVAL; + + size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]); + if (size > sizeof(*fwspec)) { + fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL); + if (!fwspec) + return -ENOMEM; + } + + for (i = 0; i < num_ids; i++) + fwspec->ids[fwspec->num_ids + i] = ids[i]; + + fwspec->num_ids += num_ids; + dev->iommu_fwspec = fwspec; + return 0; +} +EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 19e1e8f2f871..5b82862f571f 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -167,7 +167,9 @@ static const struct iommu_ops return NULL; ops = of_iommu_get_ops(iommu_spec.np); - if (!ops || !ops->of_xlate || ops->of_xlate(&pdev->dev, &iommu_spec)) + if (!ops || !ops->of_xlate || + iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) || + ops->of_xlate(&pdev->dev, &iommu_spec)) ops = NULL; of_node_put(iommu_spec.np); @@ -196,7 +198,9 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, np = iommu_spec.np; ops = of_iommu_get_ops(np); - if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec)) + if (!ops || !ops->of_xlate || + iommu_fwspec_init(dev, &np->fwnode, ops) || + ops->of_xlate(dev, &iommu_spec)) goto err_put_node; of_node_put(np); diff --git a/include/linux/device.h b/include/linux/device.h index 38f02814d53a..bc41e87a969b 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -41,6 +41,7 @@ struct device_node; struct fwnode_handle; struct iommu_ops; struct iommu_group; +struct iommu_fwspec; struct bus_attribute { struct attribute attr; @@ -765,6 +766,7 @@ struct device_dma_parameters { * gone away. This should be set by the allocator of the * device (i.e. the bus driver that discovered the device). * @iommu_group: IOMMU group the device belongs to. + * @iommu_fwspec: IOMMU-specific properties supplied by firmware. * * @offline_disabled: If set, the device is permanently online. * @offline: Set after successful invocation of bus type's .offline(). 
@@ -849,6 +851,7 @@ struct device { void (*release)(struct device *dev); struct iommu_group *iommu_group; + struct iommu_fwspec *iommu_fwspec; bool offline_disabled:1; bool offline:1; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index a35fb8b42e1a..436dc21318af 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -331,10 +331,32 @@ extern struct iommu_group *pci_device_group(struct device *dev); /* Generic device grouping function */ extern struct iommu_group *generic_device_group(struct device *dev); +/** + * struct iommu_fwspec - per-device IOMMU instance data + * @ops: ops for this device's IOMMU + * @iommu_fwnode: firmware handle for this device's IOMMU + * @iommu_priv: IOMMU driver private data for this device + * @num_ids: number of associated device IDs + * @ids: IDs which this device may present to the IOMMU + */ +struct iommu_fwspec { + const struct iommu_ops *ops; + struct fwnode_handle *iommu_fwnode; + void *iommu_priv; + unsigned int num_ids; + u32 ids[1]; +}; + +int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, + const struct iommu_ops *ops); +void iommu_fwspec_free(struct device *dev); +int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); + #else /* CONFIG_IOMMU_API */ struct iommu_ops {}; struct iommu_group {}; +struct iommu_fwspec {}; static inline bool iommu_present(struct bus_type *bus) { @@ -541,6 +563,23 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link) { } +static inline int iommu_fwspec_init(struct device *dev, + struct fwnode_handle *iommu_fwnode, + const struct iommu_ops *ops) +{ + return -ENODEV; +} + +static inline void iommu_fwspec_free(struct device *dev) +{ +} + +static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, + int num_ids) +{ + return -ENODEV; +} + #endif /* CONFIG_IOMMU_API */ #endif /* __LINUX_IOMMU_H */ -- cgit v1.2.3 From dc87a98db751a98577fc6a89b4f26180fc020d24 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 12 Sep 2016 17:13:44 +0100 Subject: iommu/arm-smmu: Fall back to global bypass Unlike SMMUv2, SMMUv3 has no easy way to bypass unknown stream IDs, other than allocating and filling in the entire stream table with bypass entries, which for some configurations would waste *gigabytes* of RAM. Otherwise, all transactions on unknown stream IDs will simply be aborted with a C_BAD_STREAMID event. Rather than render the system unusable in the case of an invalid DT, avoid enabling the SMMU altogether such that everything bypasses (though letting the explicit disable_bypass option take precedence). 
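The resulting reset-time behaviour, summarised (a reading of the logic
added below):

	/*
	 * bypass == false (usable "#iommu-cells" in the DT):
	 *	set CR0.SMMUEN and translate as normal
	 * bypass == true and disable_bypass set:
	 *	set CR0.SMMUEN anyway, so unknown stream IDs abort
	 * bypass == true otherwise:
	 *	leave SMMUEN clear and clear GBPA.ABORT, so all
	 *	traffic bypasses translation
	 */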
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 48 +++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 4 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 5db6931c715c..d7fef5f99bfc 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -123,6 +123,10 @@ #define CR2_RECINVSID (1 << 1) #define CR2_E2H (1 << 0) +#define ARM_SMMU_GBPA 0x44 +#define GBPA_ABORT (1 << 20) +#define GBPA_UPDATE (1 << 31) + #define ARM_SMMU_IRQ_CTRL 0x50 #define IRQ_CTRL_EVTQ_IRQEN (1 << 2) #define IRQ_CTRL_PRIQ_IRQEN (1 << 1) @@ -2124,6 +2128,24 @@ static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val, 1, ARM_SMMU_POLL_TIMEOUT_US); } +/* GBPA is "special" */ +static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr) +{ + int ret; + u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA; + + ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE), + 1, ARM_SMMU_POLL_TIMEOUT_US); + if (ret) + return ret; + + reg &= ~clr; + reg |= set; + writel_relaxed(reg | GBPA_UPDATE, gbpa); + return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE), + 1, ARM_SMMU_POLL_TIMEOUT_US); +} + static void arm_smmu_free_msis(void *data) { struct device *dev = data; @@ -2269,7 +2291,7 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu) return ret; } -static int arm_smmu_device_reset(struct arm_smmu_device *smmu) +static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) { int ret; u32 reg, enables; @@ -2370,8 +2392,17 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu) return ret; } - /* Enable the SMMU interface */ - enables |= CR0_SMMUEN; + + /* Enable the SMMU interface, or ensure bypass */ + if (!bypass || disable_bypass) { + enables |= CR0_SMMUEN; + } else { + ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT); + if (ret) { + dev_err(smmu->dev, "GBPA not responding to update\n"); + return ret; + } + } ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, ARM_SMMU_CR0ACK); if (ret) { @@ -2570,6 +2601,15 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) struct resource *res; struct arm_smmu_device *smmu; struct device *dev = &pdev->dev; + bool bypass = true; + u32 cells; + + if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells)) + dev_err(dev, "missing #iommu-cells property\n"); + else if (cells != 1) + dev_err(dev, "invalid #iommu-cells value (%d)\n", cells); + else + bypass = false; smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); if (!smmu) { @@ -2622,7 +2662,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) platform_set_drvdata(pdev, smmu); /* Reset the device */ - return arm_smmu_device_reset(smmu); + return arm_smmu_device_reset(smmu, bypass); } static int arm_smmu_device_remove(struct platform_device *pdev) -- cgit v1.2.3 From 8f78515425daead9d69e061cc4d8da8299ed0e88 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 12 Sep 2016 17:13:45 +0100 Subject: iommu/arm-smmu: Implement of_xlate() for SMMUv3 Now that we can properly describe the mapping between PCI RIDs and stream IDs via "iommu-map", and have it fed it to the driver automatically via of_xlate(), rework the SMMUv3 driver to benefit from that, and get rid of the current misuse of the "iommus" binding. Since having of_xlate wired up means that masters will now be given the appropriate DMA ops, we also need to make sure that default domains work properly. 
This necessitates dispensing with the "whole group at a time" notion for attaching to a domain, as devices which share a group get attached to the group's default domain one by one as they are initially probed. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 298 +++++++++++++++++++------------------------- 1 file changed, 128 insertions(+), 170 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index d7fef5f99bfc..15ba80db6465 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -610,12 +611,9 @@ struct arm_smmu_device { struct arm_smmu_strtab_cfg strtab_cfg; }; -/* SMMU private data for an IOMMU group */ -struct arm_smmu_group { +/* SMMU private data for each master */ +struct arm_smmu_master_data { struct arm_smmu_device *smmu; - struct arm_smmu_domain *domain; - int num_sids; - u32 *sids; struct arm_smmu_strtab_ent ste; }; @@ -1555,20 +1553,6 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) return ret; } -static struct arm_smmu_group *arm_smmu_group_get(struct device *dev) -{ - struct iommu_group *group; - struct arm_smmu_group *smmu_group; - - group = iommu_group_get(dev); - if (!group) - return NULL; - - smmu_group = iommu_group_get_iommudata(group); - iommu_group_put(group); - return smmu_group; -} - static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) { __le64 *step; @@ -1591,27 +1575,17 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) return step; } -static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group) +static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) { int i; - struct arm_smmu_domain *smmu_domain = smmu_group->domain; - struct arm_smmu_strtab_ent *ste = &smmu_group->ste; - struct arm_smmu_device *smmu = smmu_group->smmu; - - if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { - ste->s1_cfg = &smmu_domain->s1_cfg; - ste->s2_cfg = NULL; - arm_smmu_write_ctx_desc(smmu, ste->s1_cfg); - } else { - ste->s1_cfg = NULL; - ste->s2_cfg = &smmu_domain->s2_cfg; - } + struct arm_smmu_master_data *master = fwspec->iommu_priv; + struct arm_smmu_device *smmu = master->smmu; - for (i = 0; i < smmu_group->num_sids; ++i) { - u32 sid = smmu_group->sids[i]; + for (i = 0; i < fwspec->num_ids; ++i) { + u32 sid = fwspec->ids[i]; __le64 *step = arm_smmu_get_step_for_sid(smmu, sid); - arm_smmu_write_strtab_ent(smmu, sid, step, ste); + arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste); } return 0; @@ -1619,13 +1593,11 @@ static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group) static void arm_smmu_detach_dev(struct device *dev) { - struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev); + struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv; - smmu_group->ste.bypass = true; - if (arm_smmu_install_ste_for_group(smmu_group) < 0) + master->ste.bypass = true; + if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0) dev_warn(dev, "failed to install bypass STE\n"); - - smmu_group->domain = NULL; } static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) @@ -1633,16 +1605,20 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) int ret = 0; struct arm_smmu_device *smmu; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_group *smmu_group = 
arm_smmu_group_get(dev); + struct arm_smmu_master_data *master; + struct arm_smmu_strtab_ent *ste; - if (!smmu_group) + if (!dev->iommu_fwspec) return -ENOENT; + master = dev->iommu_fwspec->iommu_priv; + smmu = master->smmu; + ste = &master->ste; + /* Already attached to a different domain? */ - if (smmu_group->domain && smmu_group->domain != smmu_domain) + if (!ste->bypass) arm_smmu_detach_dev(dev); - smmu = smmu_group->smmu; mutex_lock(&smmu_domain->init_mutex); if (!smmu_domain->smmu) { @@ -1661,21 +1637,21 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) goto out_unlock; } - /* Group already attached to this domain? */ - if (smmu_group->domain) - goto out_unlock; - - smmu_group->domain = smmu_domain; + ste->bypass = false; + ste->valid = true; - /* - * FIXME: This should always be "false" once we have IOMMU-backed - * DMA ops for all devices behind the SMMU. - */ - smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA; + if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { + ste->s1_cfg = &smmu_domain->s1_cfg; + ste->s2_cfg = NULL; + arm_smmu_write_ctx_desc(smmu, ste->s1_cfg); + } else { + ste->s1_cfg = NULL; + ste->s2_cfg = &smmu_domain->s2_cfg; + } - ret = arm_smmu_install_ste_for_group(smmu_group); + ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec); if (ret < 0) - smmu_group->domain = NULL; + ste->valid = false; out_unlock: mutex_unlock(&smmu_domain->init_mutex); @@ -1734,40 +1710,19 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) return ret; } -static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *sidp) -{ - *(u32 *)sidp = alias; - return 0; /* Continue walking */ -} +static struct platform_driver arm_smmu_driver; -static void __arm_smmu_release_pci_iommudata(void *data) +static int arm_smmu_match_node(struct device *dev, void *data) { - kfree(data); + return dev->of_node == data; } -static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev) +static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np) { - struct device_node *of_node; - struct platform_device *smmu_pdev; - struct arm_smmu_device *smmu = NULL; - struct pci_bus *bus = pdev->bus; - - /* Walk up to the root bus */ - while (!pci_is_root_bus(bus)) - bus = bus->parent; - - /* Follow the "iommus" phandle from the host controller */ - of_node = of_parse_phandle(bus->bridge->parent->of_node, "iommus", 0); - if (!of_node) - return NULL; - - /* See if we can find an SMMU corresponding to the phandle */ - smmu_pdev = of_find_device_by_node(of_node); - if (smmu_pdev) - smmu = platform_get_drvdata(smmu_pdev); - - of_node_put(of_node); - return smmu; + struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL, + np, arm_smmu_match_node); + put_device(dev); + return dev ? 
dev_get_drvdata(dev) : NULL; } static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid) @@ -1780,94 +1735,74 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid) return sid < limit; } +static struct iommu_ops arm_smmu_ops; + static int arm_smmu_add_device(struct device *dev) { int i, ret; - u32 sid, *sids; - struct pci_dev *pdev; - struct iommu_group *group; - struct arm_smmu_group *smmu_group; struct arm_smmu_device *smmu; + struct arm_smmu_master_data *master; + struct iommu_fwspec *fwspec = dev->iommu_fwspec; + struct iommu_group *group; - /* We only support PCI, for now */ - if (!dev_is_pci(dev)) + if (!fwspec || fwspec->ops != &arm_smmu_ops) return -ENODEV; - - pdev = to_pci_dev(dev); - group = iommu_group_get_for_dev(dev); - if (IS_ERR(group)) - return PTR_ERR(group); - - smmu_group = iommu_group_get_iommudata(group); - if (!smmu_group) { - smmu = arm_smmu_get_for_pci_dev(pdev); - if (!smmu) { - ret = -ENOENT; - goto out_remove_dev; - } - - smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL); - if (!smmu_group) { - ret = -ENOMEM; - goto out_remove_dev; - } - - smmu_group->ste.valid = true; - smmu_group->smmu = smmu; - iommu_group_set_iommudata(group, smmu_group, - __arm_smmu_release_pci_iommudata); + /* + * We _can_ actually withstand dodgy bus code re-calling add_device() + * without an intervening remove_device()/of_xlate() sequence, but + * we're not going to do so quietly... + */ + if (WARN_ON_ONCE(fwspec->iommu_priv)) { + master = fwspec->iommu_priv; + smmu = master->smmu; } else { - smmu = smmu_group->smmu; + smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode)); + if (!smmu) + return -ENODEV; + master = kzalloc(sizeof(*master), GFP_KERNEL); + if (!master) + return -ENOMEM; + + master->smmu = smmu; + fwspec->iommu_priv = master; } - /* Assume SID == RID until firmware tells us otherwise */ - pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid); - for (i = 0; i < smmu_group->num_sids; ++i) { - /* If we already know about this SID, then we're done */ - if (smmu_group->sids[i] == sid) - goto out_put_group; - } + /* Check the SIDs are in range of the SMMU and our stream table */ + for (i = 0; i < fwspec->num_ids; i++) { + u32 sid = fwspec->ids[i]; - /* Check the SID is in range of the SMMU and our stream table */ - if (!arm_smmu_sid_in_range(smmu, sid)) { - ret = -ERANGE; - goto out_remove_dev; - } - - /* Ensure l2 strtab is initialised */ - if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { - ret = arm_smmu_init_l2_strtab(smmu, sid); - if (ret) - goto out_remove_dev; - } + if (!arm_smmu_sid_in_range(smmu, sid)) + return -ERANGE; - /* Resize the SID array for the group */ - smmu_group->num_sids++; - sids = krealloc(smmu_group->sids, smmu_group->num_sids * sizeof(*sids), - GFP_KERNEL); - if (!sids) { - smmu_group->num_sids--; - ret = -ENOMEM; - goto out_remove_dev; + /* Ensure l2 strtab is initialised */ + if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { + ret = arm_smmu_init_l2_strtab(smmu, sid); + if (ret) + return ret; + } } - /* Add the new SID */ - sids[smmu_group->num_sids - 1] = sid; - smmu_group->sids = sids; - -out_put_group: - iommu_group_put(group); - return 0; + group = iommu_group_get_for_dev(dev); + if (!IS_ERR(group)) + iommu_group_put(group); -out_remove_dev: - iommu_group_remove_device(dev); - iommu_group_put(group); - return ret; + return PTR_ERR_OR_ZERO(group); } static void arm_smmu_remove_device(struct device *dev) { + struct iommu_fwspec *fwspec = dev->iommu_fwspec; + struct arm_smmu_master_data *master; 
+ + if (!fwspec || fwspec->ops != &arm_smmu_ops) + return; + + master = fwspec->iommu_priv; + if (master && master->ste.valid) + arm_smmu_detach_dev(dev); iommu_group_remove_device(dev); + kfree(master); + iommu_fwspec_free(dev); } static int arm_smmu_domain_get_attr(struct iommu_domain *domain, @@ -1914,6 +1849,15 @@ out_unlock: return ret; } +static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) +{ + /* We only support PCI, for now */ + if (!dev_is_pci(dev)) + return -ENODEV; + + return iommu_fwspec_add_ids(dev, args->args, 1); +} + static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, @@ -1928,6 +1872,7 @@ static struct iommu_ops arm_smmu_ops = { .device_group = pci_device_group, .domain_get_attr = arm_smmu_domain_get_attr, .domain_set_attr = arm_smmu_domain_set_attr, + .of_xlate = arm_smmu_of_xlate, .pgsize_bitmap = -1UL, /* Restricted during device attach */ }; @@ -2662,7 +2607,14 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) platform_set_drvdata(pdev, smmu); /* Reset the device */ - return arm_smmu_device_reset(smmu, bypass); + ret = arm_smmu_device_reset(smmu, bypass); + if (ret) + return ret; + + /* And we're up. Go go go! */ + of_iommu_set_ops(dev->of_node, &arm_smmu_ops); + pci_request_acs(); + return bus_set_iommu(&pci_bus_type, &arm_smmu_ops); } static int arm_smmu_device_remove(struct platform_device *pdev) @@ -2690,22 +2642,14 @@ static struct platform_driver arm_smmu_driver = { static int __init arm_smmu_init(void) { - struct device_node *np; - int ret; - - np = of_find_matching_node(NULL, arm_smmu_of_match); - if (!np) - return 0; - - of_node_put(np); - - ret = platform_driver_register(&arm_smmu_driver); - if (ret) - return ret; - - pci_request_acs(); + static bool registered; + int ret = 0; - return bus_set_iommu(&pci_bus_type, &arm_smmu_ops); + if (!registered) { + ret = platform_driver_register(&arm_smmu_driver); + registered = !ret; + } + return ret; } static void __exit arm_smmu_exit(void) @@ -2716,6 +2660,20 @@ static void __exit arm_smmu_exit(void) subsys_initcall(arm_smmu_init); module_exit(arm_smmu_exit); +static int __init arm_smmu_of_init(struct device_node *np) +{ + int ret = arm_smmu_init(); + + if (ret) + return ret; + + if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root)) + return -ENODEV; + + return 0; +} +IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init); + MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations"); MODULE_AUTHOR("Will Deacon "); MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 08d4ca2a672bab34e6640ffa946844d09d4f6f60 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 12 Sep 2016 17:13:46 +0100 Subject: iommu/arm-smmu: Support non-PCI devices with SMMUv3 With the device <-> stream ID relationship suitably abstracted and of_xlate() hooked up, the PCI dependency now looks, and is, entirely arbitrary. Any bus using the of_dma_configure() mechanism will work, so extend support to the platform and AMBA buses which do just that. 
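For a platform device, the generic probe path now reaches the SMMU
driver roughly like this (a sketch of the call chain; details elided):

	/*
	 * of_dma_configure(dev, np)
	 *   -> of_iommu_configure(dev, np)     parse "iommus"/"iommu-map"
	 *      -> ops->of_xlate(dev, spec)     record the stream ID(s)
	 *   -> arch_setup_dma_ops(dev, ...)    install IOMMU-backed DMA ops
	 */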
Acked-by: Will Deacon Tested-by: Lorenzo Pieralisi Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/Kconfig | 2 +- drivers/iommu/arm-smmu-v3.c | 37 +++++++++++++++++++++++++++++++------ 2 files changed, 32 insertions(+), 7 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index d432ca828472..8ee54d71c7eb 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -309,7 +309,7 @@ config ARM_SMMU config ARM_SMMU_V3 bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support" - depends on ARM64 && PCI + depends on ARM64 select IOMMU_API select IOMMU_IO_PGTABLE_LPAE select GENERIC_MSI_IRQ_DOMAIN diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 15ba80db6465..52860bcf80f2 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -35,6 +35,8 @@ #include #include +#include + #include "io-pgtable.h" /* MMIO registers */ @@ -1805,6 +1807,23 @@ static void arm_smmu_remove_device(struct device *dev) iommu_fwspec_free(dev); } +static struct iommu_group *arm_smmu_device_group(struct device *dev) +{ + struct iommu_group *group; + + /* + * We don't support devices sharing stream IDs other than PCI RID + * aliases, since the necessary ID-to-device lookup becomes rather + * impractical given a potential sparse 32-bit stream ID space. + */ + if (dev_is_pci(dev)) + group = pci_device_group(dev); + else + group = generic_device_group(dev); + + return group; +} + static int arm_smmu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data) { @@ -1851,10 +1870,6 @@ out_unlock: static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) { - /* We only support PCI, for now */ - if (!dev_is_pci(dev)) - return -ENODEV; - return iommu_fwspec_add_ids(dev, args->args, 1); } @@ -1869,7 +1884,7 @@ static struct iommu_ops arm_smmu_ops = { .iova_to_phys = arm_smmu_iova_to_phys, .add_device = arm_smmu_add_device, .remove_device = arm_smmu_remove_device, - .device_group = pci_device_group, + .device_group = arm_smmu_device_group, .domain_get_attr = arm_smmu_domain_get_attr, .domain_set_attr = arm_smmu_domain_set_attr, .of_xlate = arm_smmu_of_xlate, @@ -2613,8 +2628,18 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) /* And we're up. Go go go! */ of_iommu_set_ops(dev->of_node, &arm_smmu_ops); +#ifdef CONFIG_PCI pci_request_acs(); - return bus_set_iommu(&pci_bus_type, &arm_smmu_ops); + ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops); + if (ret) + return ret; +#endif +#ifdef CONFIG_ARM_AMBA + ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops); + if (ret) + return ret; +#endif + return bus_set_iommu(&platform_bus_type, &arm_smmu_ops); } static int arm_smmu_device_remove(struct platform_device *pdev) -- cgit v1.2.3 From 95fa99aa402ad516ec825057a168f395ece39a2e Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 12 Sep 2016 17:13:47 +0100 Subject: iommu/arm-smmu: Set PRIVCFG in stage 1 STEs Implement the SMMUv3 equivalent of d346180e70b9 ("iommu/arm-smmu: Treat all device transactions as unprivileged"), so that once again those pesky DMA controllers with their privileged instruction fetches don't unexpectedly fault in stage 1 domains due to VMSAv8 rules. 
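For reference, the PRIVCFG field sits at bits [49:48] of STE word 1
and the "unprivileged" encoding is 0b10, so the one-liner below boils
down to setting bit 49. Restated in isolation (a hedged sketch using
the field macros the patch defines):

	/* 2UL << 48 == BIT(49): treat all transactions as unprivileged */
	u64 privcfg = STRTAB_STE_1_PRIVCFG_UNPRIV << STRTAB_STE_1_PRIVCFG_SHIFT;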
Acked-by: Will Deacon Tested-by: Lorenzo Pieralisi Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 52860bcf80f2..0c45c1e02e04 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -267,6 +267,9 @@ #define STRTAB_STE_1_SHCFG_INCOMING 1UL #define STRTAB_STE_1_SHCFG_SHIFT 44 +#define STRTAB_STE_1_PRIVCFG_UNPRIV 2UL +#define STRTAB_STE_1_PRIVCFG_SHIFT 48 + #define STRTAB_STE_2_S2VMID_SHIFT 0 #define STRTAB_STE_2_S2VMID_MASK 0xffffUL #define STRTAB_STE_2_VTCR_SHIFT 32 @@ -1068,7 +1071,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, #ifdef CONFIG_PCI_ATS STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT | #endif - STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT); + STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT | + STRTAB_STE_1_PRIVCFG_UNPRIV << + STRTAB_STE_1_PRIVCFG_SHIFT); if (smmu->features & ARM_SMMU_FEAT_STALLS) dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); -- cgit v1.2.3 From 21174240e4f4439bb8ed6c116cdbdc03eba2126e Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 12 Sep 2016 17:13:48 +0100 Subject: iommu/arm-smmu: Handle stream IDs more dynamically Rather than assuming fixed worst-case values for stream IDs and SMR masks, keep track of whatever implemented bits the hardware actually reports. This also obviates the slightly questionable validation of SMR fields in isolation - rather than aborting the whole SMMU probe for a hardware configuration which is still architecturally valid, we can simply refuse masters later if they try to claim an unrepresentable ID or mask (which almost certainly implies a DT error anyway). Acked-by: Will Deacon Tested-by: Lorenzo Pieralisi Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 4b1c87e947fd..f86d7887f69a 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -165,9 +165,7 @@ #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) #define SMR_VALID (1 << 31) #define SMR_MASK_SHIFT 16 -#define SMR_MASK_MASK 0x7fff #define SMR_ID_SHIFT 0 -#define SMR_ID_MASK 0x7fff #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) #define S2CR_CBNDX_SHIFT 0 @@ -346,6 +344,8 @@ struct arm_smmu_device { atomic_t irptndx; u32 num_mapping_groups; + u16 streamid_mask; + u16 smr_mask_mask; DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS); unsigned long va_size; @@ -1715,39 +1715,40 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) dev_notice(smmu->dev, "\t(IDR0.CTTW overridden by dma-coherent property)\n"); + /* Max. 
number of entries we have for stream matching/indexing */ + size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK); + smmu->streamid_mask = size - 1; if (id & ID0_SMS) { - u32 smr, sid, mask; + u32 smr; smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; - smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) & - ID0_NUMSMRG_MASK; - if (smmu->num_mapping_groups == 0) { + size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK; + if (size == 0) { dev_err(smmu->dev, "stream-matching supported, but no SMRs present!\n"); return -ENODEV; } - smr = SMR_MASK_MASK << SMR_MASK_SHIFT; - smr |= (SMR_ID_MASK << SMR_ID_SHIFT); + /* + * SMR.ID bits may not be preserved if the corresponding MASK + * bits are set, so check each one separately. We can reject + * masters later if they try to claim IDs outside these masks. + */ + smr = smmu->streamid_mask << SMR_ID_SHIFT; writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); + smmu->streamid_mask = smr >> SMR_ID_SHIFT; - mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK; - sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK; - if ((mask & sid) != sid) { - dev_err(smmu->dev, - "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n", - mask, sid); - return -ENODEV; - } + smr = smmu->streamid_mask << SMR_MASK_SHIFT; + writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); + smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); + smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT; dev_notice(smmu->dev, - "\tstream matching with %u register groups, mask 0x%x", - smmu->num_mapping_groups, mask); - } else { - smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) & - ID0_NUMSIDB_MASK; + "\tstream matching with %lu register groups, mask 0x%x", + size, smmu->smr_mask_mask); } + smmu->num_mapping_groups = size; if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) { smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; -- cgit v1.2.3 From 1f3d5ca43019bff1105838712d55be087d93c0da Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 12 Sep 2016 17:13:49 +0100 Subject: iommu/arm-smmu: Consolidate stream map entry state In order to consider SMR masking, we really want to be able to validate ID/mask pairs against existing SMR contents to prevent stream match conflicts, which at best would cause transactions to fault unexpectedly, and at worst lead to silent unpredictable behaviour. With our SMMU instance data holding only an allocator bitmap, and the SMR values themselves scattered across master configs hanging off devices which we may have no way of finding, there's essentially no way short of digging everything back out of the hardware. Similarly, the thought of power management ops to support suspend/resume faces the exact same problem. By massaging the software state into a closer shape to the underlying hardware, everything comes together quite nicely; the allocator and the high-level view of the data become a single centralised state which we can easily keep track of, and to which any updates can be validated in full before being synchronised to the hardware itself. 
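As a concrete example of that last point, a future resume path would
reduce to replaying the software view into the hardware. A hedged
sketch (the helper name is assumed; no such function is added by this
patch):

	static void arm_smmu_restore_smrs(struct arm_smmu_device *smmu)
	{
		int i;

		/* The in-memory SMRs are authoritative: just write back */
		for (i = 0; i < smmu->num_mapping_groups; i++)
			arm_smmu_write_smr(smmu, i);
	}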
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 147 +++++++++++++++++++++++++++-------------------- 1 file changed, 86 insertions(+), 61 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index f86d7887f69a..dfe13780ba54 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -28,6 +28,7 @@ #define pr_fmt(fmt) "arm-smmu: " fmt +#include #include #include #include @@ -55,9 +56,6 @@ /* Maximum number of context banks per SMMU */ #define ARM_SMMU_MAX_CBS 128 -/* Maximum number of mapping groups per SMMU */ -#define ARM_SMMU_MAX_SMRS 128 - /* SMMU global address space */ #define ARM_SMMU_GR0(smmu) ((smmu)->base) #define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift)) @@ -295,16 +293,17 @@ enum arm_smmu_implementation { }; struct arm_smmu_smr { - u8 idx; u16 mask; u16 id; + bool valid; }; struct arm_smmu_master_cfg { int num_streamids; u16 streamids[MAX_MASTER_STREAMIDS]; - struct arm_smmu_smr *smrs; + s16 smendx[MAX_MASTER_STREAMIDS]; }; +#define INVALID_SMENDX -1 struct arm_smmu_master { struct device_node *of_node; @@ -346,7 +345,7 @@ struct arm_smmu_device { u32 num_mapping_groups; u16 streamid_mask; u16 smr_mask_mask; - DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS); + struct arm_smmu_smr *smrs; unsigned long va_size; unsigned long ipa_size; @@ -550,6 +549,7 @@ static int register_smmu_master(struct arm_smmu_device *smmu, return -ERANGE; } master->cfg.streamids[i] = streamid; + master->cfg.smendx[i] = INVALID_SMENDX; } return insert_smmu_master(smmu, master); } @@ -1080,79 +1080,91 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) kfree(smmu_domain); } -static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, - struct arm_smmu_master_cfg *cfg) +static int arm_smmu_alloc_smr(struct arm_smmu_device *smmu) { int i; - struct arm_smmu_smr *smrs; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH)) - return 0; + for (i = 0; i < smmu->num_mapping_groups; i++) + if (!cmpxchg(&smmu->smrs[i].valid, false, true)) + return i; - if (cfg->smrs) - return -EEXIST; + return INVALID_SMENDX; +} - smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL); - if (!smrs) { - dev_err(smmu->dev, "failed to allocate %d SMRs\n", - cfg->num_streamids); - return -ENOMEM; - } +static void arm_smmu_free_smr(struct arm_smmu_device *smmu, int idx) +{ + writel_relaxed(~SMR_VALID, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx)); + WRITE_ONCE(smmu->smrs[idx].valid, false); +} + +static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) +{ + struct arm_smmu_smr *smr = smmu->smrs + idx; + u32 reg = (smr->id & smmu->streamid_mask) << SMR_ID_SHIFT | + (smr->mask & smmu->smr_mask_mask) << SMR_MASK_SHIFT; + + if (smr->valid) + reg |= SMR_VALID; + writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx)); +} + +static int arm_smmu_master_alloc_smes(struct arm_smmu_device *smmu, + struct arm_smmu_master_cfg *cfg) +{ + struct arm_smmu_smr *smrs = smmu->smrs; + int i, idx; /* Allocate the SMRs on the SMMU */ for (i = 0; i < cfg->num_streamids; ++i) { - int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0, - smmu->num_mapping_groups); + if (cfg->smendx[i] != INVALID_SMENDX) + return -EEXIST; + + /* ...except on stream indexing hardware, of course */ + if (!smrs) { + cfg->smendx[i] = cfg->streamids[i]; + continue; + } + + idx = arm_smmu_alloc_smr(smmu); if (idx < 0) { dev_err(smmu->dev, "failed to allocate free SMR\n"); 
goto err_free_smrs; } + cfg->smendx[i] = idx; - smrs[i] = (struct arm_smmu_smr) { - .idx = idx, - .mask = 0, /* We don't currently share SMRs */ - .id = cfg->streamids[i], - }; + smrs[idx].id = cfg->streamids[i]; + smrs[idx].mask = 0; /* We don't currently share SMRs */ } + if (!smrs) + return 0; + /* It worked! Now, poke the actual hardware */ - for (i = 0; i < cfg->num_streamids; ++i) { - u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT | - smrs[i].mask << SMR_MASK_SHIFT; - writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx)); - } + for (i = 0; i < cfg->num_streamids; ++i) + arm_smmu_write_smr(smmu, cfg->smendx[i]); - cfg->smrs = smrs; return 0; err_free_smrs: - while (--i >= 0) - __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx); - kfree(smrs); + while (i--) { + arm_smmu_free_smr(smmu, cfg->smendx[i]); + cfg->smendx[i] = INVALID_SMENDX; + } return -ENOSPC; } -static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu, +static void arm_smmu_master_free_smes(struct arm_smmu_device *smmu, struct arm_smmu_master_cfg *cfg) { int i; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - struct arm_smmu_smr *smrs = cfg->smrs; - - if (!smrs) - return; /* Invalidate the SMRs before freeing back to the allocator */ for (i = 0; i < cfg->num_streamids; ++i) { - u8 idx = smrs[i].idx; + if (smmu->smrs) + arm_smmu_free_smr(smmu, cfg->smendx[i]); - writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx)); - __arm_smmu_free_bitmap(smmu->smr_map, idx); + cfg->smendx[i] = INVALID_SMENDX; } - - cfg->smrs = NULL; - kfree(smrs); } static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, @@ -1172,14 +1184,14 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, return 0; /* Devices in an IOMMU group may already be configured */ - ret = arm_smmu_master_configure_smrs(smmu, cfg); + ret = arm_smmu_master_alloc_smes(smmu, cfg); if (ret) return ret == -EEXIST ? 0 : ret; for (i = 0; i < cfg->num_streamids; ++i) { u32 idx, s2cr; - idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i]; + idx = cfg->smendx[i]; s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV | (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT); writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx)); @@ -1195,22 +1207,22 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, struct arm_smmu_device *smmu = smmu_domain->smmu; void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - /* An IOMMU group is torn down by the first device to be removed */ - if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs) - return; - /* * We *must* clear the S2CR first, because freeing the SMR means * that it can be re-allocated immediately. */ for (i = 0; i < cfg->num_streamids; ++i) { - u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i]; + int idx = cfg->smendx[i]; u32 reg = disable_bypass ? 
S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS; + /* An IOMMU group is torn down by the first device to be removed */ + if (idx == INVALID_SMENDX) + return; + writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx)); } - arm_smmu_master_free_smrs(smmu, cfg); + arm_smmu_master_free_smes(smmu, cfg); } static void arm_smmu_detach_dev(struct device *dev, @@ -1424,8 +1436,11 @@ static int arm_smmu_init_pci_device(struct pci_dev *pdev, break; /* Avoid duplicate SIDs, as this can lead to SMR conflicts */ - if (i == cfg->num_streamids) - cfg->streamids[cfg->num_streamids++] = sid; + if (i == cfg->num_streamids) { + cfg->streamids[i] = sid; + cfg->smendx[i] = INVALID_SMENDX; + cfg->num_streamids++; + } return 0; } @@ -1556,17 +1571,21 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) { void __iomem *gr0_base = ARM_SMMU_GR0(smmu); void __iomem *cb_base; - int i = 0; + int i; u32 reg, major; /* clear global FSR */ reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); - /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */ + /* + * Reset stream mapping groups: Initial values mark all SMRn as + * invalid and all S2CRn as bypass unless overridden. + */ reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS; for (i = 0; i < smmu->num_mapping_groups; ++i) { - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i)); + if (smmu->smrs) + arm_smmu_write_smr(smmu, i); writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i)); } @@ -1744,6 +1763,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT; + /* Zero-initialised to mark as invalid */ + smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs), + GFP_KERNEL); + if (!smmu->smrs) + return -ENOMEM; + dev_notice(smmu->dev, "\tstream matching with %lu register groups, mask 0x%x", size, smmu->smr_mask_mask); -- cgit v1.2.3 From 8e8b203eabd8b9e96d02d6339e4abce3e5a7ea4b Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 12 Sep 2016 17:13:50 +0100 Subject: iommu/arm-smmu: Keep track of S2CR state Making S2CRs first-class citizens within the driver with a high-level representation of their state offers a neat solution to a few problems: Firstly, the information about which context a device's stream IDs are associated with is already present by necessity in the S2CR. With that state easily accessible we can refer directly to it and obviate the need to track an IOMMU domain in each device's archdata (its earlier purpose of enforcing correct attachment of multi-device groups now being handled by the IOMMU core itself). Secondly, the core API now deprecates explicit domain detach and expects domain attach to move devices smoothly from one domain to another; for SMMUv2, this notion maps directly to simply rewriting the S2CRs assigned to the device. By giving the driver a suitable abstraction of those S2CRs to work with, we can massively reduce the overhead of the current heavy-handed "detach, free resources, reallocate resources, attach" approach. Thirdly, making the software state hardware-shaped and attached to the SMMU instance once again makes suspend/resume of this register group that much simpler to implement in future. 
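Concretely, moving a device's streams to another domain now reduces to
rewriting its S2CR state and syncing it to the hardware. A hedged
sketch (new_cbndx stands in for the target domain's context bank; this
loop is illustrative, not part of the patch):

	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = cfg->smendx[i];

		smmu->s2crs[idx].type    = S2CR_TYPE_TRANS;
		smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
		smmu->s2crs[idx].cbndx   = new_cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}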
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 159 +++++++++++++++++++++++++++-------------------- 1 file changed, 93 insertions(+), 66 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index dfe13780ba54..69b6cab65421 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -170,12 +170,20 @@ #define S2CR_CBNDX_MASK 0xff #define S2CR_TYPE_SHIFT 16 #define S2CR_TYPE_MASK 0x3 -#define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT) -#define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT) -#define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT) +enum arm_smmu_s2cr_type { + S2CR_TYPE_TRANS, + S2CR_TYPE_BYPASS, + S2CR_TYPE_FAULT, +}; #define S2CR_PRIVCFG_SHIFT 24 -#define S2CR_PRIVCFG_UNPRIV (2 << S2CR_PRIVCFG_SHIFT) +#define S2CR_PRIVCFG_MASK 0x3 +enum arm_smmu_s2cr_privcfg { + S2CR_PRIVCFG_DEFAULT, + S2CR_PRIVCFG_DIPAN, + S2CR_PRIVCFG_UNPRIV, + S2CR_PRIVCFG_PRIV, +}; /* Context bank attribute registers */ #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) @@ -292,6 +300,16 @@ enum arm_smmu_implementation { CAVIUM_SMMUV2, }; +struct arm_smmu_s2cr { + enum arm_smmu_s2cr_type type; + enum arm_smmu_s2cr_privcfg privcfg; + u8 cbndx; +}; + +#define s2cr_init_val (struct arm_smmu_s2cr){ \ + .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \ +} + struct arm_smmu_smr { u16 mask; u16 id; @@ -346,6 +364,7 @@ struct arm_smmu_device { u16 streamid_mask; u16 smr_mask_mask; struct arm_smmu_smr *smrs; + struct arm_smmu_s2cr *s2crs; unsigned long va_size; unsigned long ipa_size; @@ -1108,6 +1127,23 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx)); } +static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) +{ + struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; + u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT | + (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT | + (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT; + + writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx)); +} + +static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx) +{ + arm_smmu_write_s2cr(smmu, idx); + if (smmu->smrs) + arm_smmu_write_smr(smmu, idx); +} + static int arm_smmu_master_alloc_smes(struct arm_smmu_device *smmu, struct arm_smmu_master_cfg *cfg) { @@ -1158,6 +1194,23 @@ static void arm_smmu_master_free_smes(struct arm_smmu_device *smmu, { int i; + /* + * We *must* clear the S2CR first, because freeing the SMR means + * that it can be re-allocated immediately. 
+ */ + for (i = 0; i < cfg->num_streamids; ++i) { + int idx = cfg->smendx[i]; + + /* An IOMMU group is torn down by the first device to be removed */ + if (idx == INVALID_SMENDX) + return; + + smmu->s2crs[idx] = s2cr_init_val; + arm_smmu_write_s2cr(smmu, idx); + } + /* Sync S2CR updates before touching anything else */ + __iowmb(); + /* Invalidate the SMRs before freeing back to the allocator */ for (i = 0; i < cfg->num_streamids; ++i) { if (smmu->smrs) @@ -1170,9 +1223,16 @@ static void arm_smmu_master_free_smes(struct arm_smmu_device *smmu, static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, struct arm_smmu_master_cfg *cfg) { - int i, ret; + int i, ret = 0; struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); + struct arm_smmu_s2cr *s2cr = smmu->s2crs; + enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS; + u8 cbndx = smmu_domain->cfg.cbndx; + + if (cfg->smendx[0] == INVALID_SMENDX) + ret = arm_smmu_master_alloc_smes(smmu, cfg); + if (ret) + return ret; /* * FIXME: This won't be needed once we have IOMMU-backed DMA ops @@ -1181,58 +1241,21 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, * and a PCI device (i.e. a PCI host controller) */ if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA) - return 0; - - /* Devices in an IOMMU group may already be configured */ - ret = arm_smmu_master_alloc_smes(smmu, cfg); - if (ret) - return ret == -EEXIST ? 0 : ret; + type = S2CR_TYPE_BYPASS; - for (i = 0; i < cfg->num_streamids; ++i) { - u32 idx, s2cr; - - idx = cfg->smendx[i]; - s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV | - (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT); - writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx)); - } - - return 0; -} - -static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, - struct arm_smmu_master_cfg *cfg) -{ - int i; - struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - - /* - * We *must* clear the S2CR first, because freeing the SMR means - * that it can be re-allocated immediately. - */ for (i = 0; i < cfg->num_streamids; ++i) { int idx = cfg->smendx[i]; - u32 reg = disable_bypass ? 
S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS; - /* An IOMMU group is torn down by the first device to be removed */ - if (idx == INVALID_SMENDX) - return; + /* Devices in an IOMMU group may already be configured */ + if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx) + break; - writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx)); + s2cr[idx].type = type; + s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV; + s2cr[idx].cbndx = cbndx; + arm_smmu_write_s2cr(smmu, idx); } - - arm_smmu_master_free_smes(smmu, cfg); -} - -static void arm_smmu_detach_dev(struct device *dev, - struct arm_smmu_master_cfg *cfg) -{ - struct iommu_domain *domain = dev->archdata.iommu; - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - - dev->archdata.iommu = NULL; - arm_smmu_domain_remove_master(smmu_domain, cfg); + return 0; } static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) @@ -1269,14 +1292,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) if (!cfg) return -ENODEV; - /* Detach the dev from its current domain */ - if (dev->archdata.iommu) - arm_smmu_detach_dev(dev, cfg); - - ret = arm_smmu_domain_add_master(smmu_domain, cfg); - if (!ret) - dev->archdata.iommu = domain; - return ret; + return arm_smmu_domain_add_master(smmu_domain, cfg); } static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, @@ -1477,6 +1493,12 @@ static int arm_smmu_add_device(struct device *dev) static void arm_smmu_remove_device(struct device *dev) { + struct arm_smmu_device *smmu = find_smmu_for_device(dev); + struct arm_smmu_master_cfg *cfg = find_smmu_master_cfg(dev); + + if (smmu && cfg) + arm_smmu_master_free_smes(smmu, cfg); + iommu_group_remove_device(dev); } @@ -1582,12 +1604,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) * Reset stream mapping groups: Initial values mark all SMRn as * invalid and all S2CRn as bypass unless overridden. */ - reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS; - for (i = 0; i < smmu->num_mapping_groups; ++i) { - if (smmu->smrs) - arm_smmu_write_smr(smmu, i); - writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i)); - } + for (i = 0; i < smmu->num_mapping_groups; ++i) + arm_smmu_write_sme(smmu, i); /* * Before clearing ARM_MMU500_ACTLR_CPRE, need to @@ -1676,6 +1694,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) void __iomem *gr0_base = ARM_SMMU_GR0(smmu); u32 id; bool cttw_dt, cttw_reg; + int i; dev_notice(smmu->dev, "probing hardware configuration...\n"); dev_notice(smmu->dev, "SMMUv%d with:\n", @@ -1773,6 +1792,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) "\tstream matching with %lu register groups, mask 0x%x", size, smmu->smr_mask_mask); } + /* s2cr->type == 0 means translation, so initialise explicitly */ + smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs), + GFP_KERNEL); + if (!smmu->s2crs) + return -ENOMEM; + for (i = 0; i < size; i++) + smmu->s2crs[i] = s2cr_init_val; + smmu->num_mapping_groups = size; if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) { -- cgit v1.2.3 From f80cd885fcdd05dff769d62a116313927a03d480 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 14 Sep 2016 15:21:39 +0100 Subject: iommu/arm-smmu: Refactor mmu-masters handling To be able to support the generic bindings and handle of_xlate() calls, we need to be able to associate SMMUs and stream IDs directly with devices *before* allocating IOMMU groups. 
Furthermore, to support real default domains with multi-device groups we also have to handle domain attach on a per-device basis, as the "whole group at a time" assumption fails to properly handle subsequent devices added to a group after the first has already triggered default domain creation and attachment. To that end, use the now-vacant dev->archdata.iommu field for easy config and SMMU instance lookup, and unify config management by chopping down the platform-device-specific tree and probing the "mmu-masters" property on-demand instead. This may add a bit of one-off overhead to initially adding a new device, but we're about to deprecate that binding in favour of the inherently-more-efficient generic ones anyway. For the sake of simplicity, this patch does temporarily regress the case of aliasing PCI devices by losing the duplicate stream ID detection that the previous per-group config had. Stay tuned, because we'll be back to fix that in a better and more general way momentarily... Tested-by: Lorenzo Pieralisi Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 370 +++++++++++++---------------------------------- 1 file changed, 101 insertions(+), 269 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 69b6cab65421..2023a77015a0 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -317,18 +317,13 @@ struct arm_smmu_smr { }; struct arm_smmu_master_cfg { + struct arm_smmu_device *smmu; int num_streamids; u16 streamids[MAX_MASTER_STREAMIDS]; s16 smendx[MAX_MASTER_STREAMIDS]; }; #define INVALID_SMENDX -1 -struct arm_smmu_master { - struct device_node *of_node; - struct rb_node node; - struct arm_smmu_master_cfg cfg; -}; - struct arm_smmu_device { struct device *dev; @@ -376,7 +371,6 @@ struct arm_smmu_device { unsigned int *irqs; struct list_head list; - struct rb_root masters; u32 cavium_id_base; /* Specific to Cavium */ }; @@ -415,12 +409,6 @@ struct arm_smmu_domain { struct iommu_domain domain; }; -struct arm_smmu_phandle_args { - struct device_node *np; - int args_count; - uint32_t args[MAX_MASTER_STREAMIDS]; -}; - static DEFINE_SPINLOCK(arm_smmu_devices_lock); static LIST_HEAD(arm_smmu_devices); @@ -462,132 +450,89 @@ static struct device_node *dev_get_dev_node(struct device *dev) while (!pci_is_root_bus(bus)) bus = bus->parent; - return bus->bridge->parent->of_node; + return of_node_get(bus->bridge->parent->of_node); } - return dev->of_node; + return of_node_get(dev->of_node); } -static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, - struct device_node *dev_node) +static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data) { - struct rb_node *node = smmu->masters.rb_node; - - while (node) { - struct arm_smmu_master *master; - - master = container_of(node, struct arm_smmu_master, node); - - if (dev_node < master->of_node) - node = node->rb_left; - else if (dev_node > master->of_node) - node = node->rb_right; - else - return master; - } - - return NULL; + *((__be32 *)data) = cpu_to_be32(alias); + return 0; /* Continue walking */ } -static struct arm_smmu_master_cfg * -find_smmu_master_cfg(struct device *dev) +static int __find_legacy_master_phandle(struct device *dev, void *data) { - struct arm_smmu_master_cfg *cfg = NULL; - struct iommu_group *group = iommu_group_get(dev); - - if (group) { - cfg = iommu_group_get_iommudata(group); - iommu_group_put(group); - } - - return cfg; + struct of_phandle_iterator *it = *(void **)data; + 
struct device_node *np = it->node; + int err; + + of_for_each_phandle(it, err, dev->of_node, "mmu-masters", + "#stream-id-cells", 0) + if (it->node == np) { + *(void **)data = dev; + return 1; + } + it->node = np; + return err == -ENOENT ? 0 : err; } -static int insert_smmu_master(struct arm_smmu_device *smmu, - struct arm_smmu_master *master) +static int arm_smmu_register_legacy_master(struct device *dev) { - struct rb_node **new, *parent; - - new = &smmu->masters.rb_node; - parent = NULL; - while (*new) { - struct arm_smmu_master *this - = container_of(*new, struct arm_smmu_master, node); - - parent = *new; - if (master->of_node < this->of_node) - new = &((*new)->rb_left); - else if (master->of_node > this->of_node) - new = &((*new)->rb_right); - else - return -EEXIST; - } - - rb_link_node(&master->node, parent, new); - rb_insert_color(&master->node, &smmu->masters); - return 0; -} + struct arm_smmu_device *smmu; + struct arm_smmu_master_cfg *cfg; + struct device_node *np; + struct of_phandle_iterator it; + void *data = ⁢ + __be32 pci_sid; + int err; -static int register_smmu_master(struct arm_smmu_device *smmu, - struct device *dev, - struct arm_smmu_phandle_args *masterspec) -{ - int i; - struct arm_smmu_master *master; + np = dev_get_dev_node(dev); + if (!np || !of_find_property(np, "#stream-id-cells", NULL)) { + of_node_put(np); + return -ENODEV; + } - master = find_smmu_master(smmu, masterspec->np); - if (master) { - dev_err(dev, - "rejecting multiple registrations for master device %s\n", - masterspec->np->name); - return -EBUSY; + it.node = np; + spin_lock(&arm_smmu_devices_lock); + list_for_each_entry(smmu, &arm_smmu_devices, list) { + err = __find_legacy_master_phandle(smmu->dev, &data); + if (err) + break; } + spin_unlock(&arm_smmu_devices_lock); + of_node_put(np); + if (err == 0) + return -ENODEV; + if (err < 0) + return err; - if (masterspec->args_count > MAX_MASTER_STREAMIDS) { - dev_err(dev, + if (it.cur_count > MAX_MASTER_STREAMIDS) { + dev_err(smmu->dev, "reached maximum number (%d) of stream IDs for master device %s\n", - MAX_MASTER_STREAMIDS, masterspec->np->name); + MAX_MASTER_STREAMIDS, dev_name(dev)); return -ENOSPC; } + if (dev_is_pci(dev)) { + /* "mmu-masters" assumes Stream ID == Requester ID */ + pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid, + &pci_sid); + it.cur = &pci_sid; + it.cur_count = 1; + } - master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL); - if (!master) + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) return -ENOMEM; - master->of_node = masterspec->np; - master->cfg.num_streamids = masterspec->args_count; - - for (i = 0; i < master->cfg.num_streamids; ++i) { - u16 streamid = masterspec->args[i]; - - if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && - (streamid >= smmu->num_mapping_groups)) { - dev_err(dev, - "stream ID for master device %s greater than maximum allowed (%d)\n", - masterspec->np->name, smmu->num_mapping_groups); - return -ERANGE; - } - master->cfg.streamids[i] = streamid; - master->cfg.smendx[i] = INVALID_SMENDX; - } - return insert_smmu_master(smmu, master); -} - -static struct arm_smmu_device *find_smmu_for_device(struct device *dev) -{ - struct arm_smmu_device *smmu; - struct arm_smmu_master *master = NULL; - struct device_node *dev_node = dev_get_dev_node(dev); + cfg->smmu = smmu; + dev->archdata.iommu = cfg; - spin_lock(&arm_smmu_devices_lock); - list_for_each_entry(smmu, &arm_smmu_devices, list) { - master = find_smmu_master(smmu, dev_node); - if (master) - break; - } - 
spin_unlock(&arm_smmu_devices_lock); + while (it.cur_count--) + cfg->streamids[cfg->num_streamids++] = be32_to_cpup(it.cur++); - return master ? smmu : NULL; + return 0; } static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) @@ -1119,8 +1064,7 @@ static void arm_smmu_free_smr(struct arm_smmu_device *smmu, int idx) static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) { struct arm_smmu_smr *smr = smmu->smrs + idx; - u32 reg = (smr->id & smmu->streamid_mask) << SMR_ID_SHIFT | - (smr->mask & smmu->smr_mask_mask) << SMR_MASK_SHIFT; + u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT; if (smr->valid) reg |= SMR_VALID; @@ -1189,9 +1133,9 @@ err_free_smrs: return -ENOSPC; } -static void arm_smmu_master_free_smes(struct arm_smmu_device *smmu, - struct arm_smmu_master_cfg *cfg) +static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg) { + struct arm_smmu_device *smmu = cfg->smmu; int i; /* @@ -1262,17 +1206,15 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) { int ret; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_device *smmu; - struct arm_smmu_master_cfg *cfg; + struct arm_smmu_master_cfg *cfg = dev->archdata.iommu; - smmu = find_smmu_for_device(dev); - if (!smmu) { + if (!cfg) { dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); return -ENXIO; } /* Ensure that the domain is finalised */ - ret = arm_smmu_init_domain_context(domain, smmu); + ret = arm_smmu_init_domain_context(domain, cfg->smmu); if (ret < 0) return ret; @@ -1280,18 +1222,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) * Sanity check the domain. We don't support domains across * different SMMUs. */ - if (smmu_domain->smmu != smmu) { + if (smmu_domain->smmu != cfg->smmu) { dev_err(dev, "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", - dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); + dev_name(smmu_domain->smmu->dev), dev_name(cfg->smmu->dev)); return -EINVAL; } /* Looks ok, so add the device to the domain */ - cfg = find_smmu_master_cfg(dev); - if (!cfg) - return -ENODEV; - return arm_smmu_domain_add_master(smmu_domain, cfg); } @@ -1411,120 +1349,65 @@ static bool arm_smmu_capable(enum iommu_cap cap) } } -static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data) -{ - *((u16 *)data) = alias; - return 0; /* Continue walking */ -} - -static void __arm_smmu_release_pci_iommudata(void *data) -{ - kfree(data); -} - -static int arm_smmu_init_pci_device(struct pci_dev *pdev, - struct iommu_group *group) +static int arm_smmu_add_device(struct device *dev) { struct arm_smmu_master_cfg *cfg; - u16 sid; - int i; - - cfg = iommu_group_get_iommudata(group); - if (!cfg) { - cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); - if (!cfg) - return -ENOMEM; - - iommu_group_set_iommudata(group, cfg, - __arm_smmu_release_pci_iommudata); - } + struct iommu_group *group; + int i, ret; - if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) - return -ENOSPC; + ret = arm_smmu_register_legacy_master(dev); + cfg = dev->archdata.iommu; + if (ret) + goto out_free; - /* - * Assume Stream ID == Requester ID for now. - * We need a way to describe the ID mappings in FDT. 
- */ - pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid); - for (i = 0; i < cfg->num_streamids; ++i) - if (cfg->streamids[i] == sid) - break; + ret = -EINVAL; + for (i = 0; i < cfg->num_streamids; i++) { + u16 sid = cfg->streamids[i]; - /* Avoid duplicate SIDs, as this can lead to SMR conflicts */ - if (i == cfg->num_streamids) { - cfg->streamids[i] = sid; + if (sid & ~cfg->smmu->streamid_mask) { + dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n", + sid, cfg->smmu->streamid_mask); + goto out_free; + } cfg->smendx[i] = INVALID_SMENDX; - cfg->num_streamids++; } - return 0; -} - -static int arm_smmu_init_platform_device(struct device *dev, - struct iommu_group *group) -{ - struct arm_smmu_device *smmu = find_smmu_for_device(dev); - struct arm_smmu_master *master; - - if (!smmu) - return -ENODEV; - - master = find_smmu_master(smmu, dev->of_node); - if (!master) - return -ENODEV; - - iommu_group_set_iommudata(group, &master->cfg, NULL); - - return 0; -} - -static int arm_smmu_add_device(struct device *dev) -{ - struct iommu_group *group; - group = iommu_group_get_for_dev(dev); - if (IS_ERR(group)) - return PTR_ERR(group); - + if (IS_ERR(group)) { + ret = PTR_ERR(group); + goto out_free; + } iommu_group_put(group); return 0; + +out_free: + kfree(cfg); + dev->archdata.iommu = NULL; + return ret; } static void arm_smmu_remove_device(struct device *dev) { - struct arm_smmu_device *smmu = find_smmu_for_device(dev); - struct arm_smmu_master_cfg *cfg = find_smmu_master_cfg(dev); + struct arm_smmu_master_cfg *cfg = dev->archdata.iommu; - if (smmu && cfg) - arm_smmu_master_free_smes(smmu, cfg); + if (!cfg) + return; + arm_smmu_master_free_smes(cfg); iommu_group_remove_device(dev); + kfree(cfg); + dev->archdata.iommu = NULL; } static struct iommu_group *arm_smmu_device_group(struct device *dev) { struct iommu_group *group; - int ret; if (dev_is_pci(dev)) group = pci_device_group(dev); else group = generic_device_group(dev); - if (IS_ERR(group)) - return group; - - if (dev_is_pci(dev)) - ret = arm_smmu_init_pci_device(to_pci_dev(dev), group); - else - ret = arm_smmu_init_platform_device(dev, group); - - if (ret) { - iommu_group_put(group); - group = ERR_PTR(ret); - } - return group; } @@ -1938,9 +1821,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) struct resource *res; struct arm_smmu_device *smmu; struct device *dev = &pdev->dev; - struct rb_node *node; - struct of_phandle_iterator it; - struct arm_smmu_phandle_args *masterspec; int num_irqs, i, err; smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); @@ -2001,37 +1881,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) if (err) return err; - i = 0; - smmu->masters = RB_ROOT; - - err = -ENOMEM; - /* No need to zero the memory for masterspec */ - masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL); - if (!masterspec) - goto out_put_masters; - - of_for_each_phandle(&it, err, dev->of_node, - "mmu-masters", "#stream-id-cells", 0) { - int count = of_phandle_iterator_args(&it, masterspec->args, - MAX_MASTER_STREAMIDS); - masterspec->np = of_node_get(it.node); - masterspec->args_count = count; - - err = register_smmu_master(smmu, dev, masterspec); - if (err) { - dev_err(dev, "failed to add master %s\n", - masterspec->np->name); - kfree(masterspec); - goto out_put_masters; - } - - i++; - } - - dev_notice(dev, "registered %d master devices\n", i); - - kfree(masterspec); - parse_driver_options(smmu); if (smmu->version == ARM_SMMU_V2 && @@ -2039,8 +1888,7 @@ static int 
arm_smmu_device_dt_probe(struct platform_device *pdev) dev_err(dev, "found only %d context interrupt(s) but %d required\n", smmu->num_context_irqs, smmu->num_context_banks); - err = -ENODEV; - goto out_put_masters; + return -ENODEV; } for (i = 0; i < smmu->num_global_irqs; ++i) { @@ -2052,7 +1900,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) if (err) { dev_err(dev, "failed to request global IRQ %d (%u)\n", i, smmu->irqs[i]); - goto out_put_masters; + return err; } } @@ -2063,22 +1911,12 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) arm_smmu_device_reset(smmu); return 0; - -out_put_masters: - for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { - struct arm_smmu_master *master - = container_of(node, struct arm_smmu_master, node); - of_node_put(master->of_node); - } - - return err; } static int arm_smmu_device_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct arm_smmu_device *curr, *smmu = NULL; - struct rb_node *node; spin_lock(&arm_smmu_devices_lock); list_for_each_entry(curr, &arm_smmu_devices, list) { @@ -2093,12 +1931,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev) if (!smmu) return -ENODEV; - for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { - struct arm_smmu_master *master - = container_of(node, struct arm_smmu_master, node); - of_node_put(master->of_node); - } - if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) dev_err(dev, "removing device with active domains!\n"); -- cgit v1.2.3 From d6fc5d977671d16535de66645f83a1bd07e1317d Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 12 Sep 2016 17:13:52 +0100 Subject: iommu/arm-smmu: Streamline SMMU data lookups Simplify things somewhat by stashing our arm_smmu_device instance in drvdata, so that it's readily available to our driver model callbacks. Then we can excise the private list entirely, since the driver core already has a perfectly good list of SMMU devices we can use in the one instance we actually need to. Finally, make a further modest code saving with the relatively new of_device_get_match_data() helper. Tested-by: Lorenzo Pieralisi Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 44 +++++++++++--------------------------------- 1 file changed, 11 insertions(+), 33 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 2023a77015a0..b6e26dba2dec 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -370,8 +371,6 @@ struct arm_smmu_device { u32 num_context_irqs; unsigned int *irqs; - struct list_head list; - u32 cavium_id_base; /* Specific to Cavium */ }; @@ -409,9 +408,6 @@ struct arm_smmu_domain { struct iommu_domain domain; }; -static DEFINE_SPINLOCK(arm_smmu_devices_lock); -static LIST_HEAD(arm_smmu_devices); - struct arm_smmu_option_prop { u32 opt; const char *prop; @@ -478,6 +474,8 @@ static int __find_legacy_master_phandle(struct device *dev, void *data) return err == -ENOENT ? 
0 : err; } +static struct platform_driver arm_smmu_driver; + static int arm_smmu_register_legacy_master(struct device *dev) { struct arm_smmu_device *smmu; @@ -495,19 +493,16 @@ static int arm_smmu_register_legacy_master(struct device *dev) } it.node = np; - spin_lock(&arm_smmu_devices_lock); - list_for_each_entry(smmu, &arm_smmu_devices, list) { - err = __find_legacy_master_phandle(smmu->dev, &data); - if (err) - break; - } - spin_unlock(&arm_smmu_devices_lock); + err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data, + __find_legacy_master_phandle); of_node_put(np); if (err == 0) return -ENODEV; if (err < 0) return err; + smmu = dev_get_drvdata(data); + if (it.cur_count > MAX_MASTER_STREAMIDS) { dev_err(smmu->dev, "reached maximum number (%d) of stream IDs for master device %s\n", @@ -1816,7 +1811,6 @@ MODULE_DEVICE_TABLE(of, arm_smmu_of_match); static int arm_smmu_device_dt_probe(struct platform_device *pdev) { - const struct of_device_id *of_id; const struct arm_smmu_match_data *data; struct resource *res; struct arm_smmu_device *smmu; @@ -1830,8 +1824,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) } smmu->dev = dev; - of_id = of_match_node(arm_smmu_of_match, dev->of_node); - data = of_id->data; + data = of_device_get_match_data(dev); smmu->version = data->version; smmu->model = data->model; @@ -1904,35 +1897,20 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) } } - INIT_LIST_HEAD(&smmu->list); - spin_lock(&arm_smmu_devices_lock); - list_add(&smmu->list, &arm_smmu_devices); - spin_unlock(&arm_smmu_devices_lock); - + platform_set_drvdata(pdev, smmu); arm_smmu_device_reset(smmu); return 0; } static int arm_smmu_device_remove(struct platform_device *pdev) { - struct device *dev = &pdev->dev; - struct arm_smmu_device *curr, *smmu = NULL; - - spin_lock(&arm_smmu_devices_lock); - list_for_each_entry(curr, &arm_smmu_devices, list) { - if (curr->dev == dev) { - smmu = curr; - list_del(&smmu->list); - break; - } - } - spin_unlock(&arm_smmu_devices_lock); + struct arm_smmu_device *smmu = platform_get_drvdata(pdev); if (!smmu) return -ENODEV; if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) - dev_err(dev, "removing device with active domains!\n"); + dev_err(&pdev->dev, "removing device with active domains!\n"); /* Turn the thing off */ writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); -- cgit v1.2.3 From d3097e39302083d58922a3d1032d7d59a63d263d Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 12 Sep 2016 17:13:53 +0100 Subject: iommu/arm-smmu: Add a stream map entry iterator We iterate over the SMEs associated with a master config quite a lot in various places, and are about to do so even more. Let's wrap the idiom in a handy iterator macro before the repetition gets out of hand. 
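The macro yields both the position within the config and the stream
map entry index in one pass; typical usage (illustrative only) looks
like:

	int i, idx;

	for_each_cfg_sme(cfg, i, idx) {
		if (idx == INVALID_SMENDX)
			continue;
		arm_smmu_write_sme(smmu, idx);
	}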
Tested-by: Lorenzo Pieralisi Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index b6e26dba2dec..f402f9e126a8 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -324,6 +324,8 @@ struct arm_smmu_master_cfg { s16 smendx[MAX_MASTER_STREAMIDS]; }; #define INVALID_SMENDX -1 +#define for_each_cfg_sme(cfg, i, idx) \ + for (i = 0; idx = cfg->smendx[i], i < cfg->num_streamids; ++i) struct arm_smmu_device { struct device *dev; @@ -1090,8 +1092,8 @@ static int arm_smmu_master_alloc_smes(struct arm_smmu_device *smmu, int i, idx; /* Allocate the SMRs on the SMMU */ - for (i = 0; i < cfg->num_streamids; ++i) { - if (cfg->smendx[i] != INVALID_SMENDX) + for_each_cfg_sme(cfg, i, idx) { + if (idx != INVALID_SMENDX) return -EEXIST; /* ...except on stream indexing hardware, of course */ @@ -1115,8 +1117,8 @@ static int arm_smmu_master_alloc_smes(struct arm_smmu_device *smmu, return 0; /* It worked! Now, poke the actual hardware */ - for (i = 0; i < cfg->num_streamids; ++i) - arm_smmu_write_smr(smmu, cfg->smendx[i]); + for_each_cfg_sme(cfg, i, idx) + arm_smmu_write_smr(smmu, idx); return 0; @@ -1131,15 +1133,13 @@ err_free_smrs: static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg) { struct arm_smmu_device *smmu = cfg->smmu; - int i; + int i, idx; /* * We *must* clear the S2CR first, because freeing the SMR means * that it can be re-allocated immediately. */ - for (i = 0; i < cfg->num_streamids; ++i) { - int idx = cfg->smendx[i]; - + for_each_cfg_sme(cfg, i, idx) { /* An IOMMU group is torn down by the first device to be removed */ if (idx == INVALID_SMENDX) return; @@ -1151,9 +1151,9 @@ static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg) __iowmb(); /* Invalidate the SMRs before freeing back to the allocator */ - for (i = 0; i < cfg->num_streamids; ++i) { + for_each_cfg_sme(cfg, i, idx) { if (smmu->smrs) - arm_smmu_free_smr(smmu, cfg->smendx[i]); + arm_smmu_free_smr(smmu, idx); cfg->smendx[i] = INVALID_SMENDX; } @@ -1162,7 +1162,7 @@ static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg) static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, struct arm_smmu_master_cfg *cfg) { - int i, ret = 0; + int i, idx, ret = 0; struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_s2cr *s2cr = smmu->s2crs; enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS; @@ -1182,9 +1182,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA) type = S2CR_TYPE_BYPASS; - for (i = 0; i < cfg->num_streamids; ++i) { - int idx = cfg->smendx[i]; - + for_each_cfg_sme(cfg, i, idx) { /* Devices in an IOMMU group may already be configured */ if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx) break; -- cgit v1.2.3 From 588888a7399db352d2b1a41c9d5b3bf0fd482390 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 12 Sep 2016 17:13:54 +0100 Subject: iommu/arm-smmu: Intelligent SMR allocation Stream Match Registers are one of the more awkward parts of the SMMUv2 architecture; there are typically never enough to assign one to each stream ID in the system, and configuring them such that a single ID matches multiple entries is catastrophically bad - at best, every transaction raises a global fault; at worst, they go *somewhere*. 
To address the former issue, we can mask ID bits such that a single register may be used to match multiple IDs belonging to the same device or group, but doing so also heightens the risk of the latter problem (which can be nasty to debug). Tackle both problems at once by replacing the simple bitmap allocator with something much cleverer. Now that we have convenient in-memory representations of the stream mapping table, it becomes straightforward to properly validate new SMR entries against the current state, opening the door to arbitrary masking and SMR sharing. Another feature which falls out of this is that with IDs shared by separate devices being automatically accounted for, simply associating a group pointer with the S2CR offers appropriate group allocation almost for free, so hook that up in the process. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 197 ++++++++++++++++++++++++++++------------------- 1 file changed, 119 insertions(+), 78 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index f402f9e126a8..7a2bd60a54da 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -302,6 +302,8 @@ enum arm_smmu_implementation { }; struct arm_smmu_s2cr { + struct iommu_group *group; + int count; enum arm_smmu_s2cr_type type; enum arm_smmu_s2cr_privcfg privcfg; u8 cbndx; @@ -363,6 +365,7 @@ struct arm_smmu_device { u16 smr_mask_mask; struct arm_smmu_smr *smrs; struct arm_smmu_s2cr *s2crs; + struct mutex stream_map_mutex; unsigned long va_size; unsigned long ipa_size; @@ -1041,23 +1044,6 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) kfree(smmu_domain); } -static int arm_smmu_alloc_smr(struct arm_smmu_device *smmu) -{ - int i; - - for (i = 0; i < smmu->num_mapping_groups; i++) - if (!cmpxchg(&smmu->smrs[i].valid, false, true)) - return i; - - return INVALID_SMENDX; -} - -static void arm_smmu_free_smr(struct arm_smmu_device *smmu, int idx) -{ - writel_relaxed(~SMR_VALID, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx)); - WRITE_ONCE(smmu->smrs[idx].valid, false); -} - static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) { struct arm_smmu_smr *smr = smmu->smrs + idx; @@ -1085,49 +1071,115 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx) arm_smmu_write_smr(smmu, idx); } -static int arm_smmu_master_alloc_smes(struct arm_smmu_device *smmu, - struct arm_smmu_master_cfg *cfg) +static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask) { struct arm_smmu_smr *smrs = smmu->smrs; - int i, idx; - - /* Allocate the SMRs on the SMMU */ - for_each_cfg_sme(cfg, i, idx) { - if (idx != INVALID_SMENDX) - return -EEXIST; + int i, free_idx = -ENOSPC; - /* ...except on stream indexing hardware, of course */ - if (!smrs) { - cfg->smendx[i] = cfg->streamids[i]; + /* Stream indexing is blissfully easy */ + if (!smrs) + return id; + + /* Validating SMRs is... less so */ + for (i = 0; i < smmu->num_mapping_groups; ++i) { + if (!smrs[i].valid) { + /* + * Note the first free entry we come across, which + * we'll claim in the end if nothing else matches. + */ + if (free_idx < 0) + free_idx = i; continue; } + /* + * If the new entry is _entirely_ matched by an existing entry, + * then reuse that, with the guarantee that there also cannot + * be any subsequent conflicting entries. In normal use we'd + * expect simply identical entries for this case, but there's + * no harm in accommodating the generalisation. 
+ */ + if ((mask & smrs[i].mask) == mask && + !((id ^ smrs[i].id) & ~smrs[i].mask)) + return i; + /* + * If the new entry has any other overlap with an existing one, + * though, then there always exists at least one stream ID + * which would cause a conflict, and we can't allow that risk. + */ + if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask))) + return -EINVAL; + } - idx = arm_smmu_alloc_smr(smmu); - if (idx < 0) { - dev_err(smmu->dev, "failed to allocate free SMR\n"); - goto err_free_smrs; + return free_idx; +} + +static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx) +{ + if (--smmu->s2crs[idx].count) + return false; + + smmu->s2crs[idx] = s2cr_init_val; + if (smmu->smrs) + smmu->smrs[idx].valid = false; + + return true; +} + +static int arm_smmu_master_alloc_smes(struct device *dev) +{ + struct arm_smmu_master_cfg *cfg = dev->archdata.iommu; + struct arm_smmu_device *smmu = cfg->smmu; + struct arm_smmu_smr *smrs = smmu->smrs; + struct iommu_group *group; + int i, idx, ret; + + mutex_lock(&smmu->stream_map_mutex); + /* Figure out a viable stream map entry allocation */ + for_each_cfg_sme(cfg, i, idx) { + if (idx != INVALID_SMENDX) { + ret = -EEXIST; + goto out_err; } - cfg->smendx[i] = idx; - smrs[idx].id = cfg->streamids[i]; - smrs[idx].mask = 0; /* We don't currently share SMRs */ + ret = arm_smmu_find_sme(smmu, cfg->streamids[i], 0); + if (ret < 0) + goto out_err; + + idx = ret; + if (smrs && smmu->s2crs[idx].count == 0) { + smrs[idx].id = cfg->streamids[i]; + smrs[idx].mask = 0; /* We don't currently share SMRs */ + smrs[idx].valid = true; + } + smmu->s2crs[idx].count++; + cfg->smendx[i] = (s16)idx; } - if (!smrs) - return 0; + group = iommu_group_get_for_dev(dev); + if (!group) + group = ERR_PTR(-ENOMEM); + if (IS_ERR(group)) { + ret = PTR_ERR(group); + goto out_err; + } + iommu_group_put(group); /* It worked! Now, poke the actual hardware */ - for_each_cfg_sme(cfg, i, idx) - arm_smmu_write_smr(smmu, idx); + for_each_cfg_sme(cfg, i, idx) { + arm_smmu_write_sme(smmu, idx); + smmu->s2crs[idx].group = group; + } + mutex_unlock(&smmu->stream_map_mutex); return 0; -err_free_smrs: +out_err: while (i--) { - arm_smmu_free_smr(smmu, cfg->smendx[i]); + arm_smmu_free_sme(smmu, cfg->smendx[i]); cfg->smendx[i] = INVALID_SMENDX; } - return -ENOSPC; + mutex_unlock(&smmu->stream_map_mutex); + return ret; } static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg) @@ -1135,43 +1187,23 @@ static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg) struct arm_smmu_device *smmu = cfg->smmu; int i, idx; - /* - * We *must* clear the S2CR first, because freeing the SMR means - * that it can be re-allocated immediately. 
- */ + mutex_lock(&smmu->stream_map_mutex); for_each_cfg_sme(cfg, i, idx) { - /* An IOMMU group is torn down by the first device to be removed */ - if (idx == INVALID_SMENDX) - return; - - smmu->s2crs[idx] = s2cr_init_val; - arm_smmu_write_s2cr(smmu, idx); - } - /* Sync S2CR updates before touching anything else */ - __iowmb(); - - /* Invalidate the SMRs before freeing back to the allocator */ - for_each_cfg_sme(cfg, i, idx) { - if (smmu->smrs) - arm_smmu_free_smr(smmu, idx); - + if (arm_smmu_free_sme(smmu, idx)) + arm_smmu_write_sme(smmu, idx); cfg->smendx[i] = INVALID_SMENDX; } + mutex_unlock(&smmu->stream_map_mutex); } static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, struct arm_smmu_master_cfg *cfg) { - int i, idx, ret = 0; struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_s2cr *s2cr = smmu->s2crs; enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS; u8 cbndx = smmu_domain->cfg.cbndx; - - if (cfg->smendx[0] == INVALID_SMENDX) - ret = arm_smmu_master_alloc_smes(smmu, cfg); - if (ret) - return ret; + int i, idx; /* * FIXME: This won't be needed once we have IOMMU-backed DMA ops @@ -1183,9 +1215,8 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, type = S2CR_TYPE_BYPASS; for_each_cfg_sme(cfg, i, idx) { - /* Devices in an IOMMU group may already be configured */ if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx) - break; + continue; s2cr[idx].type = type; s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV; @@ -1345,7 +1376,6 @@ static bool arm_smmu_capable(enum iommu_cap cap) static int arm_smmu_add_device(struct device *dev) { struct arm_smmu_master_cfg *cfg; - struct iommu_group *group; int i, ret; ret = arm_smmu_register_legacy_master(dev); @@ -1365,13 +1395,9 @@ static int arm_smmu_add_device(struct device *dev) cfg->smendx[i] = INVALID_SMENDX; } - group = iommu_group_get_for_dev(dev); - if (IS_ERR(group)) { - ret = PTR_ERR(group); - goto out_free; - } - iommu_group_put(group); - return 0; + ret = arm_smmu_master_alloc_smes(dev); + if (!ret) + return ret; out_free: kfree(cfg); @@ -1394,7 +1420,21 @@ static void arm_smmu_remove_device(struct device *dev) static struct iommu_group *arm_smmu_device_group(struct device *dev) { - struct iommu_group *group; + struct arm_smmu_master_cfg *cfg = dev->archdata.iommu; + struct arm_smmu_device *smmu = cfg->smmu; + struct iommu_group *group = NULL; + int i, idx; + + for_each_cfg_sme(cfg, i, idx) { + if (group && smmu->s2crs[idx].group && + group != smmu->s2crs[idx].group) + return ERR_PTR(-EINVAL); + + group = smmu->s2crs[idx].group; + } + + if (group) + return group; if (dev_is_pci(dev)) group = pci_device_group(dev); @@ -1677,6 +1717,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) smmu->s2crs[i] = s2cr_init_val; smmu->num_mapping_groups = size; + mutex_init(&smmu->stream_map_mutex); if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) { smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; -- cgit v1.2.3 From adfec2e709d2a48dbd756d65fe4fa8e4aae529a3 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 12 Sep 2016 17:13:55 +0100 Subject: iommu/arm-smmu: Convert to iommu_fwspec In the final step of preparation for full generic configuration support, swap our fixed-size master_cfg for the generic iommu_fwspec. For the legacy DT bindings, the driver simply gets to act as its own 'firmware'. Farewell, arbitrary MAX_MASTER_STREAMIDS! 
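After the conversion, the per-device stream IDs live in the core's
iommu_fwspec and the driver-private config shrinks to the SMMU pointer
plus a variable-length smendx[] array. A hedged sketch of the
resulting access pattern (the debug print is illustrative only):

	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx)
		dev_dbg(smmu->dev, "SID 0x%x -> SME %d\n",
			fwspec->ids[i], idx);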
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 140 ++++++++++++++++++++++++++--------------------- 1 file changed, 78 insertions(+), 62 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 7a2bd60a54da..9dbb6a37e625 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -51,9 +52,6 @@ #include "io-pgtable.h" -/* Maximum number of stream IDs assigned to a single device */ -#define MAX_MASTER_STREAMIDS 128 - /* Maximum number of context banks per SMMU */ #define ARM_SMMU_MAX_CBS 128 @@ -321,13 +319,13 @@ struct arm_smmu_smr { struct arm_smmu_master_cfg { struct arm_smmu_device *smmu; - int num_streamids; - u16 streamids[MAX_MASTER_STREAMIDS]; - s16 smendx[MAX_MASTER_STREAMIDS]; + s16 smendx[]; }; #define INVALID_SMENDX -1 -#define for_each_cfg_sme(cfg, i, idx) \ - for (i = 0; idx = cfg->smendx[i], i < cfg->num_streamids; ++i) +#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv) +#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu) +#define for_each_cfg_sme(fw, i, idx) \ + for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i) struct arm_smmu_device { struct device *dev; @@ -480,14 +478,16 @@ static int __find_legacy_master_phandle(struct device *dev, void *data) } static struct platform_driver arm_smmu_driver; +static struct iommu_ops arm_smmu_ops; -static int arm_smmu_register_legacy_master(struct device *dev) +static int arm_smmu_register_legacy_master(struct device *dev, + struct arm_smmu_device **smmu) { - struct arm_smmu_device *smmu; - struct arm_smmu_master_cfg *cfg; + struct device *smmu_dev; struct device_node *np; struct of_phandle_iterator it; void *data = ⁢ + u32 *sids; __be32 pci_sid; int err; @@ -500,20 +500,13 @@ static int arm_smmu_register_legacy_master(struct device *dev) it.node = np; err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data, __find_legacy_master_phandle); + smmu_dev = data; of_node_put(np); if (err == 0) return -ENODEV; if (err < 0) return err; - smmu = dev_get_drvdata(data); - - if (it.cur_count > MAX_MASTER_STREAMIDS) { - dev_err(smmu->dev, - "reached maximum number (%d) of stream IDs for master device %s\n", - MAX_MASTER_STREAMIDS, dev_name(dev)); - return -ENOSPC; - } if (dev_is_pci(dev)) { /* "mmu-masters" assumes Stream ID == Requester ID */ pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid, @@ -522,17 +515,20 @@ static int arm_smmu_register_legacy_master(struct device *dev) it.cur_count = 1; } - cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); - if (!cfg) - return -ENOMEM; - - cfg->smmu = smmu; - dev->archdata.iommu = cfg; + err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode, + &arm_smmu_ops); + if (err) + return err; - while (it.cur_count--) - cfg->streamids[cfg->num_streamids++] = be32_to_cpup(it.cur++); + sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL); + if (!sids) + return -ENOMEM; - return 0; + *smmu = dev_get_drvdata(smmu_dev); + of_phandle_iterator_args(&it, sids, it.cur_count); + err = iommu_fwspec_add_ids(dev, sids, it.cur_count); + kfree(sids); + return err; } static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) @@ -1127,7 +1123,8 @@ static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx) static int arm_smmu_master_alloc_smes(struct device *dev) { - struct arm_smmu_master_cfg *cfg = dev->archdata.iommu; + struct iommu_fwspec *fwspec = dev->iommu_fwspec; 
+ struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv; struct arm_smmu_device *smmu = cfg->smmu; struct arm_smmu_smr *smrs = smmu->smrs; struct iommu_group *group; @@ -1135,19 +1132,19 @@ static int arm_smmu_master_alloc_smes(struct device *dev) mutex_lock(&smmu->stream_map_mutex); /* Figure out a viable stream map entry allocation */ - for_each_cfg_sme(cfg, i, idx) { + for_each_cfg_sme(fwspec, i, idx) { if (idx != INVALID_SMENDX) { ret = -EEXIST; goto out_err; } - ret = arm_smmu_find_sme(smmu, cfg->streamids[i], 0); + ret = arm_smmu_find_sme(smmu, fwspec->ids[i], 0); if (ret < 0) goto out_err; idx = ret; if (smrs && smmu->s2crs[idx].count == 0) { - smrs[idx].id = cfg->streamids[i]; + smrs[idx].id = fwspec->ids[i]; smrs[idx].mask = 0; /* We don't currently share SMRs */ smrs[idx].valid = true; } @@ -1165,7 +1162,7 @@ static int arm_smmu_master_alloc_smes(struct device *dev) iommu_group_put(group); /* It worked! Now, poke the actual hardware */ - for_each_cfg_sme(cfg, i, idx) { + for_each_cfg_sme(fwspec, i, idx) { arm_smmu_write_sme(smmu, idx); smmu->s2crs[idx].group = group; } @@ -1182,13 +1179,14 @@ out_err: return ret; } -static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg) +static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec) { - struct arm_smmu_device *smmu = cfg->smmu; + struct arm_smmu_device *smmu = fwspec_smmu(fwspec); + struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv; int i, idx; mutex_lock(&smmu->stream_map_mutex); - for_each_cfg_sme(cfg, i, idx) { + for_each_cfg_sme(fwspec, i, idx) { if (arm_smmu_free_sme(smmu, idx)) arm_smmu_write_sme(smmu, idx); cfg->smendx[i] = INVALID_SMENDX; @@ -1197,7 +1195,7 @@ static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg) } static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, - struct arm_smmu_master_cfg *cfg) + struct iommu_fwspec *fwspec) { struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_s2cr *s2cr = smmu->s2crs; @@ -1214,7 +1212,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA) type = S2CR_TYPE_BYPASS; - for_each_cfg_sme(cfg, i, idx) { + for_each_cfg_sme(fwspec, i, idx) { if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx) continue; @@ -1229,16 +1227,18 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) { int ret; + struct iommu_fwspec *fwspec = dev->iommu_fwspec; + struct arm_smmu_device *smmu; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_master_cfg *cfg = dev->archdata.iommu; - if (!cfg) { + if (!fwspec || fwspec->ops != &arm_smmu_ops) { dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); return -ENXIO; } + smmu = fwspec_smmu(fwspec); /* Ensure that the domain is finalised */ - ret = arm_smmu_init_domain_context(domain, cfg->smmu); + ret = arm_smmu_init_domain_context(domain, smmu); if (ret < 0) return ret; @@ -1246,15 +1246,15 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) * Sanity check the domain. We don't support domains across * different SMMUs. 
*/ - if (smmu_domain->smmu != cfg->smmu) { + if (smmu_domain->smmu != smmu) { dev_err(dev, "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", - dev_name(smmu_domain->smmu->dev), dev_name(cfg->smmu->dev)); + dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); return -EINVAL; } /* Looks ok, so add the device to the domain */ - return arm_smmu_domain_add_master(smmu_domain, cfg); + return arm_smmu_domain_add_master(smmu_domain, fwspec); } static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, @@ -1375,57 +1375,72 @@ static bool arm_smmu_capable(enum iommu_cap cap) static int arm_smmu_add_device(struct device *dev) { + struct arm_smmu_device *smmu; struct arm_smmu_master_cfg *cfg; + struct iommu_fwspec *fwspec; int i, ret; - ret = arm_smmu_register_legacy_master(dev); - cfg = dev->archdata.iommu; + ret = arm_smmu_register_legacy_master(dev, &smmu); + fwspec = dev->iommu_fwspec; if (ret) goto out_free; ret = -EINVAL; - for (i = 0; i < cfg->num_streamids; i++) { - u16 sid = cfg->streamids[i]; + for (i = 0; i < fwspec->num_ids; i++) { + u16 sid = fwspec->ids[i]; - if (sid & ~cfg->smmu->streamid_mask) { + if (sid & ~smmu->streamid_mask) { dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n", sid, cfg->smmu->streamid_mask); goto out_free; } - cfg->smendx[i] = INVALID_SMENDX; } + ret = -ENOMEM; + cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]), + GFP_KERNEL); + if (!cfg) + goto out_free; + + cfg->smmu = smmu; + fwspec->iommu_priv = cfg; + while (i--) + cfg->smendx[i] = INVALID_SMENDX; + ret = arm_smmu_master_alloc_smes(dev); - if (!ret) - return ret; + if (ret) + goto out_free; + + return 0; out_free: - kfree(cfg); - dev->archdata.iommu = NULL; + if (fwspec) + kfree(fwspec->iommu_priv); + iommu_fwspec_free(dev); return ret; } static void arm_smmu_remove_device(struct device *dev) { - struct arm_smmu_master_cfg *cfg = dev->archdata.iommu; + struct iommu_fwspec *fwspec = dev->iommu_fwspec; - if (!cfg) + if (!fwspec || fwspec->ops != &arm_smmu_ops) return; - arm_smmu_master_free_smes(cfg); + arm_smmu_master_free_smes(fwspec); iommu_group_remove_device(dev); - kfree(cfg); - dev->archdata.iommu = NULL; + kfree(fwspec->iommu_priv); + iommu_fwspec_free(dev); } static struct iommu_group *arm_smmu_device_group(struct device *dev) { - struct arm_smmu_master_cfg *cfg = dev->archdata.iommu; - struct arm_smmu_device *smmu = cfg->smmu; + struct iommu_fwspec *fwspec = dev->iommu_fwspec; + struct arm_smmu_device *smmu = fwspec_smmu(fwspec); struct iommu_group *group = NULL; int i, idx; - for_each_cfg_sme(cfg, i, idx) { + for_each_cfg_sme(fwspec, i, idx) { if (group && smmu->s2crs[idx].group && group != smmu->s2crs[idx].group) return ERR_PTR(-EINVAL); @@ -1936,6 +1951,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) } } + of_iommu_set_ops(dev->of_node, &arm_smmu_ops); platform_set_drvdata(pdev, smmu); arm_smmu_device_reset(smmu); return 0; -- cgit v1.2.3 From 021bb8420d44cf56102d44fca9af628625e75482 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 14 Sep 2016 15:26:46 +0100 Subject: iommu/arm-smmu: Wire up generic configuration support With everything else now in place, fill in an of_xlate callback and the appropriate registration to plumb into the generic configuration machinery, and watch everything just work. 
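A rough sketch of what the of_xlate callback below does with the generic binding's cells; SMR_MASK_SHIFT is 16 in this driver (assumed here, the value is not shown in these hunks) and the IDs are invented:

	/* "iommus = <&smmu 0x400 0x3f>;" arrives as args[0] and args[1] */
	u32 sid  = 0x400;		/* args->args[0] */
	u32 mask = 0x3f;		/* args->args[1], optional */
	u32 fwid = (u16)sid | ((u32)(u16)mask << 16 /* SMR_MASK_SHIFT */);

	/* arm_smmu_add_device() later unpacks the same word: */
	u16 unpacked_sid  = fwid;	/* low 16 bits  -> 0x400 */
	u16 unpacked_mask = fwid >> 16;	/* high 16 bits -> 0x3f  */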
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 168 ++++++++++++++++++++++++++++++----------------- 1 file changed, 108 insertions(+), 60 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 9dbb6a37e625..fd6cc19c4ced 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -418,6 +418,8 @@ struct arm_smmu_option_prop { static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0); +static bool using_legacy_binding, using_generic_binding; + static struct arm_smmu_option_prop arm_smmu_options[] = { { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, { 0, NULL}, @@ -817,12 +819,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, if (smmu_domain->smmu) goto out_unlock; - /* We're bypassing these SIDs, so don't allocate an actual context */ - if (domain->type == IOMMU_DOMAIN_DMA) { - smmu_domain->smmu = smmu; - goto out_unlock; - } - /* * Mapping the requested stage onto what we support is surprisingly * complicated, mainly because the spec allows S1+S2 SMMUs without @@ -981,7 +977,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) void __iomem *cb_base; int irq; - if (!smmu || domain->type == IOMMU_DOMAIN_DMA) + if (!smmu) return; /* @@ -1015,8 +1011,8 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) if (!smmu_domain) return NULL; - if (type == IOMMU_DOMAIN_DMA && - iommu_get_dma_cookie(&smmu_domain->domain)) { + if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding || + iommu_get_dma_cookie(&smmu_domain->domain))) { kfree(smmu_domain); return NULL; } @@ -1133,19 +1129,22 @@ static int arm_smmu_master_alloc_smes(struct device *dev) mutex_lock(&smmu->stream_map_mutex); /* Figure out a viable stream map entry allocation */ for_each_cfg_sme(fwspec, i, idx) { + u16 sid = fwspec->ids[i]; + u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT; + if (idx != INVALID_SMENDX) { ret = -EEXIST; goto out_err; } - ret = arm_smmu_find_sme(smmu, fwspec->ids[i], 0); + ret = arm_smmu_find_sme(smmu, sid, mask); if (ret < 0) goto out_err; idx = ret; if (smrs && smmu->s2crs[idx].count == 0) { - smrs[idx].id = fwspec->ids[i]; - smrs[idx].mask = 0; /* We don't currently share SMRs */ + smrs[idx].id = sid; + smrs[idx].mask = mask; smrs[idx].valid = true; } smmu->s2crs[idx].count++; @@ -1203,15 +1202,6 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, u8 cbndx = smmu_domain->cfg.cbndx; int i, idx; - /* - * FIXME: This won't be needed once we have IOMMU-backed DMA ops - * for all devices behind the SMMU. Note that we need to take - * care configuring SMRs for devices both a platform_device and - * and a PCI device (i.e. a PCI host controller) - */ - if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA) - type = S2CR_TYPE_BYPASS; - for_each_cfg_sme(fwspec, i, idx) { if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx) continue; @@ -1373,25 +1363,50 @@ static bool arm_smmu_capable(enum iommu_cap cap) } } +static int arm_smmu_match_node(struct device *dev, void *data) +{ + return dev->of_node == data; +} + +static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np) +{ + struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL, + np, arm_smmu_match_node); + put_device(dev); + return dev ? 
dev_get_drvdata(dev) : NULL; +} + static int arm_smmu_add_device(struct device *dev) { struct arm_smmu_device *smmu; struct arm_smmu_master_cfg *cfg; - struct iommu_fwspec *fwspec; + struct iommu_fwspec *fwspec = dev->iommu_fwspec; int i, ret; - ret = arm_smmu_register_legacy_master(dev, &smmu); - fwspec = dev->iommu_fwspec; - if (ret) - goto out_free; + if (using_legacy_binding) { + ret = arm_smmu_register_legacy_master(dev, &smmu); + fwspec = dev->iommu_fwspec; + if (ret) + goto out_free; + } else if (fwspec) { + smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode)); + } else { + return -ENODEV; + } ret = -EINVAL; for (i = 0; i < fwspec->num_ids; i++) { u16 sid = fwspec->ids[i]; + u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT; if (sid & ~smmu->streamid_mask) { dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n", - sid, cfg->smmu->streamid_mask); + sid, smmu->streamid_mask); + goto out_free; + } + if (mask & ~smmu->smr_mask_mask) { + dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n", + sid, smmu->smr_mask_mask); goto out_free; } } @@ -1503,6 +1518,19 @@ out_unlock: return ret; } +static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) +{ + u32 fwid = 0; + + if (args->args_count > 0) + fwid |= (u16)args->args[0]; + + if (args->args_count > 1) + fwid |= (u16)args->args[1] << SMR_MASK_SHIFT; + + return iommu_fwspec_add_ids(dev, &fwid, 1); +} + static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, @@ -1517,6 +1545,7 @@ static struct iommu_ops arm_smmu_ops = { .device_group = arm_smmu_device_group, .domain_get_attr = arm_smmu_domain_get_attr, .domain_set_attr = arm_smmu_domain_set_attr, + .of_xlate = arm_smmu_of_xlate, .pgsize_bitmap = -1UL, /* Restricted during device attach */ }; @@ -1870,6 +1899,19 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) struct arm_smmu_device *smmu; struct device *dev = &pdev->dev; int num_irqs, i, err; + bool legacy_binding; + + legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL); + if (legacy_binding && !using_generic_binding) { + if (!using_legacy_binding) + pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n"); + using_legacy_binding = true; + } else if (!legacy_binding && !using_legacy_binding) { + using_generic_binding = true; + } else { + dev_err(dev, "not probing due to mismatched DT properties\n"); + return -ENODEV; + } smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); if (!smmu) { @@ -1954,6 +1996,20 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) of_iommu_set_ops(dev->of_node, &arm_smmu_ops); platform_set_drvdata(pdev, smmu); arm_smmu_device_reset(smmu); + + /* Oh, for a proper bus abstraction */ + if (!iommu_present(&platform_bus_type)) + bus_set_iommu(&platform_bus_type, &arm_smmu_ops); +#ifdef CONFIG_ARM_AMBA + if (!iommu_present(&amba_bustype)) + bus_set_iommu(&amba_bustype, &arm_smmu_ops); +#endif +#ifdef CONFIG_PCI + if (!iommu_present(&pci_bus_type)) { + pci_request_acs(); + bus_set_iommu(&pci_bus_type, &arm_smmu_ops); + } +#endif return 0; } @@ -1983,41 +2039,14 @@ static struct platform_driver arm_smmu_driver = { static int __init arm_smmu_init(void) { - struct device_node *np; - int ret; - - /* - * Play nice with systems that don't have an ARM SMMU by checking that - * an ARM SMMU exists in the system before proceeding with the driver - * and IOMMU bus operation registration. 
- */ - np = of_find_matching_node(NULL, arm_smmu_of_match); - if (!np) - return 0; - - of_node_put(np); - - ret = platform_driver_register(&arm_smmu_driver); - if (ret) - return ret; - - /* Oh, for a proper bus abstraction */ - if (!iommu_present(&platform_bus_type)) - bus_set_iommu(&platform_bus_type, &arm_smmu_ops); - -#ifdef CONFIG_ARM_AMBA - if (!iommu_present(&amba_bustype)) - bus_set_iommu(&amba_bustype, &arm_smmu_ops); -#endif + static bool registered; + int ret = 0; -#ifdef CONFIG_PCI - if (!iommu_present(&pci_bus_type)) { - pci_request_acs(); - bus_set_iommu(&pci_bus_type, &arm_smmu_ops); + if (!registered) { + ret = platform_driver_register(&arm_smmu_driver); + registered = !ret; } -#endif - - return 0; + return ret; } static void __exit arm_smmu_exit(void) @@ -2028,6 +2057,25 @@ static void __exit arm_smmu_exit(void) subsys_initcall(arm_smmu_init); module_exit(arm_smmu_exit); +static int __init arm_smmu_of_init(struct device_node *np) +{ + int ret = arm_smmu_init(); + + if (ret) + return ret; + + if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root)) + return -ENODEV; + + return 0; +} +IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init); +IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init); +IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init); +IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init); +IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init); +IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init); + MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); MODULE_AUTHOR("Will Deacon "); MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 455eb7d34ad11b09490f70c33973f9f3e31c4df6 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 12 Sep 2016 17:13:58 +0100 Subject: iommu/arm-smmu: Set domain geometry For non-aperture-based IOMMUs, the domain geometry seems to have become the de-facto way of indicating the input address space size. That is quite a useful thing from the users' perspective, so let's do the same. 
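For context, a sketch of how a consumer might read the geometry being set here; the iommu_domain_alloc() call is standard IOMMU API of this era, but the surrounding code is illustrative only:

	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);

	if (dom && dom->geometry.force_aperture) {
		/* The SMMU now reports its input address range, e.g. for a
		 * 48-bit IAS, aperture_end == (1UL << 48) - 1. An IOVA
		 * allocator can be clamped to this range up front. */
		dma_addr_t first = dom->geometry.aperture_start;
		dma_addr_t last  = dom->geometry.aperture_end;
		/* ... size the IOVA space from [first, last] ... */
	}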
Reviewed-by: Eric Auger
Tested-by: Lorenzo Pieralisi
Signed-off-by: Robin Murphy
Signed-off-by: Will Deacon
---
 drivers/iommu/arm-smmu-v3.c | 2 ++
 drivers/iommu/arm-smmu.c    | 2 ++
 2 files changed, 4 insertions(+)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 0c45c1e02e04..15c01c3cd540 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1551,6 +1551,8 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
 		return -ENOMEM;

 	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+	domain->geometry.aperture_end = (1UL << ias) - 1;
+	domain->geometry.force_aperture = true;
 	smmu_domain->pgtbl_ops = pgtbl_ops;

 	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index fd6cc19c4ced..c841eb7a1a74 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -939,6 +939,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,

 	/* Update the domain's page sizes to reflect the page table format */
 	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+	domain->geometry.aperture_end = (1UL << ias) - 1;
+	domain->geometry.force_aperture = true;

 	/* Initialise the context bank with our page table cfg */
 	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
-- cgit v1.2.3

From 44bb7e243bd4b4e5c79de2452cd9762582f58925 Mon Sep 17 00:00:00 2001
From: Robin Murphy
Date: Mon, 12 Sep 2016 17:13:59 +0100
Subject: iommu/dma: Add support for mapping MSIs

When an MSI doorbell is located downstream of an IOMMU, attaching
devices to a DMA ops domain and switching on translation leads to a rude
shock when their attempt to write to the physical address returned by
the irqchip driver faults (or worse, writes into some already-mapped
buffer) and no interrupt is forthcoming.

Address this by adding a hook for relevant irqchip drivers to call from
their compose_msi_msg() callback, to swizzle the physical address with an
appropriately-mapped IOVA for any device attached to one of our DMA ops
domains.

Acked-by: Thomas Gleixner
Acked-by: Marc Zyngier
Signed-off-by: Robin Murphy
Signed-off-by: Will Deacon
---
 drivers/iommu/dma-iommu.c        | 136 ++++++++++++++++++++++++++++++++++-----
 drivers/irqchip/irq-gic-v2m.c    |   3 +
 drivers/irqchip/irq-gic-v3-its.c |   3 +
 include/linux/dma-iommu.h        |   9 +++
 4 files changed, 136 insertions(+), 15 deletions(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 00c8a08d56e7..4329d18080cf 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -25,10 +25,28 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include

+struct iommu_dma_msi_page {
+	struct list_head	list;
+	dma_addr_t		iova;
+	phys_addr_t		phys;
+};
+
+struct iommu_dma_cookie {
+	struct iova_domain	iovad;
+	struct list_head	msi_page_list;
+	spinlock_t		msi_lock;
+};
+
+static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
+{
+	return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+}
+
 int iommu_dma_init(void)
 {
 	return iova_cache_get();
@@ -43,15 +61,19 @@ int iommu_dma_init(void)
  */
 int iommu_get_dma_cookie(struct iommu_domain *domain)
 {
-	struct iova_domain *iovad;
+	struct iommu_dma_cookie *cookie;

 	if (domain->iova_cookie)
 		return -EEXIST;

-	iovad = kzalloc(sizeof(*iovad), GFP_KERNEL);
-	domain->iova_cookie = iovad;
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (!cookie)
+		return -ENOMEM;

-	return iovad ?
0 : -ENOMEM; + spin_lock_init(&cookie->msi_lock); + INIT_LIST_HEAD(&cookie->msi_page_list); + domain->iova_cookie = cookie; + return 0; } EXPORT_SYMBOL(iommu_get_dma_cookie); @@ -63,14 +85,20 @@ EXPORT_SYMBOL(iommu_get_dma_cookie); */ void iommu_put_dma_cookie(struct iommu_domain *domain) { - struct iova_domain *iovad = domain->iova_cookie; + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iommu_dma_msi_page *msi, *tmp; - if (!iovad) + if (!cookie) return; - if (iovad->granule) - put_iova_domain(iovad); - kfree(iovad); + if (cookie->iovad.granule) + put_iova_domain(&cookie->iovad); + + list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) { + list_del(&msi->list); + kfree(msi); + } + kfree(cookie); domain->iova_cookie = NULL; } EXPORT_SYMBOL(iommu_put_dma_cookie); @@ -88,7 +116,7 @@ EXPORT_SYMBOL(iommu_put_dma_cookie); */ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size) { - struct iova_domain *iovad = domain->iova_cookie; + struct iova_domain *iovad = cookie_iovad(domain); unsigned long order, base_pfn, end_pfn; if (!iovad) @@ -155,7 +183,7 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent) static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, dma_addr_t dma_limit) { - struct iova_domain *iovad = domain->iova_cookie; + struct iova_domain *iovad = cookie_iovad(domain); unsigned long shift = iova_shift(iovad); unsigned long length = iova_align(iovad, size) >> shift; @@ -171,7 +199,7 @@ static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, /* The IOVA allocator knows what we mapped, so just unmap whatever that was */ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr) { - struct iova_domain *iovad = domain->iova_cookie; + struct iova_domain *iovad = cookie_iovad(domain); unsigned long shift = iova_shift(iovad); unsigned long pfn = dma_addr >> shift; struct iova *iova = find_iova(iovad, pfn); @@ -294,7 +322,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, void (*flush_page)(struct device *, const void *, phys_addr_t)) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iova_domain *iovad = domain->iova_cookie; + struct iova_domain *iovad = cookie_iovad(domain); struct iova *iova; struct page **pages; struct sg_table sgt; @@ -386,7 +414,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, { dma_addr_t dma_addr; struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iova_domain *iovad = domain->iova_cookie; + struct iova_domain *iovad = cookie_iovad(domain); phys_addr_t phys = page_to_phys(page) + offset; size_t iova_off = iova_offset(iovad, phys); size_t len = iova_align(iovad, size + iova_off); @@ -495,7 +523,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int prot) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iova_domain *iovad = domain->iova_cookie; + struct iova_domain *iovad = cookie_iovad(domain); struct iova *iova; struct scatterlist *s, *prev = NULL; dma_addr_t dma_addr; @@ -587,3 +615,81 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return dma_addr == DMA_ERROR_CODE; } + +static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, + phys_addr_t msi_addr, struct iommu_domain *domain) +{ + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iommu_dma_msi_page *msi_page; + struct iova_domain *iovad = &cookie->iovad; + struct iova *iova; + 
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; + + msi_addr &= ~(phys_addr_t)iova_mask(iovad); + list_for_each_entry(msi_page, &cookie->msi_page_list, list) + if (msi_page->phys == msi_addr) + return msi_page; + + msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC); + if (!msi_page) + return NULL; + + iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev)); + if (!iova) + goto out_free_page; + + msi_page->phys = msi_addr; + msi_page->iova = iova_dma_addr(iovad, iova); + if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot)) + goto out_free_iova; + + INIT_LIST_HEAD(&msi_page->list); + list_add(&msi_page->list, &cookie->msi_page_list); + return msi_page; + +out_free_iova: + __free_iova(iovad, iova); +out_free_page: + kfree(msi_page); + return NULL; +} + +void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) +{ + struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq)); + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + struct iommu_dma_cookie *cookie; + struct iommu_dma_msi_page *msi_page; + phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo; + unsigned long flags; + + if (!domain || !domain->iova_cookie) + return; + + cookie = domain->iova_cookie; + + /* + * We disable IRQs to rule out a possible inversion against + * irq_desc_lock if, say, someone tries to retarget the affinity + * of an MSI from within an IPI handler. + */ + spin_lock_irqsave(&cookie->msi_lock, flags); + msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); + spin_unlock_irqrestore(&cookie->msi_lock, flags); + + if (WARN_ON(!msi_page)) { + /* + * We're called from a void callback, so the best we can do is + * 'fail' by filling the message with obviously bogus values. + * Since we got this far due to an IOMMU being present, it's + * not like the existing address would have worked anyway... 
+ */ + msg->address_hi = ~0U; + msg->address_lo = ~0U; + msg->data = ~0U; + } else { + msg->address_hi = upper_32_bits(msi_page->iova); + msg->address_lo &= iova_mask(&cookie->iovad); + msg->address_lo += lower_32_bits(msi_page->iova); + } +} diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index 35eb7ac5d21f..863e073c6f7f 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -16,6 +16,7 @@ #define pr_fmt(fmt) "GICv2m: " fmt #include +#include #include #include #include @@ -108,6 +109,8 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET) msg->data -= v2m->spi_offset; + + iommu_dma_map_msi_msg(data->irq, msg); } static struct irq_chip gicv2m_irq_chip = { diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 36b9c28a5c91..98ff669d5962 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -655,6 +656,8 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) msg->address_lo = addr & ((1UL << 32) - 1); msg->address_hi = addr >> 32; msg->data = its_get_event_id(d); + + iommu_dma_map_msi_msg(d->irq, msg); } static struct irq_chip its_irq_chip = { diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 81c5c8d167ad..5ee806e41b5c 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -21,6 +21,7 @@ #ifdef CONFIG_IOMMU_DMA #include +#include int iommu_dma_init(void); @@ -62,9 +63,13 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int iommu_dma_supported(struct device *dev, u64 mask); int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); +/* The DMA API isn't _quite_ the whole story, though... */ +void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg); + #else struct iommu_domain; +struct msi_msg; static inline int iommu_dma_init(void) { @@ -80,6 +85,10 @@ static inline void iommu_put_dma_cookie(struct iommu_domain *domain) { } +static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) +{ +} + #endif /* CONFIG_IOMMU_DMA */ #endif /* __KERNEL__ */ #endif /* __DMA_IOMMU_H */ -- cgit v1.2.3 From fade1ec055dc6b6373e7487906b7899b41d0c46f Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 12 Sep 2016 17:14:00 +0100 Subject: iommu/dma: Avoid PCI host bridge windows With our DMA ops enabled for PCI devices, we should avoid allocating IOVAs which a host bridge might misinterpret as peer-to-peer DMA and lead to faults, corruption or other badness. To be safe, punch out holes for all of the relevant host bridge's windows when initialising a DMA domain for a PCI device. 
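To make the address swizzling in the MSI patch above concrete, here is a worked example with invented addresses (a GIC-style doorbell and a 4K IOVA granule):

	phys_addr_t doorbell = 0x08020040;		/* from the irqchip driver */
	phys_addr_t msi_addr = doorbell & ~0xfffULL;	/* page to map: 0x08020000 */

	/* Suppose __alloc_iova() hands back IOVA 0xfffff000 for that page
	 * and iommu_map() succeeds. The composed message keeps the
	 * doorbell's page offset but substitutes the IOVA page address:
	 *
	 *   address = 0xfffff000 | (0x08020040 & 0xfff) = 0xfffff040
	 *
	 * which is the address the device must now write to for the MSI
	 * to arrive. */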
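The hole-punching just described is mechanically simple; a sketch with a made-up bridge window, assuming a 4K IOVA granule:

	/* A host bridge window of 0x80000000..0x800fffff with zero offset */
	unsigned long lo = iova_pfn(iovad, 0x80000000UL);	/* pfn 0x80000 */
	unsigned long hi = iova_pfn(iovad, 0x800fffffUL);	/* pfn 0x800ff */

	/* Mark those pfns used so the allocator can never return an IOVA
	 * the bridge would decode as peer-to-peer traffic. */
	reserve_iova(iovad, lo, hi);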
CC: Marek Szyprowski CC: Inki Dae Reported-by: Lorenzo Pieralisi Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- arch/arm64/mm/dma-mapping.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_iommu.h | 2 +- drivers/iommu/dma-iommu.c | 25 ++++++++++++++++++++++++- include/linux/dma-iommu.h | 3 ++- 4 files changed, 28 insertions(+), 4 deletions(-) (limited to 'drivers/iommu') diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index c4284c432ae8..610d8e53011e 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -827,7 +827,7 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops, * then the IOMMU core will have already configured a group for this * device, and allocated the default domain for that group. */ - if (!domain || iommu_dma_init_domain(domain, dma_base, size)) { + if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) { pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", dev_name(dev)); return false; diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h index c8de4913fdbe..87f6b5672e11 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h +++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h @@ -66,7 +66,7 @@ static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv, if (ret) goto free_domain; - ret = iommu_dma_init_domain(domain, start, size); + ret = iommu_dma_init_domain(domain, start, size, NULL); if (ret) goto put_cookie; diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 4329d18080cf..c5ab8667e6f2 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -103,18 +104,38 @@ void iommu_put_dma_cookie(struct iommu_domain *domain) } EXPORT_SYMBOL(iommu_put_dma_cookie); +static void iova_reserve_pci_windows(struct pci_dev *dev, + struct iova_domain *iovad) +{ + struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); + struct resource_entry *window; + unsigned long lo, hi; + + resource_list_for_each_entry(window, &bridge->windows) { + if (resource_type(window->res) != IORESOURCE_MEM && + resource_type(window->res) != IORESOURCE_IO) + continue; + + lo = iova_pfn(iovad, window->res->start - window->offset); + hi = iova_pfn(iovad, window->res->end - window->offset); + reserve_iova(iovad, lo, hi); + } +} + /** * iommu_dma_init_domain - Initialise a DMA mapping domain * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() * @base: IOVA at which the mappable address space starts * @size: Size of IOVA space + * @dev: Device the domain is being initialised for * * @base and @size should be exact multiples of IOMMU page granularity to * avoid rounding surprises. If necessary, we reserve the page at address 0 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but * any change which could make prior IOVAs invalid will fail. 
 */
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
+int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+		u64 size, struct device *dev)
 {
 	struct iova_domain *iovad = cookie_iovad(domain);
 	unsigned long order, base_pfn, end_pfn;
@@ -152,6 +173,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size
 		iovad->dma_32bit_pfn = end_pfn;
 	} else {
 		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+		if (dev && dev_is_pci(dev))
+			iova_reserve_pci_windows(to_pci_dev(dev), iovad);
 	}
 	return 0;
 }
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 5ee806e41b5c..32c589062bd9 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -30,7 +30,8 @@ int iommu_get_dma_cookie(struct iommu_domain *domain);
 void iommu_put_dma_cookie(struct iommu_domain *domain);

 /* Setup call for arch DMA mapping code */
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size);
+int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+		u64 size, struct device *dev);

 /* General helpers for DMA-API <-> IOMMU-API interaction */
 int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
-- cgit v1.2.3

From 82db33dc5e49fb625262d81125625d07a0d6184e Mon Sep 17 00:00:00 2001
From: Robin Murphy
Date: Tue, 13 Sep 2016 18:02:02 +0100
Subject: iommu/io-pgtable-arm: Check for v7s-incapable systems

On machines with no 32-bit addressable RAM whatsoever, we shouldn't even
touch the v7s format as it's never going to work.

Fixes: e5fc9753b1a8 ("iommu/io-pgtable: Add ARMv7 short descriptor support")
Reported-by: Eric Auger
Tested-by: Eric Auger
Signed-off-by: Robin Murphy
Signed-off-by: Will Deacon
---
 drivers/iommu/io-pgtable-arm-v7s.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index def8ca1c982d..f50e51c1a9c8 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -633,6 +633,10 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
 {
 	struct arm_v7s_io_pgtable *data;

+#ifdef PHYS_OFFSET
+	if (upper_32_bits(PHYS_OFFSET))
+		return NULL;
+#endif
 	if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS)
 		return NULL;
-- cgit v1.2.3

From 134414fffc7fdad81e228d232f8ae12a0456127a Mon Sep 17 00:00:00 2001
From: Baoquan He
Date: Thu, 15 Sep 2016 16:50:50 +0800
Subject: iommu/amd: Clean up the cmpxchg64 invocation

Use cmpxchg64() the way it is designed to be used, i.e. compare its
return value against the expected old PTE value to detect a lost race,
and keep the call consistent with other call sites.

Signed-off-by: Baoquan He
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd_iommu.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index bebe728019af..897940c79a54 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1342,7 +1342,8 @@ static u64 *alloc_pte(struct protection_domain *domain,

 	__npte = PM_LEVEL_PDE(level, virt_to_phys(page));

-	if (cmpxchg64(pte, __pte, __npte)) {
+	/* pte could have been changed somewhere. */
+	if (cmpxchg64(pte, __pte, __npte) != __pte) {
 		free_page((unsigned long)page);
 		continue;
 	}
-- cgit v1.2.3

From 5c87f62dd1ea9dab8101a020fa2c6ab47c37797e Mon Sep 17 00:00:00 2001
From: Baoquan He
Date: Thu, 15 Sep 2016 16:50:51 +0800
Subject: iommu/amd: Use standard bitmap operation to set bitmap

Using the standard bitmap helper is more readable and safer than the
open-coded assignment.
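Why the comparison against __pte matters in the cmpxchg64() clean-up above can be shown in isolation; this is a sketch of the semantics, not driver code:

	/* cmpxchg64(ptr, old, new) returns the value that was actually in
	 * *ptr; the swap happened if and only if that value equals 'old'. */
	u64 seen = cmpxchg64(pte, __pte, __npte);

	if (seen != __pte) {
		/* Another CPU installed a PTE between our read and the
		 * swap; our freshly allocated page-table page is unused. */
		free_page((unsigned long)page);
	}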
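For the bitmap conversion itself, the difference in miniature; the declaration is illustrative and MAX_DOMAIN_ID is an assumed name:

	DECLARE_BITMAP(pd_bitmap, MAX_DOMAIN_ID);

	pd_bitmap[0] = 1;		/* old: assigns the whole first long,
					 * silently clearing any other bits
					 * already set in that word */
	__set_bit(0, pd_bitmap);	/* new: touches only bit 0 and makes
					 * the intent explicit */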
Signed-off-by: Baoquan He
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd_iommu_init.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index cd1713631a4a..157e93421fb8 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -2285,7 +2286,7 @@ static int __init early_amd_iommu_init(void)
 	 * never allocate domain 0 because its used as the non-allocated and
 	 * error value placeholder
 	 */
-	amd_iommu_pd_alloc_bitmap[0] = 1;
+	__set_bit(0, amd_iommu_pd_alloc_bitmap);

 	spin_lock_init(&amd_iommu_pd_lock);
-- cgit v1.2.3

From c3db901c54466a9c135d1e6e95fec452e8a42666 Mon Sep 17 00:00:00 2001
From: Baoquan He
Date: Thu, 15 Sep 2016 16:50:52 +0800
Subject: iommu/amd: Free domain id when freeing a domain of struct dma_ops_domain

The current code missed freeing the domain id when freeing a domain of
struct dma_ops_domain.

Signed-off-by: Baoquan He
Fixes: ec487d1a110a ('x86, AMD IOMMU: add domain allocation and deallocation functions')
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd_iommu.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 897940c79a54..b5b117b44026 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1723,6 +1723,9 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)

 	free_pagetable(&dom->domain);

+	if (dom->domain.id)
+		domain_id_free(dom->domain.id);
+
 	kfree(dom);
 }
-- cgit v1.2.3

From 09284b9c4a80299b25b913c976dcaced09cc12f4 Mon Sep 17 00:00:00 2001
From: Baoquan He
Date: Tue, 20 Sep 2016 09:05:34 +0800
Subject: iommu/amd: No need to wait iommu completion if no dte irq entry change

This is a clean-up. In get_irq_table(), iommu_completion_wait() needs to
be called only if a DTE entry is changed; otherwise it can be skipped.

Signed-off-by: Baoquan He
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd_iommu.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b5b117b44026..a2479d0df292 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3634,7 +3634,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)

 	table = irq_lookup_table[devid];
 	if (table)
-		goto out;
+		goto out_unlock;

 	alias = amd_iommu_alias_table[devid];
 	table = irq_lookup_table[alias];
@@ -3648,7 +3648,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 	/* Nothing there yet, allocate new irq remapping table */
 	table = kzalloc(sizeof(*table), GFP_ATOMIC);
 	if (!table)
-		goto out;
+		goto out_unlock;

 	/* Initialize table spin-lock */
 	spin_lock_init(&table->lock);
@@ -3661,7 +3661,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 	if (!table->table) {
 		kfree(table);
 		table = NULL;
-		goto out_unlock;
+		goto out_unlock;
 	}

 	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
-- cgit v1.2.3
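From the three goto changes in this last hunk, the tail of get_irq_table() evidently has the following shape; this is an inferred sketch, and the lock name is a stand-in since it does not appear in the hunks:

	out:
		iommu_completion_wait(iommu);	/* only after a DTE was written */

	out_unlock:
		spin_unlock_irqrestore(&lookup_lock, flags);	/* 'lookup_lock': stand-in */
		return table;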