Diffstat (limited to 'drivers/iommu')
-rw-r--r--   drivers/iommu/Kconfig                |  4
-rw-r--r--   drivers/iommu/Makefile               |  1
-rw-r--r--   drivers/iommu/arm-smmu.c             | 82
-rw-r--r--   drivers/iommu/dmar.c                 |  2
-rw-r--r--   drivers/iommu/intel-iommu.c          | 12
-rw-r--r--   drivers/iommu/intel_irq_remapping.c  | 13
-rw-r--r--   drivers/iommu/iommu-traces.c         | 27
-rw-r--r--   drivers/iommu/iommu.c                | 21
8 files changed, 106 insertions, 56 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index fe302e33f72e..9fd51e51e78b 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -52,7 +52,7 @@ config AMD_IOMMU
 	select PCI_PRI
 	select PCI_PASID
 	select IOMMU_API
-	depends on X86_64 && PCI && ACPI && X86_IO_APIC
+	depends on X86_64 && PCI && ACPI
 	---help---
 	  With this option you can enable support for AMD IOMMU hardware in
 	  your system. An IOMMU is a hardware component which provides
@@ -206,7 +206,7 @@ config SHMOBILE_IPMMU_TLB
 config SHMOBILE_IOMMU
 	bool "IOMMU for Renesas IPMMU/IPMMUI"
 	default n
-	depends on (ARM && ARCH_SHMOBILE)
+	depends on ARM || COMPILE_TEST
 	select IOMMU_API
 	select ARM_DMA_USE_IOMMU
 	select SHMOBILE_IPMMU
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 14c1f474cf11..5d58bf16e9e3 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_IOMMU_API) += iommu-traces.o
 obj-$(CONFIG_OF_IOMMU) += of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index f417e89e1e7e..0f45a489ccf9 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -377,6 +377,7 @@ struct arm_smmu_cfg {
 	u32			cbar;
 	pgd_t			*pgd;
 };
+#define INVALID_IRPTNDX		0xff
 
 #define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
 #define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)
@@ -589,6 +590,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 		ret = IRQ_HANDLED;
 		resume = RESUME_RETRY;
 	} else {
+		dev_err_ratelimited(smmu->dev,
+			"Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+			iova, fsynr, root_cfg->cbndx);
 		ret = IRQ_NONE;
 		resume = RESUME_TERMINATE;
 	}
@@ -777,7 +781,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 #ifdef __BIG_ENDIAN
 	reg |= SCTLR_E;
 #endif
-	writel(reg, cb_base + ARM_SMMU_CB_SCTLR);
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
 }
 
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
@@ -840,7 +844,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (IS_ERR_VALUE(ret)) {
 		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
 			root_cfg->irptndx, irq);
-		root_cfg->irptndx = -1;
+		root_cfg->irptndx = INVALID_IRPTNDX;
 		goto out_free_context;
 	}
 
@@ -869,7 +873,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
 	arm_smmu_tlb_inv_context(root_cfg);
 
-	if (root_cfg->irptndx != -1) {
+	if (root_cfg->irptndx != INVALID_IRPTNDX) {
 		irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
 		free_irq(irq, domain);
 	}
@@ -1558,9 +1562,13 @@ static struct iommu_ops arm_smmu_ops = {
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 {
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-	void __iomem *sctlr_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB_SCTLR;
+	void __iomem *cb_base;
 	int i = 0;
-	u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+	u32 reg;
+
+	/* Clear Global FSR */
+	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+	writel(reg, gr0_base + ARM_SMMU_GR0_sGFSR);
 
 	/* Mark all SMRn as invalid and all S2CRn as bypass */
 	for (i = 0; i < smmu->num_mapping_groups; ++i) {
@@ -1568,33 +1576,38 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 		writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
 	}
 
-	/* Make sure all context banks are disabled */
-	for (i = 0; i < smmu->num_context_banks; ++i)
-		writel_relaxed(0, sctlr_base + ARM_SMMU_CB(smmu, i));
+	/* Make sure all context banks are disabled and clear CB_FSR */
+	for (i = 0; i < smmu->num_context_banks; ++i) {
+		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
+		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
+	}
 
 	/* Invalidate the TLB, just in case */
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
 
+	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+
 	/* Enable fault reporting */
-	scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
+	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
 
 	/* Disable TLB broadcasting. */
-	scr0 |= (sCR0_VMIDPNE | sCR0_PTM);
+	reg |= (sCR0_VMIDPNE | sCR0_PTM);
 
 	/* Enable client access, but bypass when no mapping is found */
-	scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
+	reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
 
 	/* Disable forced broadcasting */
-	scr0 &= ~sCR0_FB;
+	reg &= ~sCR0_FB;
 
 	/* Don't upgrade barriers */
-	scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
+	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
 
 	/* Push the button */
 	arm_smmu_tlb_sync(smmu);
-	writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0);
+	writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sCR0);
 }
 
 static int arm_smmu_id_size_to_bits(int size)
@@ -1699,13 +1712,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
 	smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;
 
-	/* Check that we ioremapped enough */
+	/* Check for size mismatch of SMMU address space from mapped region */
 	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
 	size *= (smmu->pagesize << 1);
-	if (smmu->size < size)
-		dev_warn(smmu->dev,
-			 "device is 0x%lx bytes but only mapped 0x%lx!\n",
-			 size, smmu->size);
+	if (smmu->size != size)
+		dev_warn(smmu->dev, "SMMU address space size (0x%lx) differs "
+			"from mapped region size (0x%lx)!\n", size, smmu->size);
 
 	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
@@ -1780,15 +1792,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	smmu->dev = dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "missing base address/size\n");
-		return -ENODEV;
-	}
-
+	smmu->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(smmu->base))
+		return PTR_ERR(smmu->base);
 	smmu->size = resource_size(res);
-	smmu->base = devm_request_and_ioremap(dev, res);
-	if (!smmu->base)
-		return -EADDRNOTAVAIL;
 
 	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
@@ -1803,12 +1810,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 		smmu->num_context_irqs++;
 	}
 
-	if (num_irqs < smmu->num_global_irqs) {
-		dev_warn(dev, "found %d interrupts but expected at least %d\n",
-			 num_irqs, smmu->num_global_irqs);
-		smmu->num_global_irqs = num_irqs;
+	if (!smmu->num_context_irqs) {
+		dev_err(dev, "found %d interrupts but expected at least %d\n",
+			num_irqs, smmu->num_global_irqs + 1);
+		return -ENODEV;
 	}
-	smmu->num_context_irqs = num_irqs - smmu->num_global_irqs;
 
 	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
 				  GFP_KERNEL);
@@ -1857,8 +1863,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 		goto out_put_parent;
 	}
 
-	arm_smmu_device_reset(smmu);
-
 	for (i = 0; i < smmu->num_global_irqs; ++i) {
 		err = request_irq(smmu->irqs[i],
 				  arm_smmu_global_fault,
@@ -1876,6 +1880,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	spin_lock(&arm_smmu_devices_lock);
 	list_add(&smmu->list, &arm_smmu_devices);
 	spin_unlock(&arm_smmu_devices_lock);
+
+	arm_smmu_device_reset(smmu);
 	return 0;
 
 out_free_irqs:
@@ -1932,7 +1938,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 		free_irq(smmu->irqs[i], smmu);
 
 	/* Turn the thing off */
-	writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+	writel_relaxed(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
 	return 0;
 }
 
@@ -1966,10 +1972,10 @@ static int __init arm_smmu_init(void)
 		return ret;
 
 	/* Oh, for a proper bus abstraction */
-	if (!iommu_present(&platform_bus_type));
+	if (!iommu_present(&platform_bus_type))
 		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
 
-	if (!iommu_present(&amba_bustype));
+	if (!iommu_present(&amba_bustype))
 		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
 
 	return 0;
@@ -1980,7 +1986,7 @@ static void __exit arm_smmu_exit(void)
 	return platform_driver_unregister(&arm_smmu_driver);
 }
 
-module_init(arm_smmu_init);
+subsys_initcall(arm_smmu_init);
 module_exit(arm_smmu_exit);
 
 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 785675a56a10..da2d0d926e40 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -403,7 +403,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
 
 	dev = pci_physfn(dev);
 
-	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
+	for_each_drhd_unit(dmaru) {
 		drhd = container_of(dmaru->hdr,
 				    struct acpi_dmar_hardware_unit,
 				    header);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 15e9b57e9cf0..43b9bfea48fa 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -782,7 +782,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 	int offset;
 
 	BUG_ON(!domain->pgd);
-	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
+
+	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
+		/* Address beyond IOMMU's addressing capabilities. */
+		return NULL;
+
 	parent = domain->pgd;
 
 	while (level > 0) {
@@ -3777,11 +3781,10 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
 static void domain_remove_one_dev_info(struct dmar_domain *domain,
 					  struct pci_dev *pdev)
 {
-	struct device_domain_info *info;
+	struct device_domain_info *info, *tmp;
 	struct intel_iommu *iommu;
 	unsigned long flags;
 	int found = 0;
-	struct list_head *entry, *tmp;
 
 	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
 				pdev->devfn);
@@ -3789,8 +3792,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
 		return;
 
 	spin_lock_irqsave(&device_domain_lock, flags);
-	list_for_each_safe(entry, tmp, &domain->devices) {
-		info = list_entry(entry, struct device_domain_info, link);
+	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
 		if (info->segment == pci_domain_nr(pdev->bus) &&
 		    info->bus == pdev->bus->number &&
 		    info->devfn == pdev->devfn) {
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index f71673dbb23d..b97d70b1abe0 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -525,12 +525,13 @@ static int __init intel_irq_remapping_supported(void)
 	if (disable_irq_remap)
 		return 0;
 	if (irq_remap_broken) {
-		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
-			   "This system BIOS has enabled interrupt remapping\n"
-			   "on a chipset that contains an erratum making that\n"
-			   "feature unstable. To maintain system stability\n"
-			   "interrupt remapping is being disabled. Please\n"
-			   "contact your BIOS vendor for an update\n");
+		printk(KERN_WARNING
+			"This system BIOS has enabled interrupt remapping\n"
+			"on a chipset that contains an erratum making that\n"
+			"feature unstable. To maintain system stability\n"
+			"interrupt remapping is being disabled. Please\n"
+			"contact your BIOS vendor for an update\n");
+		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 		disable_irq_remap = 1;
 		return 0;
 	}
diff --git a/drivers/iommu/iommu-traces.c b/drivers/iommu/iommu-traces.c
new file mode 100644
index 000000000000..bf3b317ff0c1
--- /dev/null
+++ b/drivers/iommu/iommu-traces.c
@@ -0,0 +1,27 @@
+/*
+ * iommu trace points
+ *
+ * Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com>
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/iommu.h>
+
+/* iommu_group_event */
+EXPORT_TRACEPOINT_SYMBOL_GPL(add_device_to_group);
+EXPORT_TRACEPOINT_SYMBOL_GPL(remove_device_from_group);
+
+/* iommu_device_event */
+EXPORT_TRACEPOINT_SYMBOL_GPL(attach_device_to_domain);
+EXPORT_TRACEPOINT_SYMBOL_GPL(detach_device_from_domain);
+
+/* iommu_map_unmap */
+EXPORT_TRACEPOINT_SYMBOL_GPL(map);
+EXPORT_TRACEPOINT_SYMBOL_GPL(unmap);
+
+/* iommu_error */
+EXPORT_TRACEPOINT_SYMBOL_GPL(io_page_fault);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index fbe9ca734f8f..e5555fcfe703 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -29,6 +29,7 @@
 #include <linux/idr.h>
 #include <linux/notifier.h>
 #include <linux/err.h>
+#include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
 static struct ida iommu_group_ida;
@@ -363,6 +364,8 @@ rename:
 	/* Notify any listeners about change to group. */
 	blocking_notifier_call_chain(&group->notifier,
 				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
+
+	trace_add_device_to_group(group->id, dev);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_group_add_device);
@@ -399,6 +402,8 @@ void iommu_group_remove_device(struct device *dev)
 	sysfs_remove_link(group->devices_kobj, device->name);
 	sysfs_remove_link(&dev->kobj, "iommu_group");
 
+	trace_remove_device_from_group(group->id, dev);
+
 	kfree(device->name);
 	kfree(device);
 	dev->iommu_group = NULL;
@@ -680,10 +685,14 @@ EXPORT_SYMBOL_GPL(iommu_domain_free);
 
 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
 {
+	int ret;
 	if (unlikely(domain->ops->attach_dev == NULL))
 		return -ENODEV;
 
-	return domain->ops->attach_dev(domain, dev);
+	ret = domain->ops->attach_dev(domain, dev);
+	if (!ret)
+		trace_attach_device_to_domain(dev);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_attach_device);
@@ -693,6 +702,7 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
 		return;
 
 	domain->ops->detach_dev(domain, dev);
+	trace_detach_device_from_domain(dev);
 }
 EXPORT_SYMBOL_GPL(iommu_detach_device);
 
@@ -807,17 +817,17 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	 * size of the smallest page supported by the hardware
 	 */
 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-		pr_err("unaligned: iova 0x%lx pa 0x%pa size 0x%zx min_pagesz 0x%x\n",
+		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
 		return -EINVAL;
 	}
 
-	pr_debug("map: iova 0x%lx pa 0x%pa size 0x%zx\n", iova, &paddr, size);
+	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
 	while (size) {
 		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
 
-		pr_debug("mapping: iova 0x%lx pa 0x%pa pgsize 0x%zx\n",
+		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
 			 iova, &paddr, pgsize);
 
 		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
@@ -832,6 +842,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	/* unroll mapping in case something went wrong */
 	if (ret)
 		iommu_unmap(domain, orig_iova, orig_size - size);
+	else
+		trace_map(iova, paddr, size);
 
 	return ret;
 }
@@ -880,6 +892,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 		unmapped += unmapped_page;
 	}
 
+	trace_unmap(iova, 0, size);
 	return unmapped;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
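Note on the new tracepoints: drivers/iommu/iommu-traces.c only instantiates and exports the trace events; the event definitions themselves are expected to come from include/trace/events/iommu.h, which is not part of this diff. Below is a minimal, hypothetical sketch of what the "map" event could look like, inferred from the trace_map(iova, paddr, size) call wired into iommu_map() above; the field layout and format string are assumptions, not the actual header contents.

/* Hypothetical sketch of include/trace/events/iommu.h (not part of this diff) */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iommu

#if !defined(_TRACE_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IOMMU_H

#include <linux/tracepoint.h>
#include <linux/types.h>

/* Assumed shape of the "map" event behind trace_map(iova, paddr, size) */
TRACE_EVENT(map,

	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),

	TP_ARGS(iova, paddr, size),

	TP_STRUCT__entry(
		__field(u64, iova)
		__field(u64, paddr)
		__field(size_t, size)
	),

	TP_fast_assign(
		__entry->iova = iova;
		__entry->paddr = paddr;
		__entry->size = size;
	),

	TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%zx",
		  __entry->iova, __entry->paddr, __entry->size)
);

#endif /* _TRACE_IOMMU_H */

/* This part must be outside the include guard */
#include <trace/define_trace.h>

Because iommu-traces.c defines CREATE_TRACE_POINTS before including the header, that one translation unit emits the tracepoint bodies which the EXPORT_TRACEPOINT_SYMBOL_GPL() lines then export; every other includer, such as iommu.c, only sees the declarations. Once built in, the events can be toggled at runtime through the tracing filesystem, e.g. echo 1 > /sys/kernel/debug/tracing/events/iommu/map/enable.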