Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r-- | drivers/iommu/amd_iommu.c | 320
1 file changed, 187 insertions, 133 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 74788fdeb773..8c469b51185f 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -80,11 +80,12 @@
  */
 #define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
 
-static DEFINE_RWLOCK(amd_iommu_devtable_lock);
+static DEFINE_SPINLOCK(amd_iommu_devtable_lock);
+static DEFINE_SPINLOCK(pd_bitmap_lock);
+static DEFINE_SPINLOCK(iommu_table_lock);
 
 /* List of all available dev_data structures */
-static LIST_HEAD(dev_data_list);
-static DEFINE_SPINLOCK(dev_data_list_lock);
+static LLIST_HEAD(dev_data_list);
 
 LIST_HEAD(ioapic_map);
 LIST_HEAD(hpet_map);
@@ -203,40 +204,33 @@ static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
 {
 	struct iommu_dev_data *dev_data;
-	unsigned long flags;
 
 	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
 	if (!dev_data)
 		return NULL;
 
 	dev_data->devid = devid;
-
-	spin_lock_irqsave(&dev_data_list_lock, flags);
-	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
-	spin_unlock_irqrestore(&dev_data_list_lock, flags);
-
 	ratelimit_default_init(&dev_data->rs);
 
+	llist_add(&dev_data->dev_data_list, &dev_data_list);
 	return dev_data;
 }
 
 static struct iommu_dev_data *search_dev_data(u16 devid)
 {
 	struct iommu_dev_data *dev_data;
-	unsigned long flags;
+	struct llist_node *node;
 
-	spin_lock_irqsave(&dev_data_list_lock, flags);
-	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
+	if (llist_empty(&dev_data_list))
+		return NULL;
+
+	node = dev_data_list.first;
+	llist_for_each_entry(dev_data, node, dev_data_list) {
 		if (dev_data->devid == devid)
-			goto out_unlock;
+			return dev_data;
 	}
 
-	dev_data = NULL;
-
-out_unlock:
-	spin_unlock_irqrestore(&dev_data_list_lock, flags);
-
-	return dev_data;
+	return NULL;
 }
 
 static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
@@ -310,6 +304,8 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
 
 	if (dev_data == NULL) {
 		dev_data = alloc_dev_data(devid);
+		if (!dev_data)
+			return NULL;
 
 		if (translation_pre_enabled(iommu))
 			dev_data->defer_attach = true;
@@ -547,6 +543,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
 
 static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 {
+	struct device *dev = iommu->iommu.dev;
 	int type, devid, domid, flags;
 	volatile u32 *event = __evt;
 	int count = 0;
@@ -573,53 +570,53 @@ retry:
 		amd_iommu_report_page_fault(devid, domid, address, flags);
 		return;
 	} else {
-		printk(KERN_ERR "AMD-Vi: Event logged [");
+		dev_err(dev, "AMD-Vi: Event logged [");
 	}
 
 	switch (type) {
 	case EVENT_TYPE_ILL_DEV:
-		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
-		       "address=0x%016llx flags=0x%04x]\n",
-		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-		       address, flags);
+		dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
+			"address=0x%016llx flags=0x%04x]\n",
+			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+			address, flags);
 		dump_dte_entry(devid);
 		break;
 	case EVENT_TYPE_DEV_TAB_ERR:
-		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
-		       "address=0x%016llx flags=0x%04x]\n",
-		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-		       address, flags);
+		dev_err(dev, "DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+			"address=0x%016llx flags=0x%04x]\n",
+			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+			address, flags);
 		break;
 	case EVENT_TYPE_PAGE_TAB_ERR:
-		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
-		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
-		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-		       domid, address, flags);
+		dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+			"domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+			domid, address, flags);
 		break;
 	case EVENT_TYPE_ILL_CMD:
-		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
+		dev_err(dev, "ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
 		dump_command(address);
 		break;
 	case EVENT_TYPE_CMD_HARD_ERR:
-		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
-		       "flags=0x%04x]\n", address, flags);
+		dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx "
+			"flags=0x%04x]\n", address, flags);
 		break;
 	case EVENT_TYPE_IOTLB_INV_TO:
-		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
-		       "address=0x%016llx]\n",
-		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-		       address);
+		dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
+			"address=0x%016llx]\n",
+			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+			address);
 		break;
 	case EVENT_TYPE_INV_DEV_REQ:
-		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
-		       "address=0x%016llx flags=0x%04x]\n",
-		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-		       address, flags);
+		dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
+			"address=0x%016llx flags=0x%04x]\n",
+			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+			address, flags);
 		break;
 	default:
-		printk(KERN_ERR "UNKNOWN type=0x%02x event[0]=0x%08x "
-		       "event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
-		       type, event[0], event[1], event[2], event[3]);
+		dev_err(dev, KERN_ERR "UNKNOWN event[0]=0x%08x event[1]=0x%08x "
+			"event[2]=0x%08x event[3]=0x%08x\n",
+			event[0], event[1], event[2], event[3]);
 	}
 
 	memset(__evt, 0, 4 * sizeof(u32));
@@ -1056,9 +1053,9 @@ static int iommu_queue_command_sync(struct amd_iommu *iommu,
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&iommu->lock, flags);
+	raw_spin_lock_irqsave(&iommu->lock, flags);
 	ret = __iommu_queue_command_sync(iommu, cmd, sync);
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return ret;
 }
@@ -1084,7 +1081,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 
 	build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
 
-	spin_lock_irqsave(&iommu->lock, flags);
+	raw_spin_lock_irqsave(&iommu->lock, flags);
 
 	iommu->cmd_sem = 0;
 
@@ -1095,7 +1092,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	ret = wait_on_sem(&iommu->cmd_sem);
 
 out_unlock:
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return ret;
 }
@@ -1605,29 +1602,26 @@ static void del_domain_from_list(struct protection_domain *domain)
 
 static u16 domain_id_alloc(void)
 {
-	unsigned long flags;
 	int id;
 
-	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+	spin_lock(&pd_bitmap_lock);
 	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
 	BUG_ON(id == 0);
 	if (id > 0 && id < MAX_DOMAIN_ID)
 		__set_bit(id, amd_iommu_pd_alloc_bitmap);
 	else
 		id = 0;
-	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	spin_unlock(&pd_bitmap_lock);
 
 	return id;
 }
 
 static void domain_id_free(int id)
 {
-	unsigned long flags;
-
-	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+	spin_lock(&pd_bitmap_lock);
 	if (id > 0 && id < MAX_DOMAIN_ID)
 		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
-	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	spin_unlock(&pd_bitmap_lock);
 }
 
 #define DEFINE_FREE_PT_FN(LVL, FN)				\
@@ -2103,9 +2097,9 @@ static int attach_device(struct device *dev,
 	}
 
 skip_ats_check:
-	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+	spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
 	ret = __attach_device(dev_data, domain);
-	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	/*
 	 * We might boot into a crash-kernel here. The crashed kernel
@@ -2155,9 +2149,9 @@ static void detach_device(struct device *dev)
 	domain = dev_data->domain;
 
 	/* lock device table */
-	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+	spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
 	__detach_device(dev_data);
-	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	if (!dev_is_pci(dev))
 		return;
@@ -2820,7 +2814,7 @@ static void cleanup_domain(struct protection_domain *domain)
 	struct iommu_dev_data *entry;
 	unsigned long flags;
 
-	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+	spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
 
 	while (!list_empty(&domain->dev_list)) {
 		entry = list_first_entry(&domain->dev_list,
@@ -2828,7 +2822,7 @@ static void cleanup_domain(struct protection_domain *domain)
 		__detach_device(entry);
 	}
 
-	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }
 
 static void protection_domain_free(struct protection_domain *domain)
@@ -3050,15 +3044,12 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 	size_t unmap_size;
 
 	if (domain->mode == PAGE_MODE_NONE)
-		return -EINVAL;
+		return 0;
 
 	mutex_lock(&domain->api_lock);
 	unmap_size = iommu_unmap_page(domain, iova, page_size);
 	mutex_unlock(&domain->api_lock);
 
-	domain_flush_tlb_pde(domain);
-	domain_flush_complete(domain);
-
 	return unmap_size;
 }
 
@@ -3176,6 +3167,19 @@ static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
 	return dev_data->defer_attach;
 }
 
+static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+	struct protection_domain *dom = to_pdomain(domain);
+
+	domain_flush_tlb_pde(dom);
+	domain_flush_complete(dom);
+}
+
+static void amd_iommu_iotlb_range_add(struct iommu_domain *domain,
+				      unsigned long iova, size_t size)
+{
+}
+
 const struct iommu_ops amd_iommu_ops = {
 	.capable = amd_iommu_capable,
 	.domain_alloc = amd_iommu_domain_alloc,
@@ -3194,6 +3198,9 @@ const struct iommu_ops amd_iommu_ops = {
 	.apply_resv_region = amd_iommu_apply_resv_region,
 	.is_attach_deferred = amd_iommu_is_attach_deferred,
 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
+	.flush_iotlb_all = amd_iommu_flush_iotlb_all,
+	.iotlb_range_add = amd_iommu_iotlb_range_add,
+	.iotlb_sync = amd_iommu_flush_iotlb_all,
 };
 
 /*****************************************************************************
@@ -3595,14 +3602,62 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
 	amd_iommu_dev_table[devid].data[2] = dte;
 }
 
-static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
+static struct irq_remap_table *get_irq_table(u16 devid)
+{
+	struct irq_remap_table *table;
+
+	if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
+		      "%s: no iommu for devid %x\n", __func__, devid))
+		return NULL;
+
+	table = irq_lookup_table[devid];
+	if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
+		return NULL;
+
+	return table;
+}
+
+static struct irq_remap_table *__alloc_irq_table(void)
+{
+	struct irq_remap_table *table;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return NULL;
+
+	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
+	if (!table->table) {
+		kfree(table);
+		return NULL;
+	}
+	raw_spin_lock_init(&table->lock);
+
+	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+		memset(table->table, 0,
+		       MAX_IRQS_PER_TABLE * sizeof(u32));
+	else
+		memset(table->table, 0,
+		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
+	return table;
+}
+
+static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
+				  struct irq_remap_table *table)
+{
+	irq_lookup_table[devid] = table;
+	set_dte_irq_entry(devid, table);
+	iommu_flush_dte(iommu, devid);
+}
+
+static struct irq_remap_table *alloc_irq_table(u16 devid)
 {
 	struct irq_remap_table *table = NULL;
+	struct irq_remap_table *new_table = NULL;
 	struct amd_iommu *iommu;
 	unsigned long flags;
 	u16 alias;
 
-	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+	spin_lock_irqsave(&iommu_table_lock, flags);
 
 	iommu = amd_iommu_rlookup_table[devid];
 	if (!iommu)
@@ -3615,60 +3670,45 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 	alias = amd_iommu_alias_table[devid];
 	table = irq_lookup_table[alias];
 	if (table) {
-		irq_lookup_table[devid] = table;
-		set_dte_irq_entry(devid, table);
-		iommu_flush_dte(iommu, devid);
-		goto out;
+		set_remap_table_entry(iommu, devid, table);
+		goto out_wait;
 	}
+	spin_unlock_irqrestore(&iommu_table_lock, flags);
 
 	/* Nothing there yet, allocate new irq remapping table */
-	table = kzalloc(sizeof(*table), GFP_ATOMIC);
-	if (!table)
-		goto out_unlock;
-
-	/* Initialize table spin-lock */
-	spin_lock_init(&table->lock);
+	new_table = __alloc_irq_table();
+	if (!new_table)
+		return NULL;
 
-	if (ioapic)
-		/* Keep the first 32 indexes free for IOAPIC interrupts */
-		table->min_index = 32;
+	spin_lock_irqsave(&iommu_table_lock, flags);
 
-	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
-	if (!table->table) {
-		kfree(table);
-		table = NULL;
+	table = irq_lookup_table[devid];
+	if (table)
 		goto out_unlock;
-	}
 
-	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
-		memset(table->table, 0,
-		       MAX_IRQS_PER_TABLE * sizeof(u32));
-	else
-		memset(table->table, 0,
-		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
-
-	if (ioapic) {
-		int i;
-
-		for (i = 0; i < 32; ++i)
-			iommu->irte_ops->set_allocated(table, i);
+	table = irq_lookup_table[alias];
+	if (table) {
+		set_remap_table_entry(iommu, devid, table);
+		goto out_wait;
 	}
 
-	irq_lookup_table[devid] = table;
-	set_dte_irq_entry(devid, table);
-	iommu_flush_dte(iommu, devid);
-	if (devid != alias) {
-		irq_lookup_table[alias] = table;
-		set_dte_irq_entry(alias, table);
-		iommu_flush_dte(iommu, alias);
-	}
+	table = new_table;
+	new_table = NULL;
 
-out:
+	set_remap_table_entry(iommu, devid, table);
+	if (devid != alias)
+		set_remap_table_entry(iommu, alias, table);
+
+out_wait:
 	iommu_completion_wait(iommu);
 
 out_unlock:
-	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	spin_unlock_irqrestore(&iommu_table_lock, flags);
 
+	if (new_table) {
+		kmem_cache_free(amd_iommu_irq_cache, new_table->table);
+		kfree(new_table);
+	}
 	return table;
 }
 
@@ -3682,14 +3722,14 @@ static int alloc_irq_index(u16 devid, int count, bool align)
 	if (!iommu)
 		return -ENODEV;
 
-	table = get_irq_table(devid, false);
+	table = alloc_irq_table(devid);
 	if (!table)
 		return -ENODEV;
 
 	if (align)
 		alignment = roundup_pow_of_two(count);
 
-	spin_lock_irqsave(&table->lock, flags);
+	raw_spin_lock_irqsave(&table->lock, flags);
 
 	/* Scan table for free entries */
 	for (index = ALIGN(table->min_index, alignment), c = 0;
@@ -3716,7 +3756,7 @@ static int alloc_irq_index(u16 devid, int count, bool align)
 	index = -ENOSPC;
 
 out:
-	spin_unlock_irqrestore(&table->lock, flags);
+	raw_spin_unlock_irqrestore(&table->lock, flags);
 
 	return index;
 }
@@ -3733,11 +3773,11 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
 	if (iommu == NULL)
 		return -EINVAL;
 
-	table = get_irq_table(devid, false);
+	table = get_irq_table(devid);
 	if (!table)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&table->lock, flags);
+	raw_spin_lock_irqsave(&table->lock, flags);
 
 	entry = (struct irte_ga *)table->table;
 	entry = &entry[index];
@@ -3748,7 +3788,7 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
 	if (data)
 		data->ref = entry;
 
-	spin_unlock_irqrestore(&table->lock, flags);
+	raw_spin_unlock_irqrestore(&table->lock, flags);
 
 	iommu_flush_irt(iommu, devid);
 	iommu_completion_wait(iommu);
@@ -3766,13 +3806,13 @@ static int modify_irte(u16 devid, int index, union irte *irte)
 	if (iommu == NULL)
 		return -EINVAL;
 
-	table = get_irq_table(devid, false);
+	table = get_irq_table(devid);
 	if (!table)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&table->lock, flags);
+	raw_spin_lock_irqsave(&table->lock, flags);
 	table->table[index] = irte->val;
-	spin_unlock_irqrestore(&table->lock, flags);
+	raw_spin_unlock_irqrestore(&table->lock, flags);
 
 	iommu_flush_irt(iommu, devid);
 	iommu_completion_wait(iommu);
@@ -3790,13 +3830,13 @@ static void free_irte(u16 devid, int index)
 	if (iommu == NULL)
 		return;
 
-	table = get_irq_table(devid, false);
+	table = get_irq_table(devid);
 	if (!table)
 		return;
 
-	spin_lock_irqsave(&table->lock, flags);
+	raw_spin_lock_irqsave(&table->lock, flags);
 	iommu->irte_ops->clear_allocated(table, index);
-	spin_unlock_irqrestore(&table->lock, flags);
+	raw_spin_unlock_irqrestore(&table->lock, flags);
 
 	iommu_flush_irt(iommu, devid);
 	iommu_completion_wait(iommu);
@@ -3877,10 +3917,8 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
 				 u8 vector, u32 dest_apicid)
 {
 	struct irte_ga *irte = (struct irte_ga *) entry;
-	struct iommu_dev_data *dev_data = search_dev_data(devid);
 
-	if (!dev_data || !dev_data->use_vapic ||
-	    !irte->lo.fields_remap.guest_mode) {
+	if (!irte->lo.fields_remap.guest_mode) {
 		irte->hi.fields.vector = vector;
 		irte->lo.fields_remap.destination = dest_apicid;
 		modify_irte_ga(devid, index, irte, NULL);
@@ -4086,7 +4124,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 	struct amd_ir_data *data = NULL;
 	struct irq_cfg *cfg;
 	int i, ret, devid;
-	int index = -1;
+	int index;
 
 	if (!info)
 		return -EINVAL;
@@ -4110,10 +4148,26 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 		return ret;
 
 	if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
-		if (get_irq_table(devid, true))
+		struct irq_remap_table *table;
+		struct amd_iommu *iommu;
+
+		table = alloc_irq_table(devid);
+		if (table) {
+			if (!table->min_index) {
+				/*
+				 * Keep the first 32 indexes free for IOAPIC
+				 * interrupts.
+				 */
+				table->min_index = 32;
+				iommu = amd_iommu_rlookup_table[devid];
+				for (i = 0; i < 32; ++i)
+					iommu->irte_ops->set_allocated(table, i);
+			}
+			WARN_ON(table->min_index != 32);
 			index = info->ioapic_pin;
-		else
-			ret = -ENOMEM;
+		} else {
+			index = -ENOMEM;
+		}
 	} else {
 		bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
 
@@ -4379,7 +4433,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
 {
 	unsigned long flags;
 	struct amd_iommu *iommu;
-	struct irq_remap_table *irt;
+	struct irq_remap_table *table;
 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
 	int devid = ir_data->irq_2_irte.devid;
 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
@@ -4393,11 +4447,11 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
 	if (!iommu)
 		return -ENODEV;
 
-	irt = get_irq_table(devid, false);
-	if (!irt)
+	table = get_irq_table(devid);
+	if (!table)
 		return -ENODEV;
 
-	spin_lock_irqsave(&irt->lock, flags);
+	raw_spin_lock_irqsave(&table->lock, flags);
 
 	if (ref->lo.fields_vapic.guest_mode) {
 		if (cpu >= 0)
@@ -4406,7 +4460,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
 		barrier();
 	}
 
-	spin_unlock_irqrestore(&irt->lock, flags);
+	raw_spin_unlock_irqrestore(&table->lock, flags);
 
 	iommu_flush_irt(iommu, devid);
 	iommu_completion_wait(iommu);
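For reference, a minimal userspace sketch of the lock-free list pattern this diff adopts for dev_data_list: llist_add() amounts to a compare-and-swap loop that pushes a node onto the list head, and search_dev_data() can walk the list without the old dev_data_list_lock because entries are only ever added, never removed. This is an illustrative analogue in C11 stdatomic, not kernel code; the names (node, list_head, push, search) are hypothetical.

/* Illustrative analogue only -- C11 stdatomic, not the kernel's llist implementation. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	unsigned short devid;
};

/* Analogue of LLIST_HEAD(dev_data_list): an atomic pointer to the first node. */
static _Atomic(struct node *) list_head;

/* Analogue of llist_add(): lock-free push via a compare-and-swap loop. */
static void push(struct node *n)
{
	struct node *first = atomic_load(&list_head);

	do {
		n->next = first;	/* 'first' is refreshed by a failed CAS */
	} while (!atomic_compare_exchange_weak(&list_head, &first, n));
}

/* Analogue of search_dev_data(): lockless walk, safe because nodes are never removed. */
static struct node *search(unsigned short devid)
{
	for (struct node *n = atomic_load(&list_head); n; n = n->next)
		if (n->devid == devid)
			return n;
	return NULL;
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	if (!n)
		return 1;
	n->devid = 0x1234;
	push(n);
	printf("found devid 0x1234: %s\n", search(0x1234) ? "yes" : "no");
	return 0;
}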