author     Keith Busch <keith.busch@intel.com>      2016-01-12 13:18:08 -0700
committer  Bjorn Helgaas <bhelgaas@google.com>      2016-01-15 13:54:55 -0600
commit     d9c3d6ff227cbdf05399ba8303bdcd0d35bbfccb (patch)
tree       30d6c505b91234f96b6f7acb0feab98a22f59d9a
parent     64bce3e8bfb3cf2cbc638296390af3e02e747347 (diff)
x86/PCI: Allow DMA ops specific to a PCI domain
The Intel Volume Management Device (VMD) is a PCIe endpoint that acts as a host bridge to another PCI domain. When devices below the VMD perform DMA, the VMD replaces their DMA source IDs with its own source ID. Therefore, those devices require special DMA ops.

Add interfaces to allow the VMD driver to set up dma_ops for the devices below it.

[bhelgaas: remove "extern", add "static", changelog]

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
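For illustration, a minimal sketch of how a driver such as VMD might consume these interfaces is shown below. Only struct dma_domain, add_dma_domain(), and del_dma_domain() come from this patch; vmd_dma_ops, vmd_enable_domain(), and vmd_disable_domain() are hypothetical names, not code from the actual VMD driver.

#include <linux/dma-mapping.h>
#include <asm/device.h>

/* Placeholder ops: a real driver would fill in .map_page, .unmap_page,
 * .map_sg, etc. so that DMA is redirected through the VMD endpoint. */
static struct dma_map_ops vmd_dma_ops;

static struct dma_domain vmd_dma_domain = {
	.dma_ops	= &vmd_dma_ops,
};

/* Register the ops for the child PCI domain behind the VMD device. */
static void vmd_enable_domain(int child_domain_nr)
{
	vmd_dma_domain.domain_nr = child_domain_nr;
	add_dma_domain(&vmd_dma_domain);
}

static void vmd_disable_domain(void)
{
	del_dma_domain(&vmd_dma_domain);
}

Devices later enumerated in the matching domain pick up these ops from set_dma_domain_ops() when pcibios_add_device() runs for them, as added in the diff below.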
-rw-r--r--   arch/x86/include/asm/device.h   10
-rw-r--r--   arch/x86/pci/common.c           38
2 files changed, 48 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 03dd72957d2f..684ed6c3aa67 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -10,6 +10,16 @@ struct dev_archdata {
#endif
};
+#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
+struct dma_domain {
+ struct list_head node;
+ struct dma_map_ops *dma_ops;
+ int domain_nr;
+};
+void add_dma_domain(struct dma_domain *domain);
+void del_dma_domain(struct dma_domain *domain);
+#endif
+
struct pdev_archdata {
};
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index eccd4d99e6a4..2879efc73a96 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -641,6 +641,43 @@ unsigned int pcibios_assign_all_busses(void)
return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
}
+#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
+static LIST_HEAD(dma_domain_list);
+static DEFINE_SPINLOCK(dma_domain_list_lock);
+
+void add_dma_domain(struct dma_domain *domain)
+{
+ spin_lock(&dma_domain_list_lock);
+ list_add(&domain->node, &dma_domain_list);
+ spin_unlock(&dma_domain_list_lock);
+}
+EXPORT_SYMBOL_GPL(add_dma_domain);
+
+void del_dma_domain(struct dma_domain *domain)
+{
+ spin_lock(&dma_domain_list_lock);
+ list_del(&domain->node);
+ spin_unlock(&dma_domain_list_lock);
+}
+EXPORT_SYMBOL_GPL(del_dma_domain);
+
+static void set_dma_domain_ops(struct pci_dev *pdev)
+{
+ struct dma_domain *domain;
+
+ spin_lock(&dma_domain_list_lock);
+ list_for_each_entry(domain, &dma_domain_list, node) {
+ if (pci_domain_nr(pdev->bus) == domain->domain_nr) {
+ pdev->dev.archdata.dma_ops = domain->dma_ops;
+ break;
+ }
+ }
+ spin_unlock(&dma_domain_list_lock);
+}
+#else
+static void set_dma_domain_ops(struct pci_dev *pdev) {}
+#endif
+
int pcibios_add_device(struct pci_dev *dev)
{
struct setup_data *data;
@@ -670,6 +707,7 @@ int pcibios_add_device(struct pci_dev *dev)
pa_data = data->next;
iounmap(data);
}
+ set_dma_domain_ops(dev);
return 0;
}