Diffstat (limited to 'drivers')
91 files changed, 2719 insertions(+), 880 deletions(-)
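Most of the driver-side churn below follows a single conversion: the deprecated pcim_iomap_regions_request_all()/pcim_iomap_table() pair is replaced by pcim_request_all_regions() plus an explicit pcim_iomap() per BAR, whose NULL return must now be checked. A minimal probe sketch of that pattern (hypothetical "example" driver, single BAR 0; only the devres-managed helpers used in the hunks below are assumed):

/* Hypothetical probe illustrating the request-all-then-iomap pattern. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *mmio;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* Request every BAR; the regions are released automatically on driver detach. */
	ret = pcim_request_all_regions(pdev, "example");
	if (ret)
		return ret;

	/* ioremap only the BAR that is actually used, and check the mapping. */
	mmio = pcim_iomap(pdev, 0, 0);
	if (!mmio)
		return -ENOMEM;

	/* ... program the device through 'mmio' ... */
	return 0;
}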
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c index 547f56341705..3999305b5356 100644 --- a/drivers/ata/acard-ahci.c +++ b/drivers/ata/acard-ahci.c @@ -370,7 +370,7 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id /* AHCI controllers often implement SFF compatible interface. * Grab all PCI BARs just in case. */ - rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME); + rc = pcim_request_all_regions(pdev, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) @@ -386,7 +386,9 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) pci_enable_msi(pdev); - hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; + hpriv->mmio = pcim_iomap(pdev, AHCI_PCI_BAR, 0); + if (!hpriv->mmio) + return -ENOMEM; /* save initial config */ ahci_save_initial_config(&pdev->dev, hpriv); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 45f63b09828a..2043dfb52ae8 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1869,7 +1869,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* AHCI controllers often implement SFF compatible interface. * Grab all PCI BARs just in case. */ - rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME); + rc = pcim_request_all_regions(pdev, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) @@ -1893,7 +1893,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (ahci_sb600_enable_64bit(pdev)) hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY; - hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar]; + hpriv->mmio = pcim_iomap(pdev, ahci_pci_bar, 0); + if (!hpriv->mmio) + return -ENOMEM; /* detect remapped nvme devices */ ahci_remap_check(pdev, ahci_pci_bar, hpriv); diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c index f49818a13013..788a11cdb34b 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c @@ -129,16 +129,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Find and map all the device's BARS */ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK; - ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev)); + ret = pcim_request_all_regions(pdev, pci_name(pdev)); if (ret) { - dev_err(&pdev->dev, "Failed to map pci regions.\n"); + dev_err(&pdev->dev, "Failed to request PCI regions.\n"); goto out_err; } i = 0; for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) { bar = &accel_pci_dev->pci_bars[i++]; - bar->virt_addr = pcim_iomap_table(pdev)[bar_nr]; + bar->virt_addr = pcim_iomap(pdev, bar_nr, 0); + if (!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to ioremap PCI region.\n"); + ret = -ENOMEM; + goto out_err; + } } pci_set_master(pdev); diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index 659905e45950..115eabfd1f6b 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -131,16 +131,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Find and map all the device's BARS */ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK; - ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev)); + ret = pcim_request_all_regions(pdev, pci_name(pdev)); if (ret) { - dev_err(&pdev->dev, "Failed to map pci regions.\n"); + 
dev_err(&pdev->dev, "Failed to request PCI regions.\n"); goto out_err; } i = 0; for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) { bar = &accel_pci_dev->pci_bars[i++]; - bar->virt_addr = pcim_iomap_table(pdev)[bar_nr]; + bar->virt_addr = pcim_iomap(pdev, bar_nr, 0); + if (!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to ioremap PCI region.\n"); + ret = -ENOMEM; + goto out_err; + } } pci_set_master(pdev); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c index 400e36d9908f..94d0e73e42de 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c @@ -739,18 +739,22 @@ static int otx2_cptpf_probe(struct pci_dev *pdev, dev_err(dev, "Unable to get usable DMA configuration\n"); goto clear_drvdata; } - /* Map PF's configuration registers */ - err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM, - OTX2_CPT_DRV_NAME); + err = pcim_request_all_regions(pdev, OTX2_CPT_DRV_NAME); if (err) { - dev_err(dev, "Couldn't get PCI resources 0x%x\n", err); + dev_err(dev, "Couldn't request PCI resources 0x%x\n", err); goto clear_drvdata; } pci_set_master(pdev); pci_set_drvdata(pdev, cptpf); cptpf->pdev = pdev; - cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM]; + /* Map PF's configuration registers */ + cptpf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0); + if (!cptpf->reg_base) { + err = -ENOMEM; + dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", err); + goto clear_drvdata; + } /* Check if AF driver is up, otherwise defer probe */ err = cpt_is_pf_usable(cptpf); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c index 527d34cc258b..d0b6ee901f62 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c @@ -358,9 +358,8 @@ static int otx2_cptvf_probe(struct pci_dev *pdev, dev_err(dev, "Unable to get usable DMA configuration\n"); goto clear_drvdata; } - /* Map VF's configuration registers */ - ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM, - OTX2_CPTVF_DRV_NAME); + + ret = pcim_request_all_regions(pdev, OTX2_CPTVF_DRV_NAME); if (ret) { dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret); goto clear_drvdata; @@ -369,7 +368,13 @@ static int otx2_cptvf_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, cptvf); cptvf->pdev = pdev; - cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM]; + /* Map VF's configuration registers */ + cptvf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0); + if (!cptvf->reg_base) { + ret = -ENOMEM; + dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", ret); + goto clear_drvdata; + } otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag); diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c index 80cac3a5f976..602807d6afcc 100644 --- a/drivers/fpga/dfl-pci.c +++ b/drivers/fpga/dfl-pci.c @@ -39,14 +39,6 @@ struct cci_drvdata { struct dfl_fpga_cdev *cdev; /* container device */ }; -static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev) -{ - if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME)) - return NULL; - - return pcim_iomap_table(pcidev)[0]; -} - static int cci_pci_alloc_irq(struct pci_dev *pcidev) { int ret, nvec = pci_msix_vec_count(pcidev); @@ -235,9 +227,9 @@ static int find_dfls_by_default(struct pci_dev *pcidev, u64 v; /* start to find Device Feature List from Bar 0 */ - base = cci_pci_ioremap_bar0(pcidev); - if (!base) - return -ENOMEM; + base = 
pcim_iomap_region(pcidev, 0, DRV_NAME); + if (IS_ERR(base)) + return PTR_ERR(base); /* * PF device has FME and Ports/AFUs, and VF device only has one @@ -296,7 +288,7 @@ static int find_dfls_by_default(struct pci_dev *pcidev, } /* release I/O mappings for next step enumeration */ - pcim_iounmap_regions(pcidev, BIT(0)); + pcim_iounmap_region(pcidev, 0); return ret; } diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c index 421d7e3a6c66..cd20604f26de 100644 --- a/drivers/gpio/gpio-merrifield.c +++ b/drivers/gpio/gpio-merrifield.c @@ -78,24 +78,25 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id if (retval) return retval; - retval = pcim_iomap_regions(pdev, BIT(1) | BIT(0), pci_name(pdev)); - if (retval) - return dev_err_probe(dev, retval, "I/O memory mapping error\n"); - - base = pcim_iomap_table(pdev)[1]; + base = pcim_iomap_region(pdev, 1, pci_name(pdev)); + if (IS_ERR(base)) + return dev_err_probe(dev, PTR_ERR(base), "I/O memory mapping error\n"); irq_base = readl(base + 0 * sizeof(u32)); gpio_base = readl(base + 1 * sizeof(u32)); /* Release the IO mapping, since we already get the info from BAR1 */ - pcim_iounmap_regions(pdev, BIT(1)); + pcim_iounmap_region(pdev, 1); priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->dev = dev; - priv->reg_base = pcim_iomap_table(pdev)[0]; + priv->reg_base = pcim_iomap_region(pdev, 0, pci_name(pdev)); + if (IS_ERR(priv->reg_base)) + return dev_err_probe(dev, PTR_ERR(priv->reg_base), + "I/O memory mapping error\n"); priv->pin_info.pin_ranges = mrfld_gpio_ranges; priv->pin_info.nranges = ARRAY_SIZE(mrfld_gpio_ranges); diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 0d7b9839e5b6..e9d8d28e055f 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -23,7 +23,6 @@ enum { TH_PCI_RTIT_BAR = 4, }; -#define BAR_MASK (BIT(TH_PCI_CONFIG_BAR) | BIT(TH_PCI_STH_SW_BAR)) #define PCI_REG_NPKDSC 0x80 #define NPKDSC_TSACT BIT(5) @@ -83,10 +82,16 @@ static int intel_th_pci_probe(struct pci_dev *pdev, if (err) return err; - err = pcim_iomap_regions_request_all(pdev, BAR_MASK, DRIVER_NAME); + err = pcim_request_all_regions(pdev, DRIVER_NAME); if (err) return err; + if (!pcim_iomap(pdev, TH_PCI_CONFIG_BAR, 0)) + return -ENOMEM; + + if (!pcim_iomap(pdev, TH_PCI_STH_SW_BAR, 0)) + return -ENOMEM; + if (pdev->resource[TH_PCI_RTIT_BAR].start) { resource[TH_MMIO_RTIT] = pdev->resource[TH_PCI_RTIT_BAR]; r++; diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c index 9fd717b9cf69..984f0dd7b62e 100644 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.c +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c @@ -239,12 +239,11 @@ static int cavium_ptp_probe(struct pci_dev *pdev, if (err) goto error_free; - err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev)); + clock->reg_base = pcim_iomap_region(pdev, PCI_PTP_BAR_NO, pci_name(pdev)); + err = PTR_ERR_OR_ZERO(clock->reg_base); if (err) goto error_free; - clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO]; - spin_lock_init(&clock->spin_lock); cc = &clock->cycle_counter; @@ -292,7 +291,7 @@ error_stop: clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG); clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN; writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG); - pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO); + pcim_iounmap_region(pdev, PCI_PTP_BAR_NO); error_free: devm_kfree(dev, clock); diff --git 
a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 3b9943eb6934..4839dcb199c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -3533,7 +3533,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie, **priv; struct iwl_trans *trans; int ret, addr_size; - void __iomem * const *table; u32 bar0; /* reassign our BAR 0 if invalid due to possible runtime PM races */ @@ -3659,22 +3658,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } - ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); + ret = pcim_request_all_regions(pdev, DRV_NAME); if (ret) { - dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); + dev_err(&pdev->dev, "Requesting all PCI BARs failed.\n"); goto out_no_pci; } - table = pcim_iomap_table(pdev); - if (!table) { - dev_err(&pdev->dev, "pcim_iomap_table failed\n"); - ret = -ENOMEM; - goto out_no_pci; - } - - trans_pcie->hw_base = table[0]; + trans_pcie->hw_base = pcim_iomap(pdev, 0, 0); if (!trans_pcie->hw_base) { - dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n"); + dev_err(&pdev->dev, "Could not ioremap PCI BAR 0.\n"); ret = -ENODEV; goto out_no_pci; } diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c index 6fc9dfe82474..544d8a4d2af5 100644 --- a/drivers/ntb/hw/idt/ntb_hw_idt.c +++ b/drivers/ntb/hw/idt/ntb_hw_idt.c @@ -2671,15 +2671,20 @@ static int idt_init_pci(struct idt_ntb_dev *ndev) */ pci_set_master(pdev); - /* Request all BARs resources and map BAR0 only */ - ret = pcim_iomap_regions_request_all(pdev, 1, NTB_NAME); + /* Request all BARs resources */ + ret = pcim_request_all_regions(pdev, NTB_NAME); if (ret != 0) { dev_err(&pdev->dev, "Failed to request resources\n"); goto err_clear_master; } - /* Retrieve virtual address of BAR0 - PCI configuration space */ - ndev->cfgspc = pcim_iomap_table(pdev)[0]; + /* ioremap BAR0 - PCI configuration space */ + ndev->cfgspc = pcim_iomap(pdev, 0, 0); + if (!ndev->cfgspc) { + dev_err(&pdev->dev, "Failed to ioremap BAR 0\n"); + ret = -ENOMEM; + goto err_clear_master; + } /* Put the IDT driver data pointer to the PCI-device private pointer */ pci_set_drvdata(pdev, ndev); diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 0d94e4a967d8..2fbd379923fd 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -173,6 +173,15 @@ config PCI_PASID If unsure, say N. +config PCIE_TPH + bool "TLP Processing Hints" + help + This option adds support for PCIe TLP Processing Hints (TPH). + TPH allows endpoint devices to provide optimization hints, such as + desired caching behavior, for requests that target memory space. + These hints, called Steering Tags, can empower the system hardware + to optimize the utilization of platform resources. 
+ config PCI_P2PDMA bool "PCI peer-to-peer transfer support" depends on ZONE_DEVICE @@ -305,6 +314,6 @@ source "drivers/pci/hotplug/Kconfig" source "drivers/pci/controller/Kconfig" source "drivers/pci/endpoint/Kconfig" source "drivers/pci/switch/Kconfig" -source "drivers/pci/pwrctl/Kconfig" +source "drivers/pci/pwrctrl/Kconfig" endif diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 374c5c06d92f..67647f1880fb 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \ obj-$(CONFIG_PCI) += msi/ obj-$(CONFIG_PCI) += pcie/ -obj-$(CONFIG_PCI) += pwrctl/ +obj-$(CONFIG_PCI) += pwrctrl/ ifdef CONFIG_PCI obj-$(CONFIG_PROC_FS) += proc.o @@ -36,6 +36,7 @@ obj-$(CONFIG_VGA_ARB) += vgaarb.o obj-$(CONFIG_PCI_DOE) += doe.o obj-$(CONFIG_PCI_DYNAMIC_OF_NODES) += of_property.o obj-$(CONFIG_PCI_NPEM) += npem.o +obj-$(CONFIG_PCIE_TPH) += tph.o # Endpoint library must be initialized before its users obj-$(CONFIG_PCI_ENDPOINT) += endpoint/ diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 55c853686051..98910bc0fcc4 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -13,11 +13,24 @@ #include <linux/ioport.h> #include <linux/of.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/proc_fs.h> #include <linux/slab.h> #include "pci.h" +/* + * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond + * to P2P or CardBus bridge windows) go in a table. Additional ones (for + * buses below host bridges or subtractive decode bridges) go in the list. + * Use pci_bus_for_each_resource() to iterate through all the resources. + */ + +struct pci_bus_resource { + struct list_head list; + struct resource *res; +}; + void pci_add_resource_offset(struct list_head *resources, struct resource *res, resource_size_t offset) { @@ -46,8 +59,7 @@ void pci_free_resource_list(struct list_head *resources) } EXPORT_SYMBOL(pci_free_resource_list); -void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, - unsigned int flags) +void pci_bus_add_resource(struct pci_bus *bus, struct resource *res) { struct pci_bus_resource *bus_res; @@ -58,7 +70,6 @@ void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, } bus_res->res = res; - bus_res->flags = flags; list_add_tail(&bus_res->list, &bus->resources); } @@ -320,6 +331,47 @@ void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { } void __weak pcibios_bus_add_device(struct pci_dev *pdev) { } +/* + * Create pwrctrl devices (if required) for the PCI devices to handle the power + * state. + */ +static void pci_pwrctrl_create_devices(struct pci_dev *dev) +{ + struct device_node *np = dev_of_node(&dev->dev); + struct device *parent = &dev->dev; + struct platform_device *pdev; + + /* + * First ensure that we are starting from a PCI bridge and it has a + * corresponding devicetree node. + */ + if (np && pci_is_bridge(dev)) { + /* + * Now look for the child PCI device nodes and create pwrctrl + * devices for them. The pwrctrl device drivers will manage the + * power state of the devices. + */ + for_each_available_child_of_node_scoped(np, child) { + /* + * First check whether the pwrctrl device really + * needs to be created or not. This is decided + * based on at least one of the power supplies + * being defined in the devicetree node of the + * device. 
+ */ + if (!of_pci_supply_present(child)) { + pci_dbg(dev, "skipping OF node: %s\n", child->name); + return; + } + + /* Now create the pwrctrl device */ + pdev = of_platform_device_create(child, NULL, parent); + if (!pdev) + pci_err(dev, "failed to create OF node: %s\n", child->name); + } + } +} + /** * pci_bus_add_device - start driver for a single device * @dev: device to add @@ -329,6 +381,7 @@ void __weak pcibios_bus_add_device(struct pci_dev *pdev) { } void pci_bus_add_device(struct pci_dev *dev) { struct device_node *dn = dev->dev.of_node; + struct platform_device *pdev; int retval; /* @@ -343,20 +396,28 @@ void pci_bus_add_device(struct pci_dev *dev) pci_proc_attach_device(dev); pci_bridge_d3_update(dev); + pci_pwrctrl_create_devices(dev); + + /* + * If the PCI device is associated with a pwrctrl device with a + * power supply, create a device link between the PCI device and + * pwrctrl device. This ensures that pwrctrl drivers are probed + * before PCI client drivers. + */ + pdev = of_find_device_by_node(dn); + if (pdev && of_pci_supply_present(dn)) { + if (!device_link_add(&dev->dev, &pdev->dev, + DL_FLAG_AUTOREMOVE_CONSUMER)) + pci_err(dev, "failed to add device link to power control device %s\n", + pdev->name); + } + dev->match_driver = !dn || of_device_is_available(dn); retval = device_attach(&dev->dev); if (retval < 0 && retval != -EPROBE_DEFER) pci_warn(dev, "device attach failed (%d)\n", retval); - pci_dev_assign_added(dev, true); - - if (dev_of_node(&dev->dev) && pci_is_bridge(dev)) { - retval = of_platform_populate(dev_of_node(&dev->dev), NULL, NULL, - &dev->dev); - if (retval) - pci_err(dev, "failed to populate child OF nodes (%d)\n", - retval); - } + pci_dev_assign_added(dev); } EXPORT_SYMBOL_GPL(pci_bus_add_device); @@ -389,41 +450,23 @@ void pci_bus_add_devices(const struct pci_bus *bus) } EXPORT_SYMBOL(pci_bus_add_devices); -static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), - void *userdata, bool locked) +static int __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), + void *userdata) { struct pci_dev *dev; - struct pci_bus *bus; - struct list_head *next; - int retval; + int ret = 0; - bus = top; - if (!locked) - down_read(&pci_bus_sem); - next = top->devices.next; - for (;;) { - if (next == &bus->devices) { - /* end of this bus, go up or finish */ - if (bus == top) + list_for_each_entry(dev, &top->devices, bus_list) { + ret = cb(dev, userdata); + if (ret) + break; + if (dev->subordinate) { + ret = __pci_walk_bus(dev->subordinate, cb, userdata); + if (ret) break; - next = bus->self->bus_list.next; - bus = bus->self->bus; - continue; } - dev = list_entry(next, struct pci_dev, bus_list); - if (dev->subordinate) { - /* this is a pci-pci bridge, do its devices next */ - next = dev->subordinate->devices.next; - bus = dev->subordinate; - } else - next = dev->bus_list.next; - - retval = cb(dev, userdata); - if (retval) - break; } - if (!locked) - up_read(&pci_bus_sem); + return ret; } /** @@ -441,7 +484,9 @@ static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void */ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata) { - __pci_walk_bus(top, cb, userdata, false); + down_read(&pci_bus_sem); + __pci_walk_bus(top, cb, userdata); + up_read(&pci_bus_sem); } EXPORT_SYMBOL_GPL(pci_walk_bus); @@ -449,9 +494,8 @@ void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void * { lockdep_assert_held(&pci_bus_sem); - __pci_walk_bus(top, cb, userdata, 
true); + __pci_walk_bus(top, cb, userdata); } -EXPORT_SYMBOL_GPL(pci_walk_bus_locked); struct pci_bus *pci_bus_get(struct pci_bus *bus) { diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index 284f2e0e4d26..0341d51d6aed 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -386,6 +386,13 @@ static const struct j721e_pcie_data j784s4_pcie_ep_data = { .max_lanes = 4, }; +static const struct j721e_pcie_data j722s_pcie_rc_data = { + .mode = PCI_MODE_RC, + .linkdown_irq_regfield = J7200_LINK_DOWN, + .byte_access_allowed = true, + .max_lanes = 1, +}; + static const struct of_device_id of_j721e_pcie_match[] = { { .compatible = "ti,j721e-pcie-host", @@ -419,6 +426,10 @@ static const struct of_device_id of_j721e_pcie_match[] = { .compatible = "ti,j784s4-pcie-ep", .data = &j784s4_pcie_ep_data, }, + { + .compatible = "ti,j722s-pcie-host", + .data = &j722s_pcie_rc_data, + }, {}, }; @@ -572,15 +583,14 @@ static int j721e_pcie_probe(struct platform_device *pdev) pcie->refclk = clk; /* - * The "Power Sequencing and Reset Signal Timings" table of the - * PCI Express Card Electromechanical Specification, Revision - * 5.1, Section 2.9.2, Symbol "T_PERST-CLK", indicates PERST# - * should be deasserted after minimum of 100us once REFCLK is - * stable. The REFCLK to the connector in RC mode is selected - * while enabling the PHY. So deassert PERST# after 100 us. + * Section 2.2 of the PCI Express Card Electromechanical + * Specification (Revision 5.1) mandates that the deassertion + * of the PERST# signal should be delayed by 100 ms (TPVPERL). + * This shall ensure that the power and the reference clock + * are stable. */ if (gpiod) { - fsleep(PCIE_T_PERST_CLK_US); + msleep(PCIE_T_PVPERL_MS); gpiod_set_value_cansleep(gpiod, 1); } @@ -671,15 +681,14 @@ static int j721e_pcie_resume_noirq(struct device *dev) return ret; /* - * The "Power Sequencing and Reset Signal Timings" table of the - * PCI Express Card Electromechanical Specification, Revision - * 5.1, Section 2.9.2, Symbol "T_PERST-CLK", indicates PERST# - * should be deasserted after minimum of 100us once REFCLK is - * stable. The REFCLK to the connector in RC mode is selected - * while enabling the PHY. So deassert PERST# after 100 us. + * Section 2.2 of the PCI Express Card Electromechanical + * Specification (Revision 5.1) mandates that the deassertion + * of the PERST# signal should be delayed by 100 ms (TPVPERL). + * This shall ensure that the power and the reference clock + * are stable. */ if (pcie->reset_gpio) { - fsleep(PCIE_T_PERST_CLK_US); + msleep(PCIE_T_PVPERL_MS); gpiod_set_value_cansleep(pcie->reset_gpio, 1); } @@ -712,7 +721,7 @@ static DEFINE_NOIRQ_DEV_PM_OPS(j721e_pcie_pm_ops, static struct platform_driver j721e_pcie_driver = { .probe = j721e_pcie_probe, - .remove_new = j721e_pcie_remove, + .remove = j721e_pcie_remove, .driver = { .name = "j721e-pcie", .of_match_table = of_j721e_pcie_match, diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c index 4251fac5e310..204e045aed8c 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.c +++ b/drivers/pci/controller/cadence/pcie-cadence.c @@ -197,7 +197,7 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie) phy_count = of_property_count_strings(np, "phy-names"); if (phy_count < 1) { - dev_err(dev, "no phy-names. 
PHY will not be initialized\n"); + dev_info(dev, "no \"phy-names\" property found; PHY will not be initialized\n"); pcie->phy_count = 0; return 0; } @@ -260,7 +260,7 @@ static int cdns_pcie_resume_noirq(struct device *dev) ret = cdns_pcie_enable_phy(pcie); if (ret) { - dev_err(dev, "failed to enable phy\n"); + dev_err(dev, "failed to enable PHY\n"); return ret; } diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index fa45da28a218..6a830166d37f 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -383,7 +383,7 @@ static const struct of_device_id exynos_pcie_of_match[] = { static struct platform_driver exynos_pcie_driver = { .probe = exynos_pcie_probe, - .remove_new = exynos_pcie_remove, + .remove = exynos_pcie_remove, .driver = { .name = "exynos-pcie", .of_match_table = exynos_pcie_of_match, diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 808d1f105417..c8d5c90aa4d4 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -82,6 +82,11 @@ enum imx_pcie_variants { #define IMX_PCIE_FLAG_HAS_SERDES BIT(6) #define IMX_PCIE_FLAG_SUPPORT_64BIT BIT(7) #define IMX_PCIE_FLAG_CPU_ADDR_FIXUP BIT(8) +/* + * Because of ERR005723 (PCIe does not support L2 power down) we need to + * workaround suspend resume on some devices which are affected by this errata. + */ +#define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9) #define imx_check_flag(pci, val) (pci->drvdata->flags & val) @@ -1237,9 +1242,19 @@ static int imx_pcie_suspend_noirq(struct device *dev) return 0; imx_pcie_msi_save_restore(imx_pcie, true); - imx_pcie_pm_turnoff(imx_pcie); - imx_pcie_stop_link(imx_pcie->pci); - imx_pcie_host_exit(pp); + if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) { + /* + * The minimum for a workaround would be to set PERST# and to + * set the PCIE_TEST_PD flag. However, we can also disable the + * clock which saves some power. + */ + imx_pcie_assert_core_reset(imx_pcie); + imx_pcie->drvdata->enable_ref_clk(imx_pcie, false); + } else { + imx_pcie_pm_turnoff(imx_pcie); + imx_pcie_stop_link(imx_pcie->pci); + imx_pcie_host_exit(pp); + } return 0; } @@ -1253,14 +1268,32 @@ static int imx_pcie_resume_noirq(struct device *dev) if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) return 0; - ret = imx_pcie_host_init(pp); - if (ret) - return ret; - imx_pcie_msi_save_restore(imx_pcie, false); - dw_pcie_setup_rc(pp); + if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) { + ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true); + if (ret) + return ret; + ret = imx_pcie_deassert_core_reset(imx_pcie); + if (ret) + return ret; + /* + * Using PCIE_TEST_PD seems to disable MSI and powers down the + * root complex. This is why we have to setup the rc again and + * why we have to restore the MSI register. 
+ */ + ret = dw_pcie_setup_rc(&imx_pcie->pci->pp); + if (ret) + return ret; + imx_pcie_msi_save_restore(imx_pcie, false); + } else { + ret = imx_pcie_host_init(pp); + if (ret) + return ret; + imx_pcie_msi_save_restore(imx_pcie, false); + dw_pcie_setup_rc(pp); - if (imx_pcie->link_is_up) - imx_pcie_start_link(imx_pcie->pci); + if (imx_pcie->link_is_up) + imx_pcie_start_link(imx_pcie->pci); + } return 0; } @@ -1485,7 +1518,9 @@ static const struct imx_pcie_drvdata drvdata[] = { [IMX6Q] = { .variant = IMX6Q, .flags = IMX_PCIE_FLAG_IMX_PHY | - IMX_PCIE_FLAG_IMX_SPEED_CHANGE, + IMX_PCIE_FLAG_IMX_SPEED_CHANGE | + IMX_PCIE_FLAG_BROKEN_SUSPEND | + IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .dbi_length = 0x200, .gpr = "fsl,imx6q-iomuxc-gpr", .clk_names = imx6q_clks, diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 44b34559de1a..63bd5003da45 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -1375,7 +1375,7 @@ static void ks_pcie_remove(struct platform_device *pdev) static struct platform_driver ks_pcie_driver = { .probe = ks_pcie_probe, - .remove_new = ks_pcie_remove, + .remove = ks_pcie_remove, .driver = { .name = "keystone-pcie", .of_match_table = ks_pcie_of_match, diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c index 76d0ddea8007..1340edc18d12 100644 --- a/drivers/pci/controller/dwc/pcie-bt1.c +++ b/drivers/pci/controller/dwc/pcie-bt1.c @@ -632,7 +632,7 @@ MODULE_DEVICE_TABLE(of, bt1_pcie_of_match); static struct platform_driver bt1_pcie_driver = { .probe = bt1_pcie_probe, - .remove_new = bt1_pcie_remove, + .remove = bt1_pcie_remove, .driver = { .name = "bt1-pcie", .of_match_table = bt1_pcie_of_match, diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 43ba5c6738df..f3ac7d46a855 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -268,6 +268,20 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, return -EINVAL; } +static u64 dw_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr, + size_t *pci_size, size_t *offset) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + u64 mask = pci->region_align - 1; + size_t ofst = pci_addr & mask; + + *pci_size = ALIGN(ofst + *pci_size, epc->mem->window.page_size); + *offset = ofst; + + return pci_addr & ~mask; +} + static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t addr) { @@ -280,6 +294,7 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, if (ret < 0) return; + ep->outbound_addr[atu_index] = 0; dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index); clear_bit(atu_index, ep->ob_window_map); } @@ -444,6 +459,7 @@ static const struct pci_epc_ops epc_ops = { .write_header = dw_pcie_ep_write_header, .set_bar = dw_pcie_ep_set_bar, .clear_bar = dw_pcie_ep_clear_bar, + .align_addr = dw_pcie_ep_align_addr, .map_addr = dw_pcie_ep_map_addr, .unmap_addr = dw_pcie_ep_unmap_addr, .set_msi = dw_pcie_ep_set_msi, @@ -488,7 +504,8 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, u32 msg_addr_lower, msg_addr_upper, reg; struct dw_pcie_ep_func *ep_func; struct pci_epc *epc = ep->epc; - unsigned int aligned_offset; + size_t map_size = sizeof(u32); + size_t offset; u16 msg_ctrl, msg_data; bool has_upper; u64 msg_addr; @@ -516,14 +533,13 @@ int 
dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, } msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower; - aligned_offset = msg_addr & (epc->mem->window.page_size - 1); - msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); + msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset); ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, - epc->mem->window.page_size); + map_size); if (ret) return ret; - writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset); + writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset); dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys); @@ -574,8 +590,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, struct pci_epf_msix_tbl *msix_tbl; struct dw_pcie_ep_func *ep_func; struct pci_epc *epc = ep->epc; + size_t map_size = sizeof(u32); + size_t offset; u32 reg, msg_data, vec_ctrl; - unsigned int aligned_offset; u32 tbl_offset; u64 msg_addr; int ret; @@ -600,14 +617,13 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, return -EPERM; } - aligned_offset = msg_addr & (epc->mem->window.page_size - 1); - msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); + msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset); ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, - epc->mem->window.page_size); + map_size); if (ret) return ret; - writel(msg_data, ep->msi_mem + aligned_offset); + writel(msg_data, ep->msi_mem + offset); dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys); @@ -689,7 +705,7 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci) * for 1 MB BAR size only. */ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) - dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); + dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4)); } dw_pcie_setup(pci); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 3e41865c7290..d2291c3ceb8b 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -474,8 +474,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) if (pci_msi_enabled()) { pp->has_msi_ctrl = !(pp->ops->msi_init || - of_property_read_bool(np, "msi-parent") || - of_property_read_bool(np, "msi-map")); + of_property_present(np, "msi-parent") || + of_property_present(np, "msi-map")); /* * For the has_msi_ctrl case the default assignment is handled diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 7a11c618b9d9..615a0e3e6d7e 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -439,7 +439,7 @@ MODULE_DEVICE_TABLE(of, histb_pcie_of_match); static struct platform_driver histb_pcie_platform_driver = { .probe = histb_pcie_probe, - .remove_new = histb_pcie_remove, + .remove = histb_pcie_remove, .driver = { .name = "histb-pcie", .of_match_table = histb_pcie_of_match, diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index 676d2aba4fbd..9b53b8f6f268 100644 --- a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -443,7 +443,7 @@ static const struct of_device_id of_intel_pcie_match[] = { static struct platform_driver intel_pcie_driver = { .probe = intel_pcie_probe, - .remove_new = intel_pcie_remove, + .remove = intel_pcie_remove, .driver = { .name = "intel-gw-pcie", .of_match_table = 
of_intel_pcie_match, diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index 85a2c77b1835..1b2088acb538 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -769,7 +769,7 @@ static int kirin_pcie_probe(struct platform_device *pdev) static struct platform_driver kirin_pcie_driver = { .probe = kirin_pcie_probe, - .remove_new = kirin_pcie_remove, + .remove = kirin_pcie_remove, .driver = { .name = "kirin-pcie", .of_match_table = kirin_pcie_match, diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index e588fcc54589..d663c5206450 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -937,7 +937,7 @@ MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match); static struct platform_driver qcom_pcie_ep_driver = { .probe = qcom_pcie_ep_probe, - .remove_new = qcom_pcie_ep_remove, + .remove = qcom_pcie_ep_remove, .driver = { .name = "qcom-pcie-ep", .of_match_table = qcom_pcie_ep_match, diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c index 3a5511c3f7d9..fc872dd35029 100644 --- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c +++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c @@ -775,7 +775,7 @@ static struct platform_driver rcar_gen4_pcie_driver = { .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, .probe = rcar_gen4_pcie_probe, - .remove_new = rcar_gen4_pcie_remove, + .remove = rcar_gen4_pcie_remove, }; module_platform_driver(rcar_gen4_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index c1394f2ab63f..f4bd7f5275cf 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -2493,7 +2493,7 @@ static const struct dev_pm_ops tegra_pcie_dw_pm_ops = { static struct platform_driver tegra_pcie_dw_driver = { .probe = tegra_pcie_dw_probe, - .remove_new = tegra_pcie_dw_remove, + .remove = tegra_pcie_dw_remove, .shutdown = tegra_pcie_dw_shutdown, .driver = { .name = "tegra194-pcie", diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index a598a98247ce..a29796cce420 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -1996,7 +1996,7 @@ static struct platform_driver advk_pcie_driver = { .of_match_table = advk_pcie_of_match_table, }, .probe = advk_pcie_probe, - .remove_new = advk_pcie_remove, + .remove = advk_pcie_remove, }; module_platform_driver(advk_pcie_driver); diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c index 5f06f94db7b1..4051b9b61dac 100644 --- a/drivers/pci/controller/pci-host-generic.c +++ b/drivers/pci/controller/pci-host-generic.c @@ -82,7 +82,7 @@ static struct platform_driver gen_pci_driver = { .of_match_table = gen_pci_of_match, }, .probe = pci_host_common_probe, - .remove_new = pci_host_common_remove, + .remove = pci_host_common_remove, }; module_platform_driver(gen_pci_driver); diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index 29fe09c99e7d..46d3afe1d308 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -1727,7 +1727,7 @@ static struct platform_driver mvebu_pcie_driver = { .pm = &mvebu_pcie_pm_ops, }, .probe = mvebu_pcie_probe, - .remove_new = mvebu_pcie_remove, + .remove = mvebu_pcie_remove, }; module_platform_driver(mvebu_pcie_driver); diff 
--git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index d7517c3976e7..b3cdbc5927de 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -1460,7 +1460,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie) pcie->cs = *res; /* constrain configuration space to 4 KiB */ - pcie->cs.end = pcie->cs.start + SZ_4K - 1; + resource_set_size(&pcie->cs, SZ_4K); pcie->cfg = devm_ioremap_resource(dev, &pcie->cs); if (IS_ERR(pcie->cfg)) { @@ -2800,6 +2800,6 @@ static struct platform_driver tegra_pcie_driver = { .pm = &tegra_pcie_pm_ops, }, .probe = tegra_pcie_probe, - .remove_new = tegra_pcie_remove, + .remove = tegra_pcie_remove, }; module_platform_driver(tegra_pcie_driver); diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c index 06a9855cb431..f1bd5de67997 100644 --- a/drivers/pci/controller/pci-thunder-pem.c +++ b/drivers/pci/controller/pci-thunder-pem.c @@ -400,9 +400,9 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg) * Reserve 64K size PEM specific resources. The full 16M range * size is required for thunder_pem_init() call. */ - res_pem->end = res_pem->start + SZ_64K - 1; + resource_set_size(res_pem, SZ_64K); thunder_pem_reserve_range(dev, root->segment, res_pem); - res_pem->end = res_pem->start + SZ_16M - 1; + resource_set_size(res_pem, SZ_16M); /* Reserve PCI configuration space as well. */ thunder_pem_reserve_range(dev, root->segment, &cfg->res); diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c index 3ce38dfd0d29..88c0977bc41a 100644 --- a/drivers/pci/controller/pci-xgene-msi.c +++ b/drivers/pci/controller/pci-xgene-msi.c @@ -518,7 +518,7 @@ static struct platform_driver xgene_msi_driver = { .of_match_table = xgene_msi_match_table, }, .probe = xgene_msi_probe, - .remove_new = xgene_msi_remove, + .remove = xgene_msi_remove, }; static int __init xgene_pcie_msi_init(void) diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c index e36a6e158d23..e1cee3c0575f 100644 --- a/drivers/pci/controller/pcie-altera-msi.c +++ b/drivers/pci/controller/pcie-altera-msi.c @@ -267,7 +267,7 @@ static struct platform_driver altera_msi_driver = { .of_match_table = altera_msi_of_match, }, .probe = altera_msi_probe, - .remove_new = altera_msi_remove, + .remove = altera_msi_remove, }; static int __init altera_msi_init(void) diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index 650b2dd81c48..eb55a7f8573a 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -815,10 +815,10 @@ static void altera_pcie_remove(struct platform_device *pdev) } static struct platform_driver altera_pcie_driver = { - .probe = altera_pcie_probe, - .remove_new = altera_pcie_remove, + .probe = altera_pcie_probe, + .remove = altera_pcie_remove, .driver = { - .name = "altera-pcie", + .name = "altera-pcie", .of_match_table = altera_pcie_of_match, }, }; diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 9321280f6edb..e733a27dc8df 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -1928,7 +1928,7 @@ static const struct dev_pm_ops brcm_pcie_pm_ops = { static struct platform_driver brcm_pcie_driver = { .probe = brcm_pcie_probe, - .remove_new = brcm_pcie_remove, + .remove = brcm_pcie_remove, .driver = { .name = "brcm-pcie", .of_match_table = brcm_pcie_match, 
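As an aside on the pci-tegra and thunder-pem hunks above: resource_set_size() replaces the open-coded end arithmetic. A sketch of what the helper amounts to (the in-tree definition lives in include/linux/ioport.h; assume it behaves as shown):

/* Sketch: sizes a resource to 'size' bytes starting at res->start. */
static inline void resource_set_size(struct resource *res, resource_size_t size)
{
	res->end = res->start + size - 1;
}

so "pcie->cs.end = pcie->cs.start + SZ_4K - 1" and "resource_set_size(&pcie->cs, SZ_4K)" are equivalent.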
diff --git a/drivers/pci/controller/pcie-hisi-error.c b/drivers/pci/controller/pcie-hisi-error.c index ad9d5ffcd9e3..aaf1ed2b6e59 100644 --- a/drivers/pci/controller/pcie-hisi-error.c +++ b/drivers/pci/controller/pcie-hisi-error.c @@ -317,7 +317,7 @@ static struct platform_driver hisi_pcie_error_handler_driver = { .acpi_match_table = hisi_pcie_acpi_match, }, .probe = hisi_pcie_error_handler_probe, - .remove_new = hisi_pcie_error_handler_remove, + .remove = hisi_pcie_error_handler_remove, }; module_platform_driver(hisi_pcie_error_handler_driver); diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c index 4e6aa882a567..0cb78c583c7e 100644 --- a/drivers/pci/controller/pcie-iproc-platform.c +++ b/drivers/pci/controller/pcie-iproc-platform.c @@ -134,7 +134,7 @@ static struct platform_driver iproc_pltfm_pcie_driver = { .of_match_table = of_match_ptr(iproc_pcie_of_match_table), }, .probe = iproc_pltfm_pcie_probe, - .remove_new = iproc_pltfm_pcie_remove, + .remove = iproc_pltfm_pcie_remove, .shutdown = iproc_pltfm_pcie_shutdown, }; module_platform_driver(iproc_pltfm_pcie_driver); diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index 66ce4b5d309b..969f62e9cf01 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -1225,7 +1225,7 @@ MODULE_DEVICE_TABLE(of, mtk_pcie_of_match); static struct platform_driver mtk_pcie_driver = { .probe = mtk_pcie_probe, - .remove_new = mtk_pcie_remove, + .remove = mtk_pcie_remove, .driver = { .name = "mtk-pcie-gen3", .of_match_table = mtk_pcie_of_match, diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 7f7d04c2ea57..3bcfc4e58ba2 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -1235,7 +1235,7 @@ MODULE_DEVICE_TABLE(of, mtk_pcie_ids); static struct platform_driver mtk_pcie_driver = { .probe = mtk_pcie_probe, - .remove_new = mtk_pcie_remove, + .remove = mtk_pcie_remove, .driver = { .name = "mtk-pcie", .of_match_table = mtk_pcie_ids, diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c index 9b4754a45515..776caa0b1011 100644 --- a/drivers/pci/controller/pcie-mt7621.c +++ b/drivers/pci/controller/pcie-mt7621.c @@ -541,7 +541,7 @@ MODULE_DEVICE_TABLE(of, mt7621_pcie_ids); static struct platform_driver mt7621_pcie_driver = { .probe = mt7621_pcie_probe, - .remove_new = mt7621_pcie_remove, + .remove = mt7621_pcie_remove, .driver = { .name = "mt7621-pci", .of_match_table = mt7621_pcie_ids, diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index cbec71114825..06cdb68f0920 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -1050,7 +1050,7 @@ static struct platform_driver rockchip_pcie_driver = { .pm = &rockchip_pcie_pm_ops, }, .probe = rockchip_pcie_probe, - .remove_new = rockchip_pcie_remove, + .remove = rockchip_pcie_remove, }; module_platform_driver(rockchip_pcie_driver); diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index a8ae14474dd0..8d6e2a89b067 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -916,6 +916,6 @@ static struct platform_driver nwl_pcie_driver = { .of_match_table = nwl_pcie_of_match, }, .probe = nwl_pcie_probe, - .remove_new = nwl_pcie_remove, + .remove = 
nwl_pcie_remove, }; builtin_platform_driver(nwl_pcie_driver); diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c index c9933ecf6833..e73c1b7bc8ef 100644 --- a/drivers/pci/controller/plda/pcie-starfive.c +++ b/drivers/pci/controller/plda/pcie-starfive.c @@ -404,6 +404,9 @@ static int starfive_pcie_probe(struct platform_device *pdev) if (ret) return ret; + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + plda->host_ops = &sf_host_ops; plda->num_events = PLDA_MAX_EVENT_NUM; /* mask doorbell event */ @@ -413,11 +416,12 @@ static int starfive_pcie_probe(struct platform_device *pdev) plda->events_bitmap <<= PLDA_NUM_DMA_EVENTS; ret = plda_pcie_host_init(&pcie->plda, &starfive_pcie_ops, &stf_pcie_event); - if (ret) + if (ret) { + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); return ret; + } - pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); platform_set_drvdata(pdev, pcie); return 0; @@ -480,7 +484,7 @@ static struct platform_driver starfive_pcie_driver = { .pm = pm_sleep_ptr(&starfive_pcie_pm_ops), }, .probe = starfive_pcie_probe, - .remove_new = starfive_pcie_remove, + .remove = starfive_pcie_remove, }; module_platform_driver(starfive_pcie_driver); diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 264a180403a0..11870d1fc818 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -740,11 +740,9 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata) if (!(features & VMD_FEAT_BIOS_PM_QUIRK)) return 0; - pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL); - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR); if (!pos) - return 0; + goto out_state_change; /* * Skip if the max snoop LTR is non-zero, indicating BIOS has set it */ pci_read_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, &ltr_reg); if (!!(ltr_reg & (PCI_LTR_VALUE_MASK | PCI_LTR_SCALE_MASK))) - return 0; + goto out_state_change; /* * Set the default values to the maximum required by the platform to */ pci_write_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, ltr_reg); pci_info(pdev, "VMD: Default LTR value set by driver\n"); +out_state_change: + /* + * Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per + * PCIe r6.0, sec 5.5.4. + */ + pci_set_power_state_locked(pdev, PCI_D0); + pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL); return 0; } diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c index b133967faef8..3b59a86a764b 100644 --- a/drivers/pci/devres.c +++ b/drivers/pci/devres.c @@ -773,7 +773,7 @@ EXPORT_SYMBOL(pcim_iomap_region); * Unmap a BAR and release its region manually. Only pass BARs that were * previously mapped by pcim_iomap_region(). 
*/ -static void pcim_iounmap_region(struct pci_dev *pdev, int bar) +void pcim_iounmap_region(struct pci_dev *pdev, int bar) { struct pcim_addr_devres res_searched; @@ -784,6 +784,7 @@ static void pcim_iounmap_region(struct pci_dev *pdev, int bar) devres_release(&pdev->dev, pcim_addr_resource_release, pcim_addr_resources_match, &res_searched); } +EXPORT_SYMBOL(pcim_iounmap_region); /** * pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED) @@ -939,7 +940,7 @@ static void pcim_release_all_regions(struct pci_dev *pdev) * desired, release individual regions with pcim_release_region() or all of * them at once with pcim_release_all_regions(). */ -static int pcim_request_all_regions(struct pci_dev *pdev, const char *name) +int pcim_request_all_regions(struct pci_dev *pdev, const char *name) { int ret; int bar; @@ -957,69 +958,17 @@ err: return ret; } +EXPORT_SYMBOL(pcim_request_all_regions); /** - * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones - * (DEPRECATED) - * @pdev: PCI device to map IO resources for - * @mask: Mask of BARs to iomap - * @name: Name associated with the requests - * - * Returns: 0 on success, negative error code on failure. - * - * Request all PCI BARs and iomap regions specified by @mask. - * - * To release these resources manually, call pcim_release_region() for the - * regions and pcim_iounmap() for the mappings. - * - * This function is DEPRECATED. Don't use it in new code. Instead, use one - * of the pcim_* region request functions in combination with a pcim_* - * mapping function. - */ -int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask, - const char *name) -{ - int bar; - int ret; - void __iomem **legacy_iomap_table; - - ret = pcim_request_all_regions(pdev, name); - if (ret != 0) - return ret; - - for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { - if (!mask_contains_bar(mask, bar)) - continue; - if (!pcim_iomap(pdev, bar, 0)) - goto err; - } - - return 0; - -err: - /* - * If bar is larger than 0, then pcim_iomap() above has most likely - * failed because of -EINVAL. If it is equal 0, most likely the table - * couldn't be created, indicating -ENOMEM. - */ - ret = bar > 0 ? -EINVAL : -ENOMEM; - legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev); - - while (--bar >= 0) - pcim_iounmap(pdev, legacy_iomap_table[bar]); - - pcim_release_all_regions(pdev); - - return ret; -} -EXPORT_SYMBOL(pcim_iomap_regions_request_all); - -/** - * pcim_iounmap_regions - Unmap and release PCI BARs + * pcim_iounmap_regions - Unmap and release PCI BARs (DEPRECATED) * @pdev: PCI device to map IO resources for * @mask: Mask of BARs to unmap and release * * Unmap and release regions specified by @mask. + * + * This function is DEPRECATED. Do not use it in new code. + * Use pcim_iounmap_region() instead. */ void pcim_iounmap_regions(struct pci_dev *pdev, int mask) { diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c index 652d63df9d22..7bd7892c5222 100644 --- a/drivers/pci/doe.c +++ b/drivers/pci/doe.c @@ -146,6 +146,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb, { struct pci_dev *pdev = doe_mb->pdev; int offset = doe_mb->cap_offset; + unsigned long timeout_jiffies; size_t length, remainder; u32 val; int i; @@ -155,8 +156,19 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb, * someone other than Linux (e.g. firmware) is using the mailbox. Note * it is expected that firmware and OS will negotiate access rights via * an, as yet to be defined, method. 
+ * + * Wait up to one PCI_DOE_TIMEOUT period to allow the prior command to + * finish. Otherwise, simply error out as unable to field the request. + * + * PCIe r6.2 sec 6.30.3 states no interrupt is raised when the DOE Busy + * bit is cleared, so polling here is our best option for the moment. */ - pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val); + timeout_jiffies = jiffies + PCI_DOE_TIMEOUT; + do { + pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val); + } while (FIELD_GET(PCI_DOE_STATUS_BUSY, val) && + !time_after(jiffies, timeout_jiffies)); + if (FIELD_GET(PCI_DOE_STATUS_BUSY, val)) return -EBUSY; diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c index 1c40d2506aef..260b7de2dbd5 100644 --- a/drivers/pci/ecam.c +++ b/drivers/pci/ecam.c @@ -55,7 +55,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev, bus_range_max = resource_size(cfgres) >> bus_shift; if (bus_range > bus_range_max) { bus_range = bus_range_max; - cfg->busr.end = busr->start + bus_range - 1; + resource_set_size(&cfg->busr, bus_range); dev_warn(dev, "ECAM area %pR can only accommodate %pR (reduced from %pR desired)\n", cfgres, &cfg->busr, busr); } diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c index 7d070b1def11..54286a40bdfb 100644 --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c @@ -867,12 +867,18 @@ static int pci_epf_mhi_bind(struct pci_epf *epf) { struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf); struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; struct platform_device *pdev = to_platform_device(epc->dev.parent); struct resource *res; int ret; /* Get MMIO base address from Endpoint controller */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio"); + if (!res) { + dev_err(dev, "Failed to get \"mmio\" resource\n"); + return -ENODEV; + } + epf_mhi->mmio_phys = res->start; epf_mhi->mmio_size = resource_size(res); diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 7c2ed6eae53a..ef6677f34116 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -291,8 +291,6 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test) dma_release_channel(epf_test->dma_chan_rx); epf_test->dma_chan_rx = NULL; - - return; } static void pci_epf_test_print_rate(struct pci_epf_test *epf_test, @@ -317,91 +315,92 @@ static void pci_epf_test_print_rate(struct pci_epf_test *epf_test, static void pci_epf_test_copy(struct pci_epf_test *epf_test, struct pci_epf_test_reg *reg) { - int ret; - void __iomem *src_addr; - void __iomem *dst_addr; - phys_addr_t src_phys_addr; - phys_addr_t dst_phys_addr; + int ret = 0; struct timespec64 start, end; struct pci_epf *epf = epf_test->epf; - struct device *dev = &epf->dev; struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; + struct pci_epc_map src_map, dst_map; + u64 src_addr = reg->src_addr; + u64 dst_addr = reg->dst_addr; + size_t copy_size = reg->size; + ssize_t map_size = 0; + void *copy_buf = NULL, *buf; - src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size); - if (!src_addr) { - dev_err(dev, "Failed to allocate source address\n"); - reg->status = STATUS_SRC_ADDR_INVALID; - ret = -ENOMEM; - goto err; - } - - ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr, - reg->src_addr, reg->size); - if (ret) { - dev_err(dev, "Failed to map source address\n"); - 
reg->status = STATUS_SRC_ADDR_INVALID; - goto err_src_addr; - } - - dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size); - if (!dst_addr) { - dev_err(dev, "Failed to allocate destination address\n"); - reg->status = STATUS_DST_ADDR_INVALID; - ret = -ENOMEM; - goto err_src_map_addr; - } - - ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr, - reg->dst_addr, reg->size); - if (ret) { - dev_err(dev, "Failed to map destination address\n"); - reg->status = STATUS_DST_ADDR_INVALID; - goto err_dst_addr; - } - - ktime_get_ts64(&start); if (reg->flags & FLAG_USE_DMA) { if (epf_test->dma_private) { dev_err(dev, "Cannot transfer data using DMA\n"); ret = -EINVAL; - goto err_map_addr; + goto set_status; } - - ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, - src_phys_addr, reg->size, 0, - DMA_MEM_TO_MEM); - if (ret) - dev_err(dev, "Data transfer failed\n"); } else { - void *buf; - - buf = kzalloc(reg->size, GFP_KERNEL); - if (!buf) { + copy_buf = kzalloc(copy_size, GFP_KERNEL); + if (!copy_buf) { ret = -ENOMEM; - goto err_map_addr; + goto set_status; } - - memcpy_fromio(buf, src_addr, reg->size); - memcpy_toio(dst_addr, buf, reg->size); - kfree(buf); + buf = copy_buf; } - ktime_get_ts64(&end); - pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, &end, - reg->flags & FLAG_USE_DMA); -err_map_addr: - pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr); + while (copy_size) { + ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no, + src_addr, copy_size, &src_map); + if (ret) { + dev_err(dev, "Failed to map source address\n"); + reg->status = STATUS_SRC_ADDR_INVALID; + goto free_buf; + } + + ret = pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no, + dst_addr, copy_size, &dst_map); + if (ret) { + dev_err(dev, "Failed to map destination address\n"); + reg->status = STATUS_DST_ADDR_INVALID; + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, + &src_map); + goto free_buf; + } -err_dst_addr: - pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size); + map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size); -err_src_map_addr: - pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr); + ktime_get_ts64(&start); + if (reg->flags & FLAG_USE_DMA) { + ret = pci_epf_test_data_transfer(epf_test, + dst_map.phys_addr, src_map.phys_addr, + map_size, 0, DMA_MEM_TO_MEM); + if (ret) { + dev_err(dev, "Data transfer failed\n"); + goto unmap; + } + } else { + memcpy_fromio(buf, src_map.virt_addr, map_size); + memcpy_toio(dst_map.virt_addr, buf, map_size); + buf += map_size; + } + ktime_get_ts64(&end); -err_src_addr: - pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size); + copy_size -= map_size; + src_addr += map_size; + dst_addr += map_size; -err: + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map); + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map); + map_size = 0; + } + + pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, + &end, reg->flags & FLAG_USE_DMA); + +unmap: + if (map_size) { + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map); + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map); + } + +free_buf: + kfree(copy_buf); + +set_status: if (!ret) reg->status |= STATUS_COPY_SUCCESS; else @@ -411,82 +410,89 @@ err: static void pci_epf_test_read(struct pci_epf_test *epf_test, struct pci_epf_test_reg *reg) { - int ret; - void __iomem *src_addr; - void *buf; + int ret = 0; + void *src_buf, *buf; u32 crc32; - phys_addr_t phys_addr; + struct pci_epc_map map; 
phys_addr_t dst_phys_addr; struct timespec64 start, end; struct pci_epf *epf = epf_test->epf; - struct device *dev = &epf->dev; struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; struct device *dma_dev = epf->epc->dev.parent; + u64 src_addr = reg->src_addr; + size_t src_size = reg->size; + ssize_t map_size = 0; - src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); - if (!src_addr) { - dev_err(dev, "Failed to allocate address\n"); - reg->status = STATUS_SRC_ADDR_INVALID; + src_buf = kzalloc(src_size, GFP_KERNEL); + if (!src_buf) { ret = -ENOMEM; - goto err; + goto set_status; } + buf = src_buf; - ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, - reg->src_addr, reg->size); - if (ret) { - dev_err(dev, "Failed to map address\n"); - reg->status = STATUS_SRC_ADDR_INVALID; - goto err_addr; - } - - buf = kzalloc(reg->size, GFP_KERNEL); - if (!buf) { - ret = -ENOMEM; - goto err_map_addr; - } + while (src_size) { + ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no, + src_addr, src_size, &map); + if (ret) { + dev_err(dev, "Failed to map address\n"); + reg->status = STATUS_SRC_ADDR_INVALID; + goto free_buf; + } - if (reg->flags & FLAG_USE_DMA) { - dst_phys_addr = dma_map_single(dma_dev, buf, reg->size, - DMA_FROM_DEVICE); - if (dma_mapping_error(dma_dev, dst_phys_addr)) { - dev_err(dev, "Failed to map destination buffer addr\n"); - ret = -ENOMEM; - goto err_dma_map; + map_size = map.pci_size; + if (reg->flags & FLAG_USE_DMA) { + dst_phys_addr = dma_map_single(dma_dev, buf, map_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(dma_dev, dst_phys_addr)) { + dev_err(dev, + "Failed to map destination buffer addr\n"); + ret = -ENOMEM; + goto unmap; + } + + ktime_get_ts64(&start); + ret = pci_epf_test_data_transfer(epf_test, + dst_phys_addr, map.phys_addr, + map_size, src_addr, DMA_DEV_TO_MEM); + if (ret) + dev_err(dev, "Data transfer failed\n"); + ktime_get_ts64(&end); + + dma_unmap_single(dma_dev, dst_phys_addr, map_size, + DMA_FROM_DEVICE); + + if (ret) + goto unmap; + } else { + ktime_get_ts64(&start); + memcpy_fromio(buf, map.virt_addr, map_size); + ktime_get_ts64(&end); } - ktime_get_ts64(&start); - ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, - phys_addr, reg->size, - reg->src_addr, DMA_DEV_TO_MEM); - if (ret) - dev_err(dev, "Data transfer failed\n"); - ktime_get_ts64(&end); + src_size -= map_size; + src_addr += map_size; + buf += map_size; - dma_unmap_single(dma_dev, dst_phys_addr, reg->size, - DMA_FROM_DEVICE); - } else { - ktime_get_ts64(&start); - memcpy_fromio(buf, src_addr, reg->size); - ktime_get_ts64(&end); + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map); + map_size = 0; } - pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, &end, - reg->flags & FLAG_USE_DMA); + pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, + &end, reg->flags & FLAG_USE_DMA); - crc32 = crc32_le(~0, buf, reg->size); + crc32 = crc32_le(~0, src_buf, reg->size); if (crc32 != reg->checksum) ret = -EIO; -err_dma_map: - kfree(buf); - -err_map_addr: - pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); +unmap: + if (map_size) + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map); -err_addr: - pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size); +free_buf: + kfree(src_buf); -err: +set_status: if (!ret) reg->status |= STATUS_READ_SUCCESS; else @@ -496,71 +502,79 @@ err: static void pci_epf_test_write(struct pci_epf_test *epf_test, struct pci_epf_test_reg *reg) { - int ret; - void __iomem *dst_addr; - void *buf; 
- phys_addr_t phys_addr; + int ret = 0; + void *dst_buf, *buf; + struct pci_epc_map map; phys_addr_t src_phys_addr; struct timespec64 start, end; struct pci_epf *epf = epf_test->epf; - struct device *dev = &epf->dev; struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; struct device *dma_dev = epf->epc->dev.parent; + u64 dst_addr = reg->dst_addr; + size_t dst_size = reg->size; + ssize_t map_size = 0; - dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); - if (!dst_addr) { - dev_err(dev, "Failed to allocate address\n"); - reg->status = STATUS_DST_ADDR_INVALID; + dst_buf = kzalloc(dst_size, GFP_KERNEL); + if (!dst_buf) { ret = -ENOMEM; - goto err; + goto set_status; } + get_random_bytes(dst_buf, dst_size); + reg->checksum = crc32_le(~0, dst_buf, dst_size); + buf = dst_buf; - ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, - reg->dst_addr, reg->size); - if (ret) { - dev_err(dev, "Failed to map address\n"); - reg->status = STATUS_DST_ADDR_INVALID; - goto err_addr; - } - - buf = kzalloc(reg->size, GFP_KERNEL); - if (!buf) { - ret = -ENOMEM; - goto err_map_addr; - } - - get_random_bytes(buf, reg->size); - reg->checksum = crc32_le(~0, buf, reg->size); - - if (reg->flags & FLAG_USE_DMA) { - src_phys_addr = dma_map_single(dma_dev, buf, reg->size, - DMA_TO_DEVICE); - if (dma_mapping_error(dma_dev, src_phys_addr)) { - dev_err(dev, "Failed to map source buffer addr\n"); - ret = -ENOMEM; - goto err_dma_map; + while (dst_size) { + ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no, + dst_addr, dst_size, &map); + if (ret) { + dev_err(dev, "Failed to map address\n"); + reg->status = STATUS_DST_ADDR_INVALID; + goto free_buf; } - ktime_get_ts64(&start); + map_size = map.pci_size; + if (reg->flags & FLAG_USE_DMA) { + src_phys_addr = dma_map_single(dma_dev, buf, map_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, src_phys_addr)) { + dev_err(dev, + "Failed to map source buffer addr\n"); + ret = -ENOMEM; + goto unmap; + } + + ktime_get_ts64(&start); + + ret = pci_epf_test_data_transfer(epf_test, + map.phys_addr, src_phys_addr, + map_size, dst_addr, + DMA_MEM_TO_DEV); + if (ret) + dev_err(dev, "Data transfer failed\n"); + ktime_get_ts64(&end); + + dma_unmap_single(dma_dev, src_phys_addr, map_size, + DMA_TO_DEVICE); + + if (ret) + goto unmap; + } else { + ktime_get_ts64(&start); + memcpy_toio(map.virt_addr, buf, map_size); + ktime_get_ts64(&end); + } - ret = pci_epf_test_data_transfer(epf_test, phys_addr, - src_phys_addr, reg->size, - reg->dst_addr, - DMA_MEM_TO_DEV); - if (ret) - dev_err(dev, "Data transfer failed\n"); - ktime_get_ts64(&end); + dst_size -= map_size; + dst_addr += map_size; + buf += map_size; - dma_unmap_single(dma_dev, src_phys_addr, reg->size, - DMA_TO_DEVICE); - } else { - ktime_get_ts64(&start); - memcpy_toio(dst_addr, buf, reg->size); - ktime_get_ts64(&end); + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map); + map_size = 0; } - pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, &end, - reg->flags & FLAG_USE_DMA); + pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, + &end, reg->flags & FLAG_USE_DMA); /* * wait 1ms inorder for the write to complete. 
Without this delay L3 @@ -568,16 +582,14 @@ static void pci_epf_test_write(struct pci_epf_test *epf_test, */ usleep_range(1000, 2000); -err_dma_map: - kfree(buf); - -err_map_addr: - pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); +unmap: + if (map_size) + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map); -err_addr: - pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size); +free_buf: + kfree(dst_buf); -err: +set_status: if (!ret) reg->status |= STATUS_WRITE_SUCCESS; else @@ -786,7 +798,7 @@ static void pci_epf_test_epc_deinit(struct pci_epf *epf) { struct pci_epf_test *epf_test = epf_get_drvdata(epf); - cancel_delayed_work(&epf_test->cmd_handler); + cancel_delayed_work_sync(&epf_test->cmd_handler); pci_epf_test_clean_dma_chan(epf_test); pci_epf_test_clear_bar(epf); } @@ -917,7 +929,7 @@ static void pci_epf_test_unbind(struct pci_epf *epf) struct pci_epf_test *epf_test = epf_get_drvdata(epf); struct pci_epc *epc = epf->epc; - cancel_delayed_work(&epf_test->cmd_handler); + cancel_delayed_work_sync(&epf_test->cmd_handler); if (epc->init_complete) { pci_epf_test_clean_dma_chan(epf_test); pci_epf_test_clear_bar(epf); diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index 17f007109255..bed7c7d1fe3c 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -128,6 +128,18 @@ enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features } EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar); +static bool pci_epc_function_is_valid(struct pci_epc *epc, + u8 func_no, u8 vfunc_no) +{ + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) + return false; + + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + return false; + + return true; +} + /** * pci_epc_get_features() - get the features supported by EPC * @epc: the features supported by *this* EPC device will be returned @@ -145,10 +157,7 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc, { const struct pci_epc_features *epc_features; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) - return NULL; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return NULL; if (!epc->ops->get_features) @@ -218,10 +227,7 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { int ret; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) - return -EINVAL; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return -EINVAL; if (!epc->ops->raise_irq) @@ -262,10 +268,7 @@ int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { int ret; - if (IS_ERR_OR_NULL(epc)) - return -EINVAL; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return -EINVAL; if (!epc->ops->map_msi_irq) @@ -293,10 +296,7 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { int interrupt; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) - return 0; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return 0; if (!epc->ops->get_msi) @@ -329,11 +329,10 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts) int ret; u8 encode_int; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || - interrupts < 1 || 
interrupts > 32) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return -EINVAL; - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (interrupts < 1 || interrupts > 32) return -EINVAL; if (!epc->ops->set_msi) @@ -361,10 +360,7 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { int interrupt; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) - return 0; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return 0; if (!epc->ops->get_msix) @@ -397,11 +393,10 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { int ret; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || - interrupts < 1 || interrupts > 2048) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return -EINVAL; - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (interrupts < 1 || interrupts > 2048) return -EINVAL; if (!epc->ops->set_msix) @@ -428,10 +423,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix); void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr) { - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) - return; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return; if (!epc->ops->unmap_addr) @@ -459,10 +451,7 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { int ret; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) - return -EINVAL; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return -EINVAL; if (!epc->ops->map_addr) @@ -478,6 +467,109 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, EXPORT_SYMBOL_GPL(pci_epc_map_addr); /** + * pci_epc_mem_map() - allocate and map a PCI address to a CPU address + * @epc: the EPC device on which the CPU address is to be allocated and mapped + * @func_no: the physical endpoint function number in the EPC device + * @vfunc_no: the virtual endpoint function number in the physical function + * @pci_addr: PCI address to which the CPU address should be mapped + * @pci_size: the number of bytes to map starting from @pci_addr + * @map: where to return the mapping information + * + * Allocate a controller memory address region and map it to a RC PCI address + * region, taking into account the controller physical address mapping + * constraints using the controller operation align_addr(). If this operation is + * not defined, we assume that there are no alignment constraints for the + * mapping. + * + * The effective size of the PCI address range mapped from @pci_addr is + * indicated by @map->pci_size. This size may be less than the requested + * @pci_size. The local virtual CPU address for the mapping is indicated by + * @map->virt_addr (@map->phys_addr indicates the physical address). + * The size and CPU address of the controller memory allocated and mapped are + * respectively indicated by @map->map_size and @map->virt_base (and + * @map->phys_base for the physical address of @map->virt_base). + * + * Returns 0 on success and a negative error code in case of error. 
+ */ +int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u64 pci_addr, size_t pci_size, struct pci_epc_map *map) +{ + size_t map_size = pci_size; + size_t map_offset = 0; + int ret; + + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) + return -EINVAL; + + if (!pci_size || !map) + return -EINVAL; + + /* + * Align the PCI address to map. If the controller defines the + * .align_addr() operation, use it to determine the PCI address to map + * and the size of the mapping. Otherwise, assume that the controller + * has no alignment constraint. + */ + memset(map, 0, sizeof(*map)); + map->pci_addr = pci_addr; + if (epc->ops->align_addr) + map->map_pci_addr = + epc->ops->align_addr(epc, pci_addr, + &map_size, &map_offset); + else + map->map_pci_addr = pci_addr; + map->map_size = map_size; + if (map->map_pci_addr + map->map_size < pci_addr + pci_size) + map->pci_size = map->map_pci_addr + map->map_size - pci_addr; + else + map->pci_size = pci_size; + + map->virt_base = pci_epc_mem_alloc_addr(epc, &map->phys_base, + map->map_size); + if (!map->virt_base) + return -ENOMEM; + + map->phys_addr = map->phys_base + map_offset; + map->virt_addr = map->virt_base + map_offset; + + ret = pci_epc_map_addr(epc, func_no, vfunc_no, map->phys_base, + map->map_pci_addr, map->map_size); + if (ret) { + pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base, + map->map_size); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(pci_epc_mem_map); + +/** + * pci_epc_mem_unmap() - unmap and free a CPU address region + * @epc: the EPC device on which the CPU address is allocated and mapped + * @func_no: the physical endpoint function number in the EPC device + * @vfunc_no: the virtual endpoint function number in the physical function + * @map: the mapping information + * + * Unmap and free a CPU address region that was allocated and mapped with + * pci_epc_mem_map(). 
+ */ +void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + struct pci_epc_map *map) +{ + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) + return; + + if (!map || !map->virt_base) + return; + + pci_epc_unmap_addr(epc, func_no, vfunc_no, map->phys_base); + pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base, + map->map_size); +} +EXPORT_SYMBOL_GPL(pci_epc_mem_unmap); + +/** * pci_epc_clear_bar() - reset the BAR * @epc: the EPC device for which the BAR has to be cleared * @func_no: the physical endpoint function number in the EPC device @@ -489,12 +581,11 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr); void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar) { - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || - (epf_bar->barno == BAR_5 && - epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return; - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (epf_bar->barno == BAR_5 && + epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) return; if (!epc->ops->clear_bar) @@ -521,18 +612,16 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, int ret; int flags = epf_bar->flags; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || - (epf_bar->barno == BAR_5 && - flags & PCI_BASE_ADDRESS_MEM_TYPE_64) || + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) + return -EINVAL; + + if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) || (flags & PCI_BASE_ADDRESS_SPACE_IO && flags & PCI_BASE_ADDRESS_IO_MASK) || (upper_32_bits(epf_bar->size) && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))) return -EINVAL; - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) - return -EINVAL; - if (!epc->ops->set_bar) return 0; @@ -561,10 +650,7 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { int ret; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) - return -EINVAL; - - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return -EINVAL; /* Only Virtual Function #1 has deviceID */ @@ -660,18 +746,18 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf, if (IS_ERR_OR_NULL(epc) || !epf) return; + mutex_lock(&epc->list_lock); if (type == PRIMARY_INTERFACE) { func_no = epf->func_no; list = &epf->list; + epf->epc = NULL; } else { func_no = epf->sec_epc_func_no; list = &epf->sec_epc_list; + epf->sec_epc = NULL; } - - mutex_lock(&epc->list_lock); clear_bit(func_no, &epc->function_num_map); list_del(list); - epf->epc = NULL; mutex_unlock(&epc->list_lock); } EXPORT_SYMBOL_GPL(pci_epc_remove_epf); @@ -837,11 +923,10 @@ EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify); void pci_epc_destroy(struct pci_epc *epc) { pci_ep_cfs_remove_epc_group(epc->group); - device_unregister(&epc->dev); - #ifdef CONFIG_PCI_DOMAINS_GENERIC - pci_bus_release_domain_nr(&epc->dev, epc->domain_nr); + pci_bus_release_domain_nr(epc->dev.parent, epc->domain_nr); #endif + device_unregister(&epc->dev); } EXPORT_SYMBOL_GPL(pci_epc_destroy); diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c index a9c028f58da1..218a60e945db 100644 --- a/drivers/pci/endpoint/pci-epc-mem.c +++ b/drivers/pci/endpoint/pci-epc-mem.c @@ -178,7 +178,7 @@ EXPORT_SYMBOL_GPL(pci_epc_mem_exit); void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, phys_addr_t *phys_addr, size_t size) { - void __iomem 
*virt_addr = NULL; + void __iomem *virt_addr; struct pci_epc_mem *mem; unsigned int page_shift; size_t align_size; @@ -188,10 +188,13 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, for (i = 0; i < epc->num_windows; i++) { mem = epc->windows[i]; - mutex_lock(&mem->lock); + if (size > mem->window.size) + continue; + align_size = ALIGN(size, mem->window.page_size); order = pci_epc_mem_get_order(mem, align_size); + mutex_lock(&mem->lock); pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order); if (pageno >= 0) { @@ -211,7 +214,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, mutex_unlock(&mem->lock); } - return virt_addr; + return NULL; } EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr); diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig index 1472aef0fb81..123c4c7c2ab5 100644 --- a/drivers/pci/hotplug/Kconfig +++ b/drivers/pci/hotplug/Kconfig @@ -118,6 +118,16 @@ config HOTPLUG_PCI_CPCI_GENERIC When in doubt, say N. +config HOTPLUG_PCI_OCTEONEP + bool "Marvell OCTEON PCI Hotplug driver" + depends on HOTPLUG_PCI + help + Say Y here if you have an OCTEON PCIe device with a hotplug + controller. This driver enables the non-controller functions of the + device to be registered as hotplug slots. + + When in doubt, say N. + config HOTPLUG_PCI_SHPC bool "SHPC PCI Hotplug driver" help diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile index 240c99517d5e..40aaf31fe338 100644 --- a/drivers/pci/hotplug/Makefile +++ b/drivers/pci/hotplug/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o +obj-$(CONFIG_HOTPLUG_PCI_OCTEONEP) += octep_hp.o # acpiphp_ibm extends acpiphp, so should be linked afterwards. 
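For reference, a minimal sketch of the caller pattern that the new pci_epc_mem_map()/pci_epc_mem_unmap() interface enables, and that pci-epf-test now follows for its COPY/READ/WRITE tests. The helper name epf_write_chunked() and its parameters are hypothetical and not part of this series; only the map/unmap semantics documented above (map.pci_size may be smaller than the requested size) are assumed.

/* Illustrative only: write a local buffer to a host PCI address range in
 * chunks, honouring whatever mapping size the controller grants.
 */
static int epf_write_chunked(struct pci_epc *epc, struct pci_epf *epf,
			     u64 pci_addr, void *buf, size_t size)
{
	struct pci_epc_map map;
	int ret;

	while (size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      pci_addr, size, &map);
		if (ret)
			return ret;

		/* map.pci_size may be smaller than the remaining size */
		memcpy_toio(map.virt_addr, buf, map.pci_size);

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

		pci_addr += map.pci_size;
		buf += map.pci_size;
		size -= map.pci_size;
	}

	return 0;
}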
diff --git a/drivers/pci/hotplug/acpiphp_ampere_altra.c b/drivers/pci/hotplug/acpiphp_ampere_altra.c index f5c9e741c1d4..70dbc0431fc6 100644 --- a/drivers/pci/hotplug/acpiphp_ampere_altra.c +++ b/drivers/pci/hotplug/acpiphp_ampere_altra.c @@ -119,7 +119,7 @@ static struct platform_driver altra_led_driver = { .acpi_match_table = altra_led_ids, }, .probe = altra_led_probe, - .remove_new = altra_led_remove, + .remove = altra_led_remove, }; module_platform_driver(altra_led_driver); diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h index 6d8970d8c3f2..03fa39ab0c88 100644 --- a/drivers/pci/hotplug/cpci_hotplug.h +++ b/drivers/pci/hotplug/cpci_hotplug.h @@ -44,7 +44,6 @@ struct cpci_hp_controller_ops { int (*enable_irq)(void); int (*disable_irq)(void); int (*check_irq)(void *dev_id); - int (*hardware_test)(struct slot *slot, u32 value); u8 (*get_power)(struct slot *slot); int (*set_power)(struct slot *slot, int value); }; diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c index 718bc6cf12cb..ef7534a3ca40 100644 --- a/drivers/pci/hotplug/cpqphp_pci.c +++ b/drivers/pci/hotplug/cpqphp_pci.c @@ -12,8 +12,11 @@ * */ +#define pr_fmt(fmt) "cpqphp: " fmt + #include <linux/module.h> #include <linux/kernel.h> +#include <linux/printk.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/workqueue.h> @@ -132,18 +135,6 @@ int cpqhp_unconfigure_device(struct pci_func *func) return 0; } -static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 offset, u32 *value) -{ - u32 vendID = 0; - - if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID) == -1) - return -1; - if (PCI_POSSIBLE_ERROR(vendID)) - return -1; - return pci_bus_read_config_dword(bus, devfn, offset, value); -} - - /* * cpqhp_set_irq * @@ -202,13 +193,16 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_ { u16 tdevice; u32 work; - u8 tbus; + int ret = -1; ctrl->pci_bus->number = bus_num; for (tdevice = 0; tdevice < 0xFF; tdevice++) { /* Scan for access first */ - if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) + if (!pci_bus_read_dev_vendor_id(ctrl->pci_bus, tdevice, &work, 0)) + continue; + ret = pci_bus_read_config_dword(ctrl->pci_bus, tdevice, PCI_CLASS_REVISION, &work); + if (ret) continue; dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice); /* Yep we got one. Not a bridge ? */ @@ -216,23 +210,20 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_ *dev_num = tdevice; dbg("found it !\n"); return 0; - } - } - for (tdevice = 0; tdevice < 0xFF; tdevice++) { - /* Scan for access first */ - if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) - continue; - dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice); - /* Yep we got one. bridge ? */ - if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) { - pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus); - /* XXX: no recursion, wtf? */ - dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice); - return 0; + } else { + /* + * XXX: Code whose debug printout indicated + * recursion to buses underneath bridges might be + * necessary was removed because it never did + * any recursion. 
+ */ + ret = 0; + pr_warn("missing feature: bridge scan recursion not implemented\n"); } } - return -1; + + return ret; } diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c index fed1360ee9b1..6143ebf71f21 100644 --- a/drivers/pci/hotplug/cpqphp_sysfs.c +++ b/drivers/pci/hotplug/cpqphp_sysfs.c @@ -123,7 +123,6 @@ static int spew_debug_info(struct controller *ctrl, char *data, int size) struct ctrl_dbg { int size; char *data; - struct controller *ctrl; }; #define MAX_OUTPUT (4*PAGE_SIZE) diff --git a/drivers/pci/hotplug/octep_hp.c b/drivers/pci/hotplug/octep_hp.c new file mode 100644 index 000000000000..2bce7296c050 --- /dev/null +++ b/drivers/pci/hotplug/octep_hp.c @@ -0,0 +1,427 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2024 Marvell. */ + +#include <linux/cleanup.h> +#include <linux/container_of.h> +#include <linux/delay.h> +#include <linux/dev_printk.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/pci_hotplug.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#define OCTEP_HP_INTR_OFFSET(x) (0x20400 + ((x) << 4)) +#define OCTEP_HP_INTR_VECTOR(x) (16 + (x)) +#define OCTEP_HP_DRV_NAME "octep_hp" + +/* + * Type of MSI-X interrupts. OCTEP_HP_INTR_VECTOR() and + * OCTEP_HP_INTR_OFFSET() generate the vector and offset for an interrupt + * type. + */ +enum octep_hp_intr_type { + OCTEP_HP_INTR_INVALID = -1, + OCTEP_HP_INTR_ENA = 0, + OCTEP_HP_INTR_DIS = 1, + OCTEP_HP_INTR_MAX = 2, +}; + +struct octep_hp_cmd { + struct list_head list; + enum octep_hp_intr_type intr_type; + u64 intr_val; +}; + +struct octep_hp_slot { + struct list_head list; + struct hotplug_slot slot; + u16 slot_number; + struct pci_dev *hp_pdev; + unsigned int hp_devfn; + struct octep_hp_controller *ctrl; +}; + +struct octep_hp_intr_info { + enum octep_hp_intr_type type; + int number; + char name[16]; +}; + +struct octep_hp_controller { + void __iomem *base; + struct pci_dev *pdev; + struct octep_hp_intr_info intr[OCTEP_HP_INTR_MAX]; + struct work_struct work; + struct list_head slot_list; + struct mutex slot_lock; /* Protects slot_list */ + struct list_head hp_cmd_list; + spinlock_t hp_cmd_lock; /* Protects hp_cmd_list */ +}; + +static void octep_hp_enable_pdev(struct octep_hp_controller *hp_ctrl, + struct octep_hp_slot *hp_slot) +{ + guard(mutex)(&hp_ctrl->slot_lock); + if (hp_slot->hp_pdev) { + pci_dbg(hp_slot->hp_pdev, "Slot %s is already enabled\n", + hotplug_slot_name(&hp_slot->slot)); + return; + } + + /* Scan the device and add it to the bus */ + hp_slot->hp_pdev = pci_scan_single_device(hp_ctrl->pdev->bus, + hp_slot->hp_devfn); + pci_bus_assign_resources(hp_ctrl->pdev->bus); + pci_bus_add_device(hp_slot->hp_pdev); + + dev_dbg(&hp_slot->hp_pdev->dev, "Enabled slot %s\n", + hotplug_slot_name(&hp_slot->slot)); +} + +static void octep_hp_disable_pdev(struct octep_hp_controller *hp_ctrl, + struct octep_hp_slot *hp_slot) +{ + guard(mutex)(&hp_ctrl->slot_lock); + if (!hp_slot->hp_pdev) { + pci_dbg(hp_ctrl->pdev, "Slot %s is already disabled\n", + hotplug_slot_name(&hp_slot->slot)); + return; + } + + pci_dbg(hp_slot->hp_pdev, "Disabling slot %s\n", + hotplug_slot_name(&hp_slot->slot)); + + /* Remove the device from the bus */ + pci_stop_and_remove_bus_device_locked(hp_slot->hp_pdev); + hp_slot->hp_pdev = NULL; +} + +static int 
octep_hp_enable_slot(struct hotplug_slot *slot) +{ + struct octep_hp_slot *hp_slot = + container_of(slot, struct octep_hp_slot, slot); + + octep_hp_enable_pdev(hp_slot->ctrl, hp_slot); + return 0; +} + +static int octep_hp_disable_slot(struct hotplug_slot *slot) +{ + struct octep_hp_slot *hp_slot = + container_of(slot, struct octep_hp_slot, slot); + + octep_hp_disable_pdev(hp_slot->ctrl, hp_slot); + return 0; +} + +static struct hotplug_slot_ops octep_hp_slot_ops = { + .enable_slot = octep_hp_enable_slot, + .disable_slot = octep_hp_disable_slot, +}; + +#define SLOT_NAME_SIZE 16 +static struct octep_hp_slot * +octep_hp_register_slot(struct octep_hp_controller *hp_ctrl, + struct pci_dev *pdev, u16 slot_number) +{ + char slot_name[SLOT_NAME_SIZE]; + struct octep_hp_slot *hp_slot; + int ret; + + hp_slot = kzalloc(sizeof(*hp_slot), GFP_KERNEL); + if (!hp_slot) + return ERR_PTR(-ENOMEM); + + hp_slot->ctrl = hp_ctrl; + hp_slot->hp_pdev = pdev; + hp_slot->hp_devfn = pdev->devfn; + hp_slot->slot_number = slot_number; + hp_slot->slot.ops = &octep_hp_slot_ops; + + snprintf(slot_name, sizeof(slot_name), "octep_hp_%u", slot_number); + ret = pci_hp_register(&hp_slot->slot, hp_ctrl->pdev->bus, + PCI_SLOT(pdev->devfn), slot_name); + if (ret) { + kfree(hp_slot); + return ERR_PTR(ret); + } + + pci_info(pdev, "Registered slot %s for device %s\n", + slot_name, pci_name(pdev)); + + list_add_tail(&hp_slot->list, &hp_ctrl->slot_list); + octep_hp_disable_pdev(hp_ctrl, hp_slot); + + return hp_slot; +} + +static void octep_hp_deregister_slot(void *data) +{ + struct octep_hp_slot *hp_slot = data; + struct octep_hp_controller *hp_ctrl = hp_slot->ctrl; + + pci_hp_deregister(&hp_slot->slot); + octep_hp_enable_pdev(hp_ctrl, hp_slot); + list_del(&hp_slot->list); + kfree(hp_slot); +} + +static const char *octep_hp_cmd_name(enum octep_hp_intr_type type) +{ + switch (type) { + case OCTEP_HP_INTR_ENA: + return "hotplug enable"; + case OCTEP_HP_INTR_DIS: + return "hotplug disable"; + default: + return "invalid"; + } +} + +static void octep_hp_cmd_handler(struct octep_hp_controller *hp_ctrl, + struct octep_hp_cmd *hp_cmd) +{ + struct octep_hp_slot *hp_slot; + + /* + * Enable or disable the slots based on the slot mask. + * intr_val is a bit mask where each bit represents a slot. 
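+	 * For example (illustrative, not part of this patch): an enable
+	 * command with intr_val == 0x5 (BIT(0) | BIT(2)) requests slots 0
+	 * and 2 to be enabled, while slot 1 is left untouched.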
+ */ + list_for_each_entry(hp_slot, &hp_ctrl->slot_list, list) { + if (!(hp_cmd->intr_val & BIT(hp_slot->slot_number))) + continue; + + pci_info(hp_ctrl->pdev, "Received %s command for slot %s\n", + octep_hp_cmd_name(hp_cmd->intr_type), + hotplug_slot_name(&hp_slot->slot)); + + switch (hp_cmd->intr_type) { + case OCTEP_HP_INTR_ENA: + octep_hp_enable_pdev(hp_ctrl, hp_slot); + break; + case OCTEP_HP_INTR_DIS: + octep_hp_disable_pdev(hp_ctrl, hp_slot); + break; + default: + break; + } + } +} + +static void octep_hp_work_handler(struct work_struct *work) +{ + struct octep_hp_controller *hp_ctrl; + struct octep_hp_cmd *hp_cmd; + unsigned long flags; + + hp_ctrl = container_of(work, struct octep_hp_controller, work); + + /* Process all the hotplug commands */ + spin_lock_irqsave(&hp_ctrl->hp_cmd_lock, flags); + while (!list_empty(&hp_ctrl->hp_cmd_list)) { + hp_cmd = list_first_entry(&hp_ctrl->hp_cmd_list, + struct octep_hp_cmd, list); + list_del(&hp_cmd->list); + spin_unlock_irqrestore(&hp_ctrl->hp_cmd_lock, flags); + + octep_hp_cmd_handler(hp_ctrl, hp_cmd); + kfree(hp_cmd); + + spin_lock_irqsave(&hp_ctrl->hp_cmd_lock, flags); + } + spin_unlock_irqrestore(&hp_ctrl->hp_cmd_lock, flags); +} + +static enum octep_hp_intr_type octep_hp_intr_type(struct octep_hp_intr_info *intr, + int irq) +{ + enum octep_hp_intr_type type; + + for (type = OCTEP_HP_INTR_ENA; type < OCTEP_HP_INTR_MAX; type++) { + if (intr[type].number == irq) + return type; + } + + return OCTEP_HP_INTR_INVALID; +} + +static irqreturn_t octep_hp_intr_handler(int irq, void *data) +{ + struct octep_hp_controller *hp_ctrl = data; + struct pci_dev *pdev = hp_ctrl->pdev; + enum octep_hp_intr_type type; + struct octep_hp_cmd *hp_cmd; + u64 intr_val; + + type = octep_hp_intr_type(hp_ctrl->intr, irq); + if (type == OCTEP_HP_INTR_INVALID) { + pci_err(pdev, "Invalid interrupt %d\n", irq); + return IRQ_HANDLED; + } + + /* Read and clear the interrupt */ + intr_val = readq(hp_ctrl->base + OCTEP_HP_INTR_OFFSET(type)); + writeq(intr_val, hp_ctrl->base + OCTEP_HP_INTR_OFFSET(type)); + + hp_cmd = kzalloc(sizeof(*hp_cmd), GFP_ATOMIC); + if (!hp_cmd) + return IRQ_HANDLED; + + hp_cmd->intr_val = intr_val; + hp_cmd->intr_type = type; + + /* Add the command to the list and schedule the work */ + spin_lock(&hp_ctrl->hp_cmd_lock); + list_add_tail(&hp_cmd->list, &hp_ctrl->hp_cmd_list); + spin_unlock(&hp_ctrl->hp_cmd_lock); + schedule_work(&hp_ctrl->work); + + return IRQ_HANDLED; +} + +static void octep_hp_irq_cleanup(void *data) +{ + struct octep_hp_controller *hp_ctrl = data; + + pci_free_irq_vectors(hp_ctrl->pdev); + flush_work(&hp_ctrl->work); +} + +static int octep_hp_request_irq(struct octep_hp_controller *hp_ctrl, + enum octep_hp_intr_type type) +{ + struct pci_dev *pdev = hp_ctrl->pdev; + struct octep_hp_intr_info *intr; + int irq; + + irq = pci_irq_vector(pdev, OCTEP_HP_INTR_VECTOR(type)); + if (irq < 0) + return irq; + + intr = &hp_ctrl->intr[type]; + intr->number = irq; + intr->type = type; + snprintf(intr->name, sizeof(intr->name), "octep_hp_%d", type); + + return devm_request_irq(&pdev->dev, irq, octep_hp_intr_handler, + IRQF_SHARED, intr->name, hp_ctrl); +} + +static int octep_hp_controller_setup(struct pci_dev *pdev, + struct octep_hp_controller *hp_ctrl) +{ + struct device *dev = &pdev->dev; + enum octep_hp_intr_type type; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable PCI device\n"); + + hp_ctrl->base = pcim_iomap_region(pdev, 0, OCTEP_HP_DRV_NAME); + if (IS_ERR(hp_ctrl->base)) + 
return dev_err_probe(dev, PTR_ERR(hp_ctrl->base), + "Failed to map PCI device region\n"); + + pci_set_master(pdev); + pci_set_drvdata(pdev, hp_ctrl); + + INIT_LIST_HEAD(&hp_ctrl->slot_list); + INIT_LIST_HEAD(&hp_ctrl->hp_cmd_list); + mutex_init(&hp_ctrl->slot_lock); + spin_lock_init(&hp_ctrl->hp_cmd_lock); + INIT_WORK(&hp_ctrl->work, octep_hp_work_handler); + hp_ctrl->pdev = pdev; + + ret = pci_alloc_irq_vectors(pdev, 1, + OCTEP_HP_INTR_VECTOR(OCTEP_HP_INTR_MAX), + PCI_IRQ_MSIX); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to alloc MSI-X vectors\n"); + + ret = devm_add_action(&pdev->dev, octep_hp_irq_cleanup, hp_ctrl); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to add IRQ cleanup action\n"); + + for (type = OCTEP_HP_INTR_ENA; type < OCTEP_HP_INTR_MAX; type++) { + ret = octep_hp_request_irq(hp_ctrl, type); + if (ret) + return dev_err_probe(dev, ret, + "Failed to request IRQ for vector %d\n", + OCTEP_HP_INTR_VECTOR(type)); + } + + return 0; +} + +static int octep_hp_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct octep_hp_controller *hp_ctrl; + struct pci_dev *tmp_pdev, *next; + struct octep_hp_slot *hp_slot; + u16 slot_number = 0; + int ret; + + hp_ctrl = devm_kzalloc(&pdev->dev, sizeof(*hp_ctrl), GFP_KERNEL); + if (!hp_ctrl) + return -ENOMEM; + + ret = octep_hp_controller_setup(pdev, hp_ctrl); + if (ret) + return ret; + + /* + * Register all hotplug slots. Hotplug controller is the first function + * of the PCI device. The hotplug slots are the remaining functions of + * the PCI device. The hotplug slot functions are logically removed from + * the bus during probing and are re-enabled by the driver when a + * hotplug event is received. + */ + list_for_each_entry_safe(tmp_pdev, next, &pdev->bus->devices, bus_list) { + if (tmp_pdev == pdev) + continue; + + hp_slot = octep_hp_register_slot(hp_ctrl, tmp_pdev, slot_number); + if (IS_ERR(hp_slot)) + return dev_err_probe(&pdev->dev, PTR_ERR(hp_slot), + "Failed to register hotplug slot %u\n", + slot_number); + + ret = devm_add_action(&pdev->dev, octep_hp_deregister_slot, + hp_slot); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "Failed to add action for deregistering slot %u\n", + slot_number); + slot_number++; + } + + return 0; +} + +#define PCI_DEVICE_ID_CAVIUM_OCTEP_HP_CTLR 0xa0e3 +static struct pci_device_id octep_hp_pci_map[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_OCTEP_HP_CTLR) }, + { }, +}; + +static struct pci_driver octep_hp = { + .name = OCTEP_HP_DRV_NAME, + .id_table = octep_hp_pci_map, + .probe = octep_hp_pci_probe, +}; + +module_pci_driver(octep_hp); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Marvell"); +MODULE_DESCRIPTION("Marvell OCTEON PCI Hotplug driver"); diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index dcdbfcf404dd..d603a7aa7483 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -19,6 +19,8 @@ #include <linux/types.h> #include <linux/pm_runtime.h> #include <linux/pci.h> + +#include "../pci.h" #include "pciehp.h" /* The following routines constitute the bulk of the @@ -127,6 +129,9 @@ static void remove_board(struct controller *ctrl, bool safe_removal) pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, INDICATOR_NOOP); + + /* Don't carry LBMS indications across */ + pcie_reset_lbms_count(ctrl->pcie->port); } static int pciehp_enable_slot(struct controller *ctrl); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 
736ad8baa2a5..bb5a8d9f03ad 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -319,7 +319,7 @@ int pciehp_check_link_status(struct controller *ctrl) return -1; } - pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); + __pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); if (!found) { ctrl_info(ctrl, "Slot(%s): No device found\n", diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index aaa33e8dc4c9..4be402fe9ab9 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -327,8 +327,8 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id) virtfn->resource[i].name = pci_name(virtfn); virtfn->resource[i].flags = res->flags; size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES); - virtfn->resource[i].start = res->start + size * id; - virtfn->resource[i].end = virtfn->resource[i].start + size - 1; + resource_set_range(&virtfn->resource[i], + res->start + size * id, size); rc = request_resource(res, &virtfn->resource[i]); BUG_ON(rc); } @@ -804,7 +804,7 @@ found: goto failed; } iov->barsz[i] = resource_size(res); - res->end = res->start + resource_size(res) * total - 1; + resource_set_size(res, resource_size(res) * total); pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n", res_name, res, i, total); i += bar64; diff --git a/drivers/pci/of.c b/drivers/pci/of.c index dacea3fc5128..52f770bcc481 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -728,6 +728,33 @@ out_free_name: } #endif +/** + * of_pci_supply_present() - Check if the power supply is present for the PCI + * device + * @np: Device tree node + * + * Check if the power supply for the PCI device is present in the device tree + * node or not. + * + * Return: true if at least one power supply exists; false otherwise. + */ +bool of_pci_supply_present(struct device_node *np) +{ + struct property *prop; + char *supply; + + if (!np) + return false; + + for_each_property_of_node(np, prop) { + supply = strrchr(prop->name, '-'); + if (supply && !strcmp(supply, "-supply")) + return true; + } + + return false; +} + #endif /* CONFIG_PCI */ /** diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c index 5a0b98e69795..886c236e5de6 100644 --- a/drivers/pci/of_property.c +++ b/drivers/pci/of_property.c @@ -126,7 +126,7 @@ static int of_pci_prop_ranges(struct pci_dev *pdev, struct of_changeset *ocs, if (of_pci_get_addr_flags(&res[j], &flags)) continue; - val64 = res[j].start; + val64 = pci_bus_address(pdev, &res[j] - pdev->resource); of_pci_set_address(pdev, rp[i].parent_addr, val64, 0, flags, false); if (pci_is_bridge(pdev)) { diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 5d0f4db1cab7..3e5a117f5b5d 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -521,6 +521,31 @@ static ssize_t bus_rescan_store(struct device *dev, static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL, bus_rescan_store); +static ssize_t reset_subordinate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_bus *bus = pdev->subordinate; + unsigned long val; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (kstrtoul(buf, 0, &val) < 0) + return -EINVAL; + + if (val) { + int ret = __pci_reset_bus(bus); + + if (ret) + return ret; + } + + return count; +} +static DEVICE_ATTR_WO(reset_subordinate); + #if defined(CONFIG_PM) && defined(CONFIG_ACPI) static ssize_t d3cold_allowed_store(struct device *dev, struct device_attribute *attr, @@ 
-625,6 +650,7 @@ static struct attribute *pci_dev_attrs[] = { static struct attribute *pci_bridge_attrs[] = { &dev_attr_subordinate_bus_number.attr, &dev_attr_secondary_bus_number.attr, + &dev_attr_reset_subordinate.attr, NULL, }; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 7d85c04fbba2..43c9a0e029c9 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1828,6 +1828,7 @@ int pci_save_state(struct pci_dev *dev) pci_save_dpc_state(dev); pci_save_aer_state(dev); pci_save_ptm_state(dev); + pci_save_tph_state(dev); return pci_save_vc_state(dev); } EXPORT_SYMBOL(pci_save_state); @@ -1933,6 +1934,7 @@ void pci_restore_state(struct pci_dev *dev) pci_restore_rebar_state(dev); pci_restore_dpc_state(dev); pci_restore_ptm_state(dev); + pci_restore_tph_state(dev); pci_aer_clear_status(dev); pci_restore_aer_state(dev); @@ -4740,7 +4742,7 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt) * to track link speed or width changes made by hardware itself * in attempt to correct unreliable link operation. */ - pcie_capability_write_word(pdev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS); + pcie_reset_lbms_count(pdev); return rc; } @@ -5158,6 +5160,8 @@ static void pci_dev_save_and_disable(struct pci_dev *dev) */ if (err_handler && err_handler->reset_prepare) err_handler->reset_prepare(dev); + else if (dev->driver) + pci_warn(dev, "resetting"); /* * Wake-up device prior to save. PM registers default to D0 after @@ -5191,6 +5195,8 @@ static void pci_dev_restore(struct pci_dev *dev) */ if (err_handler && err_handler->reset_done) err_handler->reset_done(dev); + else if (dev->driver) + pci_warn(dev, "reset done"); } /* dev->reset_methods[] is a 0-terminated list of indices into this array */ @@ -5880,7 +5886,7 @@ EXPORT_SYMBOL_GPL(pci_probe_reset_bus); * * Same as above except return -EAGAIN if the bus cannot be locked */ -static int __pci_reset_bus(struct pci_bus *bus) +int __pci_reset_bus(struct pci_bus *bus) { int rc; @@ -6189,38 +6195,64 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, EXPORT_SYMBOL(pcie_bandwidth_available); /** - * pcie_get_speed_cap - query for the PCI device's link speed capability + * pcie_get_supported_speeds - query Supported Link Speed Vector * @dev: PCI device to query * - * Query the PCI device speed capability. Return the maximum link speed - * supported by the device. + * Query @dev supported link speeds. + * + * Implementation Note in PCIe r6.0 sec 7.5.3.18 recommends determining + * supported link speeds using the Supported Link Speeds Vector in the Link + * Capabilities 2 Register (when available). + * + * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. + * + * Without Link Capabilities 2, i.e., prior to PCIe r3.0, Supported Link + * Speeds field in Link Capabilities is used and only 2.5 GT/s and 5.0 GT/s + * speeds were defined. + * + * For @dev without Supported Link Speed Vector, the field is synthesized + * from the Max Link Speed field in the Link Capabilities Register. + * + * Return: Supported Link Speeds Vector (+ reserved 0 at LSB). */ -enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) +u8 pcie_get_supported_speeds(struct pci_dev *dev) { u32 lnkcap2, lnkcap; + u8 speeds; /* - * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The - * implementation note there recommends using the Supported Link - * Speeds Vector in Link Capabilities 2 when supported. 
- * - * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software - * should use the Supported Link Speeds field in Link Capabilities, - * where only 2.5 GT/s and 5.0 GT/s speeds were defined. + * Speeds retain the reserved 0 at LSB before PCIe Supported Link + * Speeds Vector to allow using SLS Vector bit defines directly. */ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); + speeds = lnkcap2 & PCI_EXP_LNKCAP2_SLS; /* PCIe r3.0-compliant */ - if (lnkcap2) - return PCIE_LNKCAP2_SLS2SPEED(lnkcap2); + if (speeds) + return speeds; pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + + /* Synthesize from the Max Link Speed field */ if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB) - return PCIE_SPEED_5_0GT; + speeds = PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_2_5GB; else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB) - return PCIE_SPEED_2_5GT; + speeds = PCI_EXP_LNKCAP2_SLS_2_5GB; - return PCI_SPEED_UNKNOWN; + return speeds; +} + +/** + * pcie_get_speed_cap - query for the PCI device's link speed capability + * @dev: PCI device to query + * + * Query the PCI device speed capability. + * + * Return: the maximum link speed supported by the device. + */ +enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) +{ + return PCIE_LNKCAP2_SLS2SPEED(dev->supported_speeds); } EXPORT_SYMBOL(pcie_get_speed_cap); @@ -6649,8 +6681,7 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar, } else { r->flags &= ~IORESOURCE_SIZEALIGN; r->flags |= IORESOURCE_STARTALIGN; - r->start = align; - r->end = r->start + size - 1; + resource_set_range(r, align, size); } r->flags |= IORESOURCE_UNSET; } @@ -6896,6 +6927,8 @@ static int __init pci_setup(char *str) pci_no_domains(); } else if (!strncmp(str, "noari", 5)) { pcie_ari_disabled = true; + } else if (!strncmp(str, "notph", 5)) { + pci_no_tph(); } else if (!strncmp(str, "cbiosize=", 9)) { pci_cardbus_io_size = memparse(str + 9, &str); } else if (!strncmp(str, "cbmemsize=", 10)) { diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 14d00ce45bfa..2e40fc63ba31 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -104,6 +104,7 @@ bool pci_reset_supported(struct pci_dev *dev); void pci_init_reset_methods(struct pci_dev *dev); int pci_bridge_secondary_bus_reset(struct pci_dev *dev); int pci_bus_error_reset(struct pci_dev *dev); +int __pci_reset_bus(struct pci_bus *bus); struct pci_cap_saved_data { u16 cap_nr; @@ -323,6 +324,9 @@ void __pci_bus_assign_resources(const struct pci_bus *bus, struct list_head *realloc_head, struct list_head *fail_head); bool pci_bus_clip_resource(struct pci_dev *dev, int idx); +void pci_walk_bus_locked(struct pci_bus *top, + int (*cb)(struct pci_dev *, void *), + void *userdata); const char *pci_resource_name(struct pci_dev *dev, unsigned int i); @@ -331,6 +335,17 @@ void pci_disable_bridge_window(struct pci_dev *dev); struct pci_bus *pci_bus_get(struct pci_bus *bus); void pci_bus_put(struct pci_bus *bus); +#define PCIE_LNKCAP_SLS2SPEED(lnkcap) \ +({ \ + ((lnkcap) == PCI_EXP_LNKCAP_SLS_64_0GB ? PCIE_SPEED_64_0GT : \ + (lnkcap) == PCI_EXP_LNKCAP_SLS_32_0GB ? PCIE_SPEED_32_0GT : \ + (lnkcap) == PCI_EXP_LNKCAP_SLS_16_0GB ? PCIE_SPEED_16_0GT : \ + (lnkcap) == PCI_EXP_LNKCAP_SLS_8_0GB ? PCIE_SPEED_8_0GT : \ + (lnkcap) == PCI_EXP_LNKCAP_SLS_5_0GB ? PCIE_SPEED_5_0GT : \ + (lnkcap) == PCI_EXP_LNKCAP_SLS_2_5GB ? 
PCIE_SPEED_2_5GT : \ + PCI_SPEED_UNKNOWN); \ +}) + /* PCIe link information from Link Capabilities 2 */ #define PCIE_LNKCAP2_SLS2SPEED(lnkcap2) \ ((lnkcap2) & PCI_EXP_LNKCAP2_SLS_64_0GB ? PCIE_SPEED_64_0GT : \ @@ -341,6 +356,15 @@ void pci_bus_put(struct pci_bus *bus); (lnkcap2) & PCI_EXP_LNKCAP2_SLS_2_5GB ? PCIE_SPEED_2_5GT : \ PCI_SPEED_UNKNOWN) +#define PCIE_LNKCTL2_TLS2SPEED(lnkctl2) \ + ((lnkctl2) == PCI_EXP_LNKCTL2_TLS_64_0GT ? PCIE_SPEED_64_0GT : \ + (lnkctl2) == PCI_EXP_LNKCTL2_TLS_32_0GT ? PCIE_SPEED_32_0GT : \ + (lnkctl2) == PCI_EXP_LNKCTL2_TLS_16_0GT ? PCIE_SPEED_16_0GT : \ + (lnkctl2) == PCI_EXP_LNKCTL2_TLS_8_0GT ? PCIE_SPEED_8_0GT : \ + (lnkctl2) == PCI_EXP_LNKCTL2_TLS_5_0GT ? PCIE_SPEED_5_0GT : \ + (lnkctl2) == PCI_EXP_LNKCTL2_TLS_2_5GT ? PCIE_SPEED_2_5GT : \ + PCI_SPEED_UNKNOWN) + /* PCIe speed to Mb/s reduced by encoding overhead */ #define PCIE_SPEED2MBS_ENC(speed) \ ((speed) == PCIE_SPEED_64_0GT ? 64000*1/1 : \ @@ -373,12 +397,16 @@ static inline int pcie_dev_speed_mbps(enum pci_bus_speed speed) return -EINVAL; } +u8 pcie_get_supported_speeds(struct pci_dev *dev); const char *pci_speed_string(enum pci_bus_speed speed); -enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev); -enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev); void __pcie_print_link_status(struct pci_dev *dev, bool verbose); void pcie_report_downtraining(struct pci_dev *dev); -void pcie_update_link_speed(struct pci_bus *bus, u16 link_status); + +static inline void __pcie_update_link_speed(struct pci_bus *bus, u16 linksta) +{ + bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS]; +} +void pcie_update_link_speed(struct pci_bus *bus); /* Single Root I/O Virtualization */ struct pci_sriov { @@ -469,10 +497,18 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused) #define PCI_DEV_ADDED 0 #define PCI_DPC_RECOVERED 1 #define PCI_DPC_RECOVERING 2 +#define PCI_DEV_REMOVED 3 -static inline void pci_dev_assign_added(struct pci_dev *dev, bool added) +static inline void pci_dev_assign_added(struct pci_dev *dev) { - assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added); + smp_mb__before_atomic(); + set_bit(PCI_DEV_ADDED, &dev->priv_flags); + smp_mb__after_atomic(); +} + +static inline bool pci_dev_test_and_clear_added(struct pci_dev *dev) +{ + return test_and_clear_bit(PCI_DEV_ADDED, &dev->priv_flags); } static inline bool pci_dev_is_added(const struct pci_dev *dev) @@ -480,6 +516,11 @@ static inline bool pci_dev_is_added(const struct pci_dev *dev) return test_bit(PCI_DEV_ADDED, &dev->priv_flags); } +static inline bool pci_dev_test_and_set_removed(struct pci_dev *dev) +{ + return test_and_set_bit(PCI_DEV_REMOVED, &dev->priv_flags); +} + #ifdef CONFIG_PCIEAER #include <linux/aer.h> @@ -597,6 +638,18 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) #endif /* CONFIG_PCI_IOV */ +#ifdef CONFIG_PCIE_TPH +void pci_restore_tph_state(struct pci_dev *dev); +void pci_save_tph_state(struct pci_dev *dev); +void pci_no_tph(void); +void pci_tph_init(struct pci_dev *dev); +#else +static inline void pci_restore_tph_state(struct pci_dev *dev) { } +static inline void pci_save_tph_state(struct pci_dev *dev) { } +static inline void pci_no_tph(void) { } +static inline void pci_tph_init(struct pci_dev *dev) { } +#endif + #ifdef CONFIG_PCIE_PTM void pci_ptm_init(struct pci_dev *dev); void pci_save_ptm_state(struct pci_dev *dev); @@ -692,6 +745,17 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { } static inline void pcie_ecrc_get_policy(char *str) { } #endif 
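/*
 * Illustrative sketch, not part of this patch: a hypothetical helper showing
 * how the conversion helpers above relate Link Status and Link Control 2
 * register values to enum pci_bus_speed. Only helpers defined in this file
 * and standard <uapi/linux/pci_regs.h> constants are assumed.
 */
static inline bool pcie_link_runs_at_target(struct pci_bus *bus, u16 lnksta,
					    u16 lnkctl2)
{
	/* Cache the negotiated speed from the Current Link Speed field */
	__pcie_update_link_speed(bus, lnksta);

	/* Compare against the configured Target Link Speed */
	return bus->cur_bus_speed ==
	       PCIE_LNKCTL2_TLS2SPEED(lnkctl2 & PCI_EXP_LNKCTL2_TLS);
}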
+#ifdef CONFIG_PCIEPORTBUS +void pcie_reset_lbms_count(struct pci_dev *port); +int pcie_lbms_count(struct pci_dev *port, unsigned long *val); +#else +static inline void pcie_reset_lbms_count(struct pci_dev *port) {} +static inline int pcie_lbms_count(struct pci_dev *port, unsigned long *val) +{ + return -EOPNOTSUPP; +} +#endif + struct pci_dev_reset_methods { u16 vendor; u16 device; @@ -746,6 +810,7 @@ void pci_set_bus_of_node(struct pci_bus *bus); void pci_release_bus_of_node(struct pci_bus *bus); int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge); +bool of_pci_supply_present(struct device_node *np); #else static inline int @@ -793,6 +858,10 @@ static inline int devm_of_pci_bridge_init(struct device *dev, struct pci_host_br return 0; } +static inline bool of_pci_supply_present(struct device_node *np) +{ + return false; +} #endif /* CONFIG_OF */ struct of_changeset; diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index 6461aa93fe76..53ccab62314d 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile @@ -4,7 +4,7 @@ pcieportdrv-y := portdrv.o rcec.o -obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o +obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o bwctrl.o obj-y += aspm.o obj-$(CONFIG_PCIEAER) += aer.o err.o diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index cee2365e54b8..28567d457613 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -805,6 +805,15 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl); pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl); + /* Disable L0s/L1 before updating L1SS config */ + if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) || + FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) { + pcie_capability_write_word(child, PCI_EXP_LNKCTL, + child_lnkctl & ~PCI_EXP_LNKCTL_ASPMC); + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, + parent_lnkctl & ~PCI_EXP_LNKCTL_ASPMC); + } + /* * Setup L0s state * @@ -829,6 +838,13 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) aspm_l1ss_init(link); + /* Restore L0s/L1 if they were enabled */ + if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) || + FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) { + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_lnkctl); + pcie_capability_write_word(child, PCI_EXP_LNKCTL, child_lnkctl); + } + /* Save default state */ link->aspm_default = link->aspm_enabled; @@ -845,25 +861,28 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) } } -/* Configure the ASPM L1 substates */ +/* Configure the ASPM L1 substates. Caller must disable L1 first. 
*/ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) { - u32 val, enable_req; + u32 val; struct pci_dev *child = link->downstream, *parent = link->pdev; - enable_req = (link->aspm_enabled ^ state) & state; + val = 0; + if (state & PCIE_LINK_STATE_L1_1) + val |= PCI_L1SS_CTL1_ASPM_L1_1; + if (state & PCIE_LINK_STATE_L1_2) + val |= PCI_L1SS_CTL1_ASPM_L1_2; + if (state & PCIE_LINK_STATE_L1_1_PCIPM) + val |= PCI_L1SS_CTL1_PCIPM_L1_1; + if (state & PCIE_LINK_STATE_L1_2_PCIPM) + val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* - * Here are the rules specified in the PCIe spec for enabling L1SS: - * - When enabling L1.x, enable bit at parent first, then at child - * - When disabling L1.x, disable bit at child first, then at parent - * - When enabling ASPM L1.x, need to disable L1 - * (at child followed by parent). - * - The ASPM/PCIPM L1.2 must be disabled while programming timing + * PCIe r6.2, sec 5.5.4, rules for enabling L1 PM Substates: + * - Clear L1.x enable bits at child first, then at parent + * - Set L1.x enable bits at parent first, then at child + * - ASPM/PCIPM L1.2 must be disabled while programming timing * parameters - * - * To keep it simple, disable all L1SS bits first, and later enable - * what is needed. */ /* Disable all L1 substates */ @@ -871,26 +890,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) PCI_L1SS_CTL1_L1SS_MASK, 0); pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, PCI_L1SS_CTL1_L1SS_MASK, 0); - /* - * If needed, disable L1, and it gets enabled later - * in pcie_config_aspm_link(). - */ - if (enable_req & (PCIE_LINK_STATE_L1_1 | PCIE_LINK_STATE_L1_2)) { - pcie_capability_clear_word(child, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_ASPM_L1); - pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_ASPM_L1); - } - - val = 0; - if (state & PCIE_LINK_STATE_L1_1) - val |= PCI_L1SS_CTL1_ASPM_L1_1; - if (state & PCIE_LINK_STATE_L1_2) - val |= PCI_L1SS_CTL1_ASPM_L1_2; - if (state & PCIE_LINK_STATE_L1_1_PCIPM) - val |= PCI_L1SS_CTL1_PCIPM_L1_1; - if (state & PCIE_LINK_STATE_L1_2_PCIPM) - val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* Enable what we need to enable */ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, @@ -937,21 +936,30 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state) dwstream |= PCI_EXP_LNKCTL_ASPM_L1; } + /* + * Per PCIe r6.2, sec 5.5.4, setting either or both of the enable + * bits for ASPM L1 PM Substates must be done while ASPM L1 is + * disabled. Disable L1 here and apply new configuration after L1SS + * configuration has been completed. + * + * Per sec 7.5.3.7, when disabling ASPM L1, software must disable + * it in the Downstream component prior to disabling it in the + * Upstream component, and ASPM L1 must be enabled in the Upstream + * component prior to enabling it in the Downstream component. + * + * Sec 7.5.3.7 also recommends programming the same ASPM Control + * value for all functions of a multi-function device. + */ + list_for_each_entry(child, &linkbus->devices, bus_list) + pcie_config_aspm_dev(child, 0); + pcie_config_aspm_dev(parent, 0); + if (link->aspm_capable & PCIE_LINK_STATE_L1SS) pcie_config_aspm_l1ss(link, state); - /* - * Spec 2.0 suggests all functions should be configured the - * same setting for ASPM. Enabling ASPM L1 should be done in - * upstream component first and then downstream, and vice - * versa for disabling ASPM L1. Spec doesn't mention L0S. 
- */ - if (state & PCIE_LINK_STATE_L1) - pcie_config_aspm_dev(parent, upstream); + pcie_config_aspm_dev(parent, upstream); list_for_each_entry(child, &linkbus->devices, bus_list) pcie_config_aspm_dev(child, dwstream); - if (!(state & PCIE_LINK_STATE_L1)) - pcie_config_aspm_dev(parent, upstream); link->aspm_enabled = state; @@ -1442,6 +1450,9 @@ static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked) * touch the LNKCTL register. Also note that this does not enable states * disabled by pci_disable_link_state(). Return 0 or a negative errno. * + * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per + * PCIe r6.0, sec 5.5.4. + * * @pdev: PCI device * @state: Mask of ASPM link states to enable */ @@ -1458,6 +1469,9 @@ EXPORT_SYMBOL(pci_enable_link_state); * can't touch the LNKCTL register. Also note that this does not enable states * disabled by pci_disable_link_state(). Return 0 or a negative errno. * + * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per + * PCIe r6.0, sec 5.5.4. + * * @pdev: PCI device * @state: Mask of ASPM link states to enable * diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c new file mode 100644 index 000000000000..b59cacc740fa --- /dev/null +++ b/drivers/pci/pcie/bwctrl.c @@ -0,0 +1,366 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PCIe bandwidth controller + * + * Author: Alexandru Gagniuc <mr.nuke.me@gmail.com> + * + * Copyright (C) 2019 Dell Inc + * Copyright (C) 2023-2024 Intel Corporation + * + * The PCIe bandwidth controller provides a way to alter PCIe Link Speeds + * and notify the operating system when the Link Width or Speed changes. The + * notification capability is required for all Root Ports and Downstream + * Ports supporting Link Width wider than x1 and/or multiple Link Speeds. + * + * This service port driver hooks into the Bandwidth Notification interrupt + * watching for changes or links becoming degraded in operation. It updates + * the cached Current Link Speed that is exposed to user space through sysfs. + */ + +#define dev_fmt(fmt) "bwctrl: " fmt + +#include <linux/atomic.h> +#include <linux/bitops.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/pci-bwctrl.h> +#include <linux/rwsem.h> +#include <linux/slab.h> +#include <linux/types.h> + +#include "../pci.h" +#include "portdrv.h" + +/** + * struct pcie_bwctrl_data - PCIe bandwidth controller + * @set_speed_mutex: Serializes link speed changes + * @lbms_count: Count for LBMS (since last reset) + * @cdev: Thermal cooling device associated with the port + */ +struct pcie_bwctrl_data { + struct mutex set_speed_mutex; + atomic_t lbms_count; + struct thermal_cooling_device *cdev; +}; + +/* + * Prevent port removal during LBMS count accessors and Link Speed changes. + * + * These have to be differentiated because pcie_bwctrl_change_speed() calls + * pcie_retrain_link() which uses LBMS count reset accessor on success + * (using just one rwsem triggers "possible recursive locking detected" + * warning). 
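+ *
+ * When both locks are needed, as in pcie_bwnotif_probe() and
+ * pcie_bwnotif_remove(), pcie_bwctrl_setspeed_rwsem is taken before
+ * pcie_bwctrl_lbms_rwsem.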
+ */
+static DECLARE_RWSEM(pcie_bwctrl_lbms_rwsem);
+static DECLARE_RWSEM(pcie_bwctrl_setspeed_rwsem);
+
+static bool pcie_valid_speed(enum pci_bus_speed speed)
+{
+	return (speed >= PCIE_SPEED_2_5GT) && (speed <= PCIE_SPEED_64_0GT);
+}
+
+static u16 pci_bus_speed2lnkctl2(enum pci_bus_speed speed)
+{
+	static const u8 speed_conv[] = {
+		[PCIE_SPEED_2_5GT] = PCI_EXP_LNKCTL2_TLS_2_5GT,
+		[PCIE_SPEED_5_0GT] = PCI_EXP_LNKCTL2_TLS_5_0GT,
+		[PCIE_SPEED_8_0GT] = PCI_EXP_LNKCTL2_TLS_8_0GT,
+		[PCIE_SPEED_16_0GT] = PCI_EXP_LNKCTL2_TLS_16_0GT,
+		[PCIE_SPEED_32_0GT] = PCI_EXP_LNKCTL2_TLS_32_0GT,
+		[PCIE_SPEED_64_0GT] = PCI_EXP_LNKCTL2_TLS_64_0GT,
+	};
+
+	if (WARN_ON_ONCE(!pcie_valid_speed(speed)))
+		return 0;
+
+	return speed_conv[speed];
+}
+
+static inline u16 pcie_supported_speeds2target_speed(u8 supported_speeds)
+{
+	return __fls(supported_speeds);
+}
+
+/**
+ * pcie_bwctrl_select_speed - Select Target Link Speed
+ * @port: PCIe Port
+ * @speed_req: Requested PCIe Link Speed
+ *
+ * Select the Target Link Speed by taking into account the Supported Link
+ * Speeds of both the Root Port and the Endpoint.
+ *
+ * Return: Target Link Speed (1=2.5GT/s, 2=5GT/s, 3=8GT/s, etc.)
+ */
+static u16 pcie_bwctrl_select_speed(struct pci_dev *port, enum pci_bus_speed speed_req)
+{
+	struct pci_bus *bus = port->subordinate;
+	u8 desired_speeds, supported_speeds;
+	struct pci_dev *dev;
+
+	desired_speeds = GENMASK(pci_bus_speed2lnkctl2(speed_req),
+				 __fls(PCI_EXP_LNKCAP2_SLS_2_5GB));
+
+	supported_speeds = port->supported_speeds;
+	if (bus) {
+		down_read(&pci_bus_sem);
+		dev = list_first_entry_or_null(&bus->devices, struct pci_dev, bus_list);
+		if (dev)
+			supported_speeds &= dev->supported_speeds;
+		up_read(&pci_bus_sem);
+	}
+	if (!supported_speeds)
+		return PCI_EXP_LNKCAP2_SLS_2_5GB;
+
+	return pcie_supported_speeds2target_speed(supported_speeds & desired_speeds);
+}
+
+static int pcie_bwctrl_change_speed(struct pci_dev *port, u16 target_speed, bool use_lt)
+{
+	int ret;
+
+	ret = pcie_capability_clear_and_set_word(port, PCI_EXP_LNKCTL2,
+						 PCI_EXP_LNKCTL2_TLS, target_speed);
+	if (ret != PCIBIOS_SUCCESSFUL)
+		return pcibios_err_to_errno(ret);
+
+	ret = pcie_retrain_link(port, use_lt);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Ensure the link speed is updated also on platforms that have
+	 * problems with notifications.
+	 */
+	if (port->subordinate)
+		pcie_update_link_speed(port->subordinate);
+
+	return 0;
+}
+
+/**
+ * pcie_set_target_speed - Set downstream Link Speed for PCIe Port
+ * @port: PCIe Port
+ * @speed_req: Requested PCIe Link Speed
+ * @use_lt: Wait for the LT or DLLLA bit to detect the end of link training
+ *
+ * Attempt to set PCIe Port Link Speed to @speed_req. @speed_req may be
+ * adjusted downwards to the best speed supported by both the Port and PCIe
+ * Device underneath it.
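+ *
+ * A minimal usage sketch (hypothetical caller, not part of this patch):
+ *
+ *	err = pcie_set_target_speed(port, PCIE_SPEED_8_0GT, true);
+ *	if (err == -EAGAIN)
+ *		pci_info(port, "link retrained below the requested speed\n");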
+ * + * Return: + * * 0 - on success + * * -EINVAL - @speed_req is not a PCIe Link Speed + * * -ENODEV - @port is not controllable + * * -ETIMEDOUT - changing Link Speed took too long + * * -EAGAIN - Link Speed was changed but @speed_req was not achieved + */ +int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req, + bool use_lt) +{ + struct pci_bus *bus = port->subordinate; + u16 target_speed; + int ret; + + if (WARN_ON_ONCE(!pcie_valid_speed(speed_req))) + return -EINVAL; + + if (bus && bus->cur_bus_speed == speed_req) + return 0; + + target_speed = pcie_bwctrl_select_speed(port, speed_req); + + scoped_guard(rwsem_read, &pcie_bwctrl_setspeed_rwsem) { + struct pcie_bwctrl_data *data = port->link_bwctrl; + + /* + * port->link_bwctrl is NULL during initial scan when called + * e.g. from the Target Speed quirk. + */ + if (data) + mutex_lock(&data->set_speed_mutex); + + ret = pcie_bwctrl_change_speed(port, target_speed, use_lt); + + if (data) + mutex_unlock(&data->set_speed_mutex); + } + + /* + * Despite setting higher speed into the Target Link Speed, empty + * bus won't train to 5GT+ speeds. + */ + if (!ret && bus && bus->cur_bus_speed != speed_req && + !list_empty(&bus->devices)) + ret = -EAGAIN; + + return ret; +} + +static void pcie_bwnotif_enable(struct pcie_device *srv) +{ + struct pcie_bwctrl_data *data = srv->port->link_bwctrl; + struct pci_dev *port = srv->port; + u16 link_status; + int ret; + + /* Count LBMS seen so far as one */ + ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status); + if (ret == PCIBIOS_SUCCESSFUL && link_status & PCI_EXP_LNKSTA_LBMS) + atomic_inc(&data->lbms_count); + + pcie_capability_set_word(port, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); + pcie_capability_write_word(port, PCI_EXP_LNKSTA, + PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS); + + /* + * Update after enabling notifications & clearing status bits ensures + * link speed is up to date. + */ + pcie_update_link_speed(port->subordinate); +} + +static void pcie_bwnotif_disable(struct pci_dev *port) +{ + pcie_capability_clear_word(port, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); +} + +static irqreturn_t pcie_bwnotif_irq(int irq, void *context) +{ + struct pcie_device *srv = context; + struct pcie_bwctrl_data *data = srv->port->link_bwctrl; + struct pci_dev *port = srv->port; + u16 link_status, events; + int ret; + + ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status); + if (ret != PCIBIOS_SUCCESSFUL) + return IRQ_NONE; + + events = link_status & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS); + if (!events) + return IRQ_NONE; + + if (events & PCI_EXP_LNKSTA_LBMS) + atomic_inc(&data->lbms_count); + + pcie_capability_write_word(port, PCI_EXP_LNKSTA, events); + + /* + * Interrupts will not be triggered from any further Link Speed + * change until LBMS is cleared by the write. Therefore, re-read the + * speed (inside pcie_update_link_speed()) after LBMS has been + * cleared to avoid missing link speed changes. 
+ */ + pcie_update_link_speed(port->subordinate); + + return IRQ_HANDLED; +} + +void pcie_reset_lbms_count(struct pci_dev *port) +{ + struct pcie_bwctrl_data *data; + + guard(rwsem_read)(&pcie_bwctrl_lbms_rwsem); + data = port->link_bwctrl; + if (data) + atomic_set(&data->lbms_count, 0); + else + pcie_capability_write_word(port, PCI_EXP_LNKSTA, + PCI_EXP_LNKSTA_LBMS); +} + +int pcie_lbms_count(struct pci_dev *port, unsigned long *val) +{ + struct pcie_bwctrl_data *data; + + guard(rwsem_read)(&pcie_bwctrl_lbms_rwsem); + data = port->link_bwctrl; + if (!data) + return -ENOTTY; + + *val = atomic_read(&data->lbms_count); + + return 0; +} + +static int pcie_bwnotif_probe(struct pcie_device *srv) +{ + struct pci_dev *port = srv->port; + int ret; + + struct pcie_bwctrl_data *data = devm_kzalloc(&srv->device, + sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + ret = devm_mutex_init(&srv->device, &data->set_speed_mutex); + if (ret) + return ret; + + ret = devm_request_irq(&srv->device, srv->irq, pcie_bwnotif_irq, + IRQF_SHARED, "PCIe bwctrl", srv); + if (ret) + return ret; + + scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) { + scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) { + port->link_bwctrl = no_free_ptr(data); + pcie_bwnotif_enable(srv); + } + } + + pci_dbg(port, "enabled with IRQ %d\n", srv->irq); + + /* Don't fail on errors. Don't leave IS_ERR() "pointer" into ->cdev */ + port->link_bwctrl->cdev = pcie_cooling_device_register(port); + if (IS_ERR(port->link_bwctrl->cdev)) + port->link_bwctrl->cdev = NULL; + + return 0; +} + +static void pcie_bwnotif_remove(struct pcie_device *srv) +{ + struct pcie_bwctrl_data *data = srv->port->link_bwctrl; + + pcie_cooling_device_unregister(data->cdev); + + pcie_bwnotif_disable(srv->port); + + scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) + scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) + srv->port->link_bwctrl = NULL; +} + +static int pcie_bwnotif_suspend(struct pcie_device *srv) +{ + pcie_bwnotif_disable(srv->port); + return 0; +} + +static int pcie_bwnotif_resume(struct pcie_device *srv) +{ + pcie_bwnotif_enable(srv); + return 0; +} + +static struct pcie_port_service_driver pcie_bwctrl_driver = { + .name = "pcie_bwctrl", + .port_type = PCIE_ANY_PORT, + .service = PCIE_PORT_SERVICE_BWCTRL, + .probe = pcie_bwnotif_probe, + .suspend = pcie_bwnotif_suspend, + .resume = pcie_bwnotif_resume, + .remove = pcie_bwnotif_remove, +}; + +int __init pcie_bwctrl_init(void) +{ + return pcie_port_service_register(&pcie_bwctrl_driver); +} diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c index 6af5e0425872..5e10306b6308 100644 --- a/drivers/pci/pcie/portdrv.c +++ b/drivers/pci/pcie/portdrv.c @@ -68,7 +68,7 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask, */ if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP | - PCIE_PORT_SERVICE_BWNOTIF)) { + PCIE_PORT_SERVICE_BWCTRL)) { pcie_capability_read_word(dev, PCI_EXP_FLAGS, ®16); *pme = FIELD_GET(PCI_EXP_FLAGS_IRQ, reg16); nvec = *pme + 1; @@ -150,11 +150,11 @@ static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask) /* PME, hotplug and bandwidth notification share an MSI/MSI-X vector */ if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP | - PCIE_PORT_SERVICE_BWNOTIF)) { + PCIE_PORT_SERVICE_BWCTRL)) { pcie_irq = pci_irq_vector(dev, pme); irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pcie_irq; irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pcie_irq; - irqs[PCIE_PORT_SERVICE_BWNOTIF_SHIFT] = pcie_irq; + irqs[PCIE_PORT_SERVICE_BWCTRL_SHIFT] = 
pcie_irq; } if (mask & PCIE_PORT_SERVICE_AER) @@ -271,7 +271,7 @@ static int get_port_device_capability(struct pci_dev *dev) pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap); if (linkcap & PCI_EXP_LNKCAP_LBNC) - services |= PCIE_PORT_SERVICE_BWNOTIF; + services |= PCIE_PORT_SERVICE_BWCTRL; } return services; @@ -828,6 +828,7 @@ static void __init pcie_init_services(void) pcie_aer_init(); pcie_pme_init(); pcie_dpc_init(); + pcie_bwctrl_init(); pcie_hp_init(); } diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 12c89ea0313b..bd29d1cc7b8b 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -20,8 +20,8 @@ #define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT) #define PCIE_PORT_SERVICE_DPC_SHIFT 3 /* Downstream Port Containment */ #define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT) -#define PCIE_PORT_SERVICE_BWNOTIF_SHIFT 4 /* Bandwidth notification */ -#define PCIE_PORT_SERVICE_BWNOTIF (1 << PCIE_PORT_SERVICE_BWNOTIF_SHIFT) +#define PCIE_PORT_SERVICE_BWCTRL_SHIFT 4 /* Bandwidth Controller (notifications) */ +#define PCIE_PORT_SERVICE_BWCTRL (1 << PCIE_PORT_SERVICE_BWCTRL_SHIFT) #define PCIE_PORT_DEVICE_MAXSERVICES 5 @@ -51,6 +51,8 @@ int pcie_dpc_init(void); static inline int pcie_dpc_init(void) { return 0; } #endif +int pcie_bwctrl_init(void); + /* Port Type */ #define PCIE_ANY_PORT (~0) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 4f68414c3086..bf4c76ec8cd4 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -543,15 +543,15 @@ void pci_read_bridge_bases(struct pci_bus *child) pci_read_bridge_mmio(child->self, child->resource[1], false); pci_read_bridge_mmio_pref(child->self, child->resource[2], false); - if (dev->transparent) { - pci_bus_for_each_resource(child->parent, res) { - if (res && res->flags) { - pci_bus_add_resource(child, res, - PCI_SUBTRACTIVE_DECODE); - pci_info(dev, " bridge window %pR (subtractive decode)\n", - res); - } - } + if (!dev->transparent) + return; + + pci_bus_for_each_resource(child->parent, res) { + if (!res || !res->flags) + continue; + + pci_bus_add_resource(child, res); + pci_info(dev, " bridge window %pR (subtractive decode)\n", res); } } @@ -742,9 +742,13 @@ const char *pci_speed_string(enum pci_bus_speed speed) } EXPORT_SYMBOL_GPL(pci_speed_string); -void pcie_update_link_speed(struct pci_bus *bus, u16 linksta) +void pcie_update_link_speed(struct pci_bus *bus) { - bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS]; + struct pci_dev *bridge = bus->self; + u16 linksta; + + pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta); + __pcie_update_link_speed(bus, linksta); } EXPORT_SYMBOL_GPL(pcie_update_link_speed); @@ -827,13 +831,11 @@ static void pci_set_bus_speed(struct pci_bus *bus) if (pci_is_pcie(bridge)) { u32 linkcap; - u16 linksta; pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap); bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS]; - pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta); - pcie_update_link_speed(bus, linksta); + pcie_update_link_speed(bus); } } @@ -1032,7 +1034,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) if (res->flags & IORESOURCE_BUS) pci_bus_insert_busn_res(bus, bus->number, res->end); else - pci_bus_add_resource(bus, res, 0); + pci_bus_add_resource(bus, res); if (offset) { if (resource_type(res) == IORESOURCE_IO) @@ -1633,23 +1635,33 @@ static void set_pcie_thunderbolt(struct pci_dev *dev) static void set_pcie_untrusted(struct pci_dev *dev) { - 
struct pci_dev *parent; + struct pci_dev *parent = pci_upstream_bridge(dev); + if (!parent) + return; /* - * If the upstream bridge is untrusted we treat this device + * If the upstream bridge is untrusted we treat this device as * untrusted as well. */ - parent = pci_upstream_bridge(dev); - if (parent && (parent->untrusted || parent->external_facing)) + if (parent->untrusted) { + dev->untrusted = true; + return; + } + + if (arch_pci_dev_is_removable(dev)) { + pci_dbg(dev, "marking as untrusted\n"); dev->untrusted = true; + } } static void pci_set_removable(struct pci_dev *dev) { struct pci_dev *parent = pci_upstream_bridge(dev); + if (!parent) + return; /* - * We (only) consider everything downstream from an external_facing + * We (only) consider everything tunneled below an external_facing * device to be removable by the user. We're mainly concerned with * consumer platforms with user accessible thunderbolt ports that are * vulnerable to DMA attacks, and we expect those ports to be marked by @@ -1659,9 +1671,15 @@ static void pci_set_removable(struct pci_dev *dev) * accessible to user / may not be removed by end user, and thus not * exposed as "removable" to userspace. */ - if (parent && - (parent->external_facing || dev_is_removable(&parent->dev))) + if (dev_is_removable(&parent->dev)) { + dev_set_removable(&dev->dev, DEVICE_REMOVABLE); + return; + } + + if (arch_pci_dev_is_removable(dev)) { + pci_dbg(dev, "marking as removable\n"); dev_set_removable(&dev->dev, DEVICE_REMOVABLE); + } } /** @@ -1947,6 +1965,9 @@ int pci_setup_device(struct pci_dev *dev) set_pcie_untrusted(dev); + if (pci_is_pcie(dev)) + dev->supported_speeds = pcie_get_supported_speeds(dev); + /* "Unknown power state" */ dev->current_state = PCI_UNKNOWN; @@ -2495,6 +2516,7 @@ static void pci_init_capabilities(struct pci_dev *dev) pci_dpc_init(dev); /* Downstream Port Containment */ pci_rcec_init(dev); /* Root Complex Event Collector */ pci_doe_init(dev); /* Data Object Exchange */ + pci_tph_init(dev); /* TLP Processing Hints */ pcie_report_downtraining(dev); pci_init_reset_methods(dev); @@ -3106,6 +3128,17 @@ int pci_host_probe(struct pci_host_bridge *bridge) pcie_bus_configure_settings(child); pci_bus_add_devices(bus); + + /* + * Ensure pm_runtime_enable() is called for the controller drivers + * before calling pci_host_probe(). The PM framework expects that + * if the parent device supports runtime PM, it will be enabled + * before child runtime PM is enabled. + */ + pm_runtime_set_active(&bridge->dev); + pm_runtime_no_callbacks(&bridge->dev); + devm_pm_runtime_enable(&bridge->dev); + return 0; } EXPORT_SYMBOL_GPL(pci_host_probe); diff --git a/drivers/pci/pwrctl/Makefile b/drivers/pci/pwrctl/Makefile deleted file mode 100644 index d308aae4800c..000000000000 --- a/drivers/pci/pwrctl/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - -obj-$(CONFIG_PCI_PWRCTL) += pci-pwrctl-core.o -pci-pwrctl-core-y := core.o - -obj-$(CONFIG_PCI_PWRCTL_PWRSEQ) += pci-pwrctl-pwrseq.o diff --git a/drivers/pci/pwrctl/core.c b/drivers/pci/pwrctl/core.c deleted file mode 100644 index 01d913b60316..000000000000 --- a/drivers/pci/pwrctl/core.c +++ /dev/null @@ -1,157 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2024 Linaro Ltd. 
- */ - -#include <linux/device.h> -#include <linux/export.h> -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/pci-pwrctl.h> -#include <linux/property.h> -#include <linux/slab.h> - -static int pci_pwrctl_notify(struct notifier_block *nb, unsigned long action, - void *data) -{ - struct pci_pwrctl *pwrctl = container_of(nb, struct pci_pwrctl, nb); - struct device *dev = data; - - if (dev_fwnode(dev) != dev_fwnode(pwrctl->dev)) - return NOTIFY_DONE; - - switch (action) { - case BUS_NOTIFY_ADD_DEVICE: - /* - * We will have two struct device objects bound to two different - * drivers on different buses but consuming the same DT node. We - * must not bind the pins twice in this case but only once for - * the first device to be added. - * - * If we got here then the PCI device is the second after the - * power control platform device. Mark its OF node as reused. - */ - dev->of_node_reused = true; - break; - case BUS_NOTIFY_BOUND_DRIVER: - pwrctl->link = device_link_add(dev, pwrctl->dev, - DL_FLAG_AUTOREMOVE_CONSUMER); - if (!pwrctl->link) - dev_err(pwrctl->dev, "Failed to add device link\n"); - break; - case BUS_NOTIFY_UNBOUND_DRIVER: - if (pwrctl->link) - device_link_remove(dev, pwrctl->dev); - break; - } - - return NOTIFY_DONE; -} - -static void rescan_work_func(struct work_struct *work) -{ - struct pci_pwrctl *pwrctl = container_of(work, struct pci_pwrctl, work); - - pci_lock_rescan_remove(); - pci_rescan_bus(to_pci_dev(pwrctl->dev->parent)->bus); - pci_unlock_rescan_remove(); -} - -/** - * pci_pwrctl_init() - Initialize the PCI power control context struct - * - * @pwrctl: PCI power control data - * @dev: Parent device - */ -void pci_pwrctl_init(struct pci_pwrctl *pwrctl, struct device *dev) -{ - pwrctl->dev = dev; - INIT_WORK(&pwrctl->work, rescan_work_func); -} -EXPORT_SYMBOL_GPL(pci_pwrctl_init); - -/** - * pci_pwrctl_device_set_ready() - Notify the pwrctl subsystem that the PCI - * device is powered-up and ready to be detected. - * - * @pwrctl: PCI power control data. - * - * Returns: - * 0 on success, negative error number on error. - * - * Note: - * This function returning 0 doesn't mean the device was detected. It means, - * that the bus rescan was successfully started. The device will get bound to - * its PCI driver asynchronously. - */ -int pci_pwrctl_device_set_ready(struct pci_pwrctl *pwrctl) -{ - int ret; - - if (!pwrctl->dev) - return -ENODEV; - - pwrctl->nb.notifier_call = pci_pwrctl_notify; - ret = bus_register_notifier(&pci_bus_type, &pwrctl->nb); - if (ret) - return ret; - - schedule_work(&pwrctl->work); - - return 0; -} -EXPORT_SYMBOL_GPL(pci_pwrctl_device_set_ready); - -/** - * pci_pwrctl_device_unset_ready() - Notify the pwrctl subsystem that the PCI - * device is about to be powered-down. - * - * @pwrctl: PCI power control data. - */ -void pci_pwrctl_device_unset_ready(struct pci_pwrctl *pwrctl) -{ - /* - * We don't have to delete the link here. Typically, this function - * is only called when the power control device is being detached. If - * it is being detached then the child PCI device must have already - * been unbound too or the device core wouldn't let us unbind. - */ - bus_unregister_notifier(&pci_bus_type, &pwrctl->nb); -} -EXPORT_SYMBOL_GPL(pci_pwrctl_device_unset_ready); - -static void devm_pci_pwrctl_device_unset_ready(void *data) -{ - struct pci_pwrctl *pwrctl = data; - - pci_pwrctl_device_unset_ready(pwrctl); -} - -/** - * devm_pci_pwrctl_device_set_ready - Managed variant of - * pci_pwrctl_device_set_ready(). 
- * - * @dev: Device managing this pwrctl provider. - * @pwrctl: PCI power control data. - * - * Returns: - * 0 on success, negative error number on error. - */ -int devm_pci_pwrctl_device_set_ready(struct device *dev, - struct pci_pwrctl *pwrctl) -{ - int ret; - - ret = pci_pwrctl_device_set_ready(pwrctl); - if (ret) - return ret; - - return devm_add_action_or_reset(dev, - devm_pci_pwrctl_device_unset_ready, - pwrctl); -} -EXPORT_SYMBOL_GPL(devm_pci_pwrctl_device_set_ready); - -MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>"); -MODULE_DESCRIPTION("PCI Device Power Control core driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/pci/pwrctl/Kconfig b/drivers/pci/pwrctrl/Kconfig index 54589bb2403b..54589bb2403b 100644 --- a/drivers/pci/pwrctl/Kconfig +++ b/drivers/pci/pwrctrl/Kconfig diff --git a/drivers/pci/pwrctrl/Makefile b/drivers/pci/pwrctrl/Makefile new file mode 100644 index 000000000000..75c7ce531c7e --- /dev/null +++ b/drivers/pci/pwrctrl/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_PCI_PWRCTL) += pci-pwrctrl-core.o +pci-pwrctrl-core-y := core.o + +obj-$(CONFIG_PCI_PWRCTL_PWRSEQ) += pci-pwrctrl-pwrseq.o diff --git a/drivers/pci/pwrctrl/core.c b/drivers/pci/pwrctrl/core.c new file mode 100644 index 000000000000..2fb174db91e5 --- /dev/null +++ b/drivers/pci/pwrctrl/core.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Linaro Ltd. + */ + +#include <linux/device.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/pci-pwrctrl.h> +#include <linux/property.h> +#include <linux/slab.h> + +static int pci_pwrctrl_notify(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct pci_pwrctrl *pwrctrl = container_of(nb, struct pci_pwrctrl, nb); + struct device *dev = data; + + if (dev_fwnode(dev) != dev_fwnode(pwrctrl->dev)) + return NOTIFY_DONE; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + /* + * We will have two struct device objects bound to two different + * drivers on different buses but consuming the same DT node. We + * must not bind the pins twice in this case but only once for + * the first device to be added. + * + * If we got here then the PCI device is the second after the + * power control platform device. Mark its OF node as reused. + */ + dev->of_node_reused = true; + break; + } + + return NOTIFY_DONE; +} + +static void rescan_work_func(struct work_struct *work) +{ + struct pci_pwrctrl *pwrctrl = container_of(work, + struct pci_pwrctrl, work); + + pci_lock_rescan_remove(); + pci_rescan_bus(to_pci_dev(pwrctrl->dev->parent)->bus); + pci_unlock_rescan_remove(); +} + +/** + * pci_pwrctrl_init() - Initialize the PCI power control context struct + * + * @pwrctrl: PCI power control data + * @dev: Parent device + */ +void pci_pwrctrl_init(struct pci_pwrctrl *pwrctrl, struct device *dev) +{ + pwrctrl->dev = dev; + INIT_WORK(&pwrctrl->work, rescan_work_func); +} +EXPORT_SYMBOL_GPL(pci_pwrctrl_init); + +/** + * pci_pwrctrl_device_set_ready() - Notify the pwrctrl subsystem that the PCI + * device is powered-up and ready to be detected. + * + * @pwrctrl: PCI power control data. + * + * Returns: + * 0 on success, negative error number on error. + * + * Note: + * This function returning 0 doesn't mean the device was detected. It means, + * that the bus rescan was successfully started. The device will get bound to + * its PCI driver asynchronously. 
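+ *
+ * A typical provider flow (a sketch mirroring pci-pwrctrl-pwrseq.c; "data"
+ * holds the provider's driver state and the device was powered up first):
+ *
+ *	pci_pwrctrl_init(&data->ctx, dev);
+ *	ret = pci_pwrctrl_device_set_ready(&data->ctx);
+ *
+ * Managed providers can use devm_pci_pwrctrl_device_set_ready() instead.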
+ */ +int pci_pwrctrl_device_set_ready(struct pci_pwrctrl *pwrctrl) +{ + int ret; + + if (!pwrctrl->dev) + return -ENODEV; + + pwrctrl->nb.notifier_call = pci_pwrctrl_notify; + ret = bus_register_notifier(&pci_bus_type, &pwrctrl->nb); + if (ret) + return ret; + + schedule_work(&pwrctrl->work); + + return 0; +} +EXPORT_SYMBOL_GPL(pci_pwrctrl_device_set_ready); + +/** + * pci_pwrctrl_device_unset_ready() - Notify the pwrctrl subsystem that the PCI + * device is about to be powered-down. + * + * @pwrctrl: PCI power control data. + */ +void pci_pwrctrl_device_unset_ready(struct pci_pwrctrl *pwrctrl) +{ + /* + * We don't have to delete the link here. Typically, this function + * is only called when the power control device is being detached. If + * it is being detached then the child PCI device must have already + * been unbound too or the device core wouldn't let us unbind. + */ + bus_unregister_notifier(&pci_bus_type, &pwrctrl->nb); +} +EXPORT_SYMBOL_GPL(pci_pwrctrl_device_unset_ready); + +static void devm_pci_pwrctrl_device_unset_ready(void *data) +{ + struct pci_pwrctrl *pwrctrl = data; + + pci_pwrctrl_device_unset_ready(pwrctrl); +} + +/** + * devm_pci_pwrctrl_device_set_ready - Managed variant of + * pci_pwrctrl_device_set_ready(). + * + * @dev: Device managing this pwrctrl provider. + * @pwrctrl: PCI power control data. + * + * Returns: + * 0 on success, negative error number on error. + */ +int devm_pci_pwrctrl_device_set_ready(struct device *dev, + struct pci_pwrctrl *pwrctrl) +{ + int ret; + + ret = pci_pwrctrl_device_set_ready(pwrctrl); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, + devm_pci_pwrctrl_device_unset_ready, + pwrctrl); +} +EXPORT_SYMBOL_GPL(devm_pci_pwrctrl_device_set_ready); + +MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>"); +MODULE_DESCRIPTION("PCI Device Power Control core driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c b/drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.c index a23a4312574b..e9f89866b7c2 100644 --- a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c +++ b/drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.c @@ -7,27 +7,27 @@ #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/pci-pwrctl.h> +#include <linux/pci-pwrctrl.h> #include <linux/platform_device.h> #include <linux/pwrseq/consumer.h> #include <linux/slab.h> #include <linux/types.h> -struct pci_pwrctl_pwrseq_data { - struct pci_pwrctl ctx; +struct pci_pwrctrl_pwrseq_data { + struct pci_pwrctrl ctx; struct pwrseq_desc *pwrseq; }; -static void devm_pci_pwrctl_pwrseq_power_off(void *data) +static void devm_pci_pwrctrl_pwrseq_power_off(void *data) { struct pwrseq_desc *pwrseq = data; pwrseq_power_off(pwrseq); } -static int pci_pwrctl_pwrseq_probe(struct platform_device *pdev) +static int pci_pwrctrl_pwrseq_probe(struct platform_device *pdev) { - struct pci_pwrctl_pwrseq_data *data; + struct pci_pwrctrl_pwrseq_data *data; struct device *dev = &pdev->dev; int ret; @@ -45,22 +45,22 @@ static int pci_pwrctl_pwrseq_probe(struct platform_device *pdev) return dev_err_probe(dev, ret, "Failed to power-on the device\n"); - ret = devm_add_action_or_reset(dev, devm_pci_pwrctl_pwrseq_power_off, + ret = devm_add_action_or_reset(dev, devm_pci_pwrctrl_pwrseq_power_off, data->pwrseq); if (ret) return ret; - pci_pwrctl_init(&data->ctx, dev); + pci_pwrctrl_init(&data->ctx, dev); - ret = devm_pci_pwrctl_device_set_ready(dev, &data->ctx); + ret = devm_pci_pwrctrl_device_set_ready(dev, &data->ctx); if (ret) return 
dev_err_probe(dev, ret, - "Failed to register the pwrctl wrapper\n"); + "Failed to register the pwrctrl wrapper\n"); return 0; } -static const struct of_device_id pci_pwrctl_pwrseq_of_match[] = { +static const struct of_device_id pci_pwrctrl_pwrseq_of_match[] = { { /* ATH11K in QCA6390 package. */ .compatible = "pci17cb,1101", @@ -78,16 +78,16 @@ static const struct of_device_id pci_pwrctl_pwrseq_of_match[] = { }, { } }; -MODULE_DEVICE_TABLE(of, pci_pwrctl_pwrseq_of_match); +MODULE_DEVICE_TABLE(of, pci_pwrctrl_pwrseq_of_match); -static struct platform_driver pci_pwrctl_pwrseq_driver = { +static struct platform_driver pci_pwrctrl_pwrseq_driver = { .driver = { - .name = "pci-pwrctl-pwrseq", - .of_match_table = pci_pwrctl_pwrseq_of_match, + .name = "pci-pwrctrl-pwrseq", + .of_match_table = pci_pwrctrl_pwrseq_of_match, }, - .probe = pci_pwrctl_pwrseq_probe, + .probe = pci_pwrctrl_pwrseq_probe, }; -module_platform_driver(pci_pwrctl_pwrseq_driver); +module_platform_driver(pci_pwrctrl_pwrseq_driver); MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>"); MODULE_DESCRIPTION("Generic PCI Power Control module for power sequenced devices"); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index dccb60c1d9cc..562394fe76d0 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -12,6 +12,7 @@ * file, where their drivers can use them. */ +#include <linux/align.h> #include <linux/bitfield.h> #include <linux/types.h> #include <linux/kernel.h> @@ -29,10 +30,23 @@ #include <linux/nvme.h> #include <linux/platform_data/x86/apple.h> #include <linux/pm_runtime.h> +#include <linux/sizes.h> #include <linux/suspend.h> #include <linux/switchtec.h> #include "pci.h" +static bool pcie_lbms_seen(struct pci_dev *dev, u16 lnksta) +{ + unsigned long count; + int ret; + + ret = pcie_lbms_count(dev, &count); + if (ret < 0) + return lnksta & PCI_EXP_LNKSTA_LBMS; + + return count > 0; +} + /* * Retrain the link of a downstream PCIe port by hand if necessary. 
* @@ -96,22 +110,16 @@ int pcie_failed_link_retrain(struct pci_dev *dev) pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2); pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); - if ((lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) == - PCI_EXP_LNKSTA_LBMS) { + if (!(lnksta & PCI_EXP_LNKSTA_DLLLA) && pcie_lbms_seen(dev, lnksta)) { u16 oldlnkctl2 = lnkctl2; pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n"); - lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; - lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT; - pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2); - - ret = pcie_retrain_link(dev, false); + ret = pcie_set_target_speed(dev, PCIE_SPEED_2_5GT, false); if (ret) { pci_info(dev, "retraining failed\n"); - pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, - oldlnkctl2); - pcie_retrain_link(dev, true); + pcie_set_target_speed(dev, PCIE_LNKCTL2_TLS2SPEED(oldlnkctl2), + true); return ret; } @@ -125,11 +133,7 @@ int pcie_failed_link_retrain(struct pci_dev *dev) pci_info(dev, "removing 2.5GT/s downstream link speed restriction\n"); pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); - lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; - lnkctl2 |= lnkcap & PCI_EXP_LNKCAP_SLS; - pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2); - - ret = pcie_retrain_link(dev, false); + ret = pcie_set_target_speed(dev, PCIE_LNKCAP_SLS2SPEED(lnkcap), false); if (ret) { pci_info(dev, "retraining failed\n"); return ret; @@ -586,8 +590,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev) const char *r_name = pci_resource_name(dev, i); if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) { - r->end = PAGE_SIZE - 1; - r->start = 0; + resource_set_range(r, 0, PAGE_SIZE); r->flags |= IORESOURCE_UNSET; pci_info(dev, "%s %pR: expanded to page size\n", r_name, r); @@ -604,10 +607,9 @@ static void quirk_s3_64M(struct pci_dev *dev) { struct resource *r = &dev->resource[0]; - if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) { + if (!IS_ALIGNED(r->start, SZ_64M) || resource_size(r) != SZ_64M) { r->flags |= IORESOURCE_UNSET; - r->start = 0; - r->end = 0x3ffffff; + resource_set_range(r, 0, SZ_64M); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); @@ -1342,8 +1344,7 @@ static void quirk_dunord(struct pci_dev *dev) struct resource *r = &dev->resource[1]; r->flags |= IORESOURCE_UNSET; - r->start = 0; - r->end = 0xffffff; + resource_set_range(r, 0, SZ_16M); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord); @@ -2340,8 +2341,7 @@ static void quirk_tc86c001_ide(struct pci_dev *dev) if (r->start & 0x8) { r->flags |= IORESOURCE_UNSET; - r->start = 0; - r->end = 0xf; + resource_set_range(r, 0, SZ_16); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2, @@ -2369,8 +2369,7 @@ static void quirk_plx_pci9050(struct pci_dev *dev) pci_info(dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n", bar); r->flags |= IORESOURCE_UNSET; - r->start = 0; - r->end = 0xff; + resource_set_range(r, 0, SZ_256); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, @@ -3522,13 +3521,13 @@ static void quirk_intel_ntb(struct pci_dev *dev) if (rc) return; - dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1; + resource_set_size(&dev->resource[2], (resource_size_t)1 << val); rc = pci_read_config_byte(dev, 0x00D1, &val); if (rc) return; - dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1; + resource_set_size(&dev->resource[4], (resource_size_t)1 
<< val); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb); @@ -4996,18 +4995,21 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) } /* - * Wangxun 10G/1G NICs have no ACS capability, and on multi-function - * devices, peer-to-peer transactions are not be used between the functions. - * So add an ACS quirk for below devices to isolate functions. + * Wangxun 40G/25G/10G/1G NICs have no ACS capability, but on + * multi-function devices, the hardware isolates the functions by + * directing all peer-to-peer traffic upstream as though PCI_ACS_RR and + * PCI_ACS_CR were set. * SFxxx 1G NICs(em). * RP1000/RP2000 10G NICs(sp). + * FF5xxx 40G/25G/10G NICs(aml). */ static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags) { switch (dev->device) { - case 0x0100 ... 0x010F: - case 0x1001: - case 0x2001: + case 0x0100 ... 0x010F: /* EM */ + case 0x1001: case 0x2001: /* SP */ + case 0x5010: case 0x5025: case 0x5040: /* AML */ + case 0x5110: case 0x5125: case 0x5140: /* AML */ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index e4ce1145aa3e..963b8d2855c1 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -17,37 +17,35 @@ static void pci_free_resources(struct pci_dev *dev) } } -static int pci_pwrctl_unregister(struct device *dev, void *data) +static void pci_pwrctrl_unregister(struct device *dev) { - struct device_node *pci_node = data, *plat_node = dev_of_node(dev); + struct platform_device *pdev; - if (dev_is_platform(dev) && plat_node && plat_node == pci_node) { - of_device_unregister(to_platform_device(dev)); - of_node_clear_flag(plat_node, OF_POPULATED); - } + pdev = of_find_device_by_node(dev_of_node(dev)); + if (!pdev) + return; - return 0; + of_device_unregister(pdev); + of_node_clear_flag(dev_of_node(dev), OF_POPULATED); } static void pci_stop_dev(struct pci_dev *dev) { pci_pme_active(dev, false); - if (pci_dev_is_added(dev)) { - device_for_each_child(dev->dev.parent, dev_of_node(&dev->dev), - pci_pwrctl_unregister); - device_release_driver(&dev->dev); - pci_proc_detach_device(dev); - pci_remove_sysfs_dev_files(dev); - of_pci_remove_node(dev); + if (!pci_dev_test_and_clear_added(dev)) + return; - pci_dev_assign_added(dev, false); - } + pci_pwrctrl_unregister(&dev->dev); + device_release_driver(&dev->dev); + pci_proc_detach_device(dev); + pci_remove_sysfs_dev_files(dev); + of_pci_remove_node(dev); } static void pci_destroy_dev(struct pci_dev *dev) { - if (!dev->dev.kobj.parent) + if (pci_dev_test_and_set_removed(dev)) return; pci_npem_remove(dev); diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 23082bc0ca37..5e00cecf1f1a 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -134,6 +134,7 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) int i; pci_dev_for_each_resource(dev, r, i) { + const char *r_name = pci_resource_name(dev, i); struct pci_dev_resource *dev_res, *tmp; resource_size_t r_align; struct list_head *n; @@ -146,8 +147,8 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) r_align = pci_resource_alignment(dev, r); if (!r_align) { - pci_warn(dev, "BAR %d: %pR has bogus alignment\n", - i, r); + pci_warn(dev, "%s %pR: alignment must not be zero\n", + r_name, r); continue; } @@ -246,8 +247,7 @@ static void reassign_resources_sorted(struct list_head 
*realloc_head, add_size = add_res->add_size; align = add_res->min_align; if (!resource_size(res)) { - res->start = align; - res->end = res->start + add_size - 1; + resource_set_range(res, align, add_size); if (pci_assign_resource(add_res->dev, idx)) reset_resource(res); } else { @@ -938,8 +938,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, return; } - b_res->start = min_align; - b_res->end = b_res->start + size0 - 1; + resource_set_range(b_res, min_align, size0); b_res->flags |= IORESOURCE_STARTALIGN; if (bus->self && size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, @@ -1202,8 +1201,7 @@ static void pci_bus_size_cardbus(struct pci_bus *bus, * Reserve some resources for CardBus. We reserve a fixed amount * of bus space for CardBus bridges. */ - b_res->start = pci_cardbus_io_size; - b_res->end = b_res->start + pci_cardbus_io_size - 1; + resource_set_range(b_res, pci_cardbus_io_size, pci_cardbus_io_size); b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN; if (realloc_head) { b_res->end -= pci_cardbus_io_size; @@ -1215,8 +1213,7 @@ handle_b_res_1: b_res = &bridge->resource[PCI_CB_BRIDGE_IO_1_WINDOW]; if (b_res->parent) goto handle_b_res_2; - b_res->start = pci_cardbus_io_size; - b_res->end = b_res->start + pci_cardbus_io_size - 1; + resource_set_range(b_res, pci_cardbus_io_size, pci_cardbus_io_size); b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN; if (realloc_head) { b_res->end -= pci_cardbus_io_size; @@ -1249,8 +1246,8 @@ handle_b_res_2: * Otherwise, allocate one region of twice the size. */ if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { - b_res->start = pci_cardbus_mem_size; - b_res->end = b_res->start + pci_cardbus_mem_size - 1; + resource_set_range(b_res, pci_cardbus_mem_size, + pci_cardbus_mem_size); b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_STARTALIGN; if (realloc_head) { @@ -1267,8 +1264,7 @@ handle_b_res_3: b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_1_WINDOW]; if (b_res->parent) goto handle_done; - b_res->start = pci_cardbus_mem_size; - b_res->end = b_res->start + b_res_3_size - 1; + resource_set_range(b_res, pci_cardbus_mem_size, b_res_3_size); b_res->flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN; if (realloc_head) { b_res->end -= b_res_3_size; @@ -1847,7 +1843,7 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res, return; } - res->end = res->start + new_size - 1; + resource_set_size(res, new_size); /* If the resource is part of the add_list, remove it now */ if (add_list) @@ -1899,6 +1895,9 @@ static void remove_dev_resources(struct pci_dev *dev, struct resource *io, } } +#define ALIGN_DOWN_IF_NONZERO(addr, align) \ + ((align) ? ALIGN_DOWN((addr), (align)) : (addr)) + /* * io, mmio and mmio_pref contain the total amount of bridge window space * available. This includes the minimal space needed to cover all the @@ -2010,8 +2009,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, * what is available). */ align = pci_resource_alignment(dev, res); - io.end = align ? io.start + ALIGN_DOWN(io_per_b, align) - 1 - : io.start + io_per_b - 1; + resource_set_size(&io, ALIGN_DOWN_IF_NONZERO(io_per_b, align)); /* * The x_per_b holds the extra resource space that can be @@ -2023,15 +2021,14 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, res = &dev->resource[PCI_BRIDGE_MEM_WINDOW]; align = pci_resource_alignment(dev, res); - mmio.end = align ? 
mmio.start + ALIGN_DOWN(mmio_per_b, align) - 1 - : mmio.start + mmio_per_b - 1; + resource_set_size(&mmio, + ALIGN_DOWN_IF_NONZERO(mmio_per_b,align)); mmio.start -= resource_size(res); res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; align = pci_resource_alignment(dev, res); - mmio_pref.end = align ? mmio_pref.start + - ALIGN_DOWN(mmio_pref_per_b, align) - 1 - : mmio_pref.start + mmio_pref_per_b - 1; + resource_set_size(&mmio_pref, + ALIGN_DOWN_IF_NONZERO(mmio_pref_per_b, align)); mmio_pref.start -= resource_size(res); pci_bus_distribute_available_resources(b, add_list, io, mmio, diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index c6d933ddfd46..ca14576bf2bf 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -211,8 +211,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, start = res->start; end = res->end; - res->start = fw_addr; - res->end = res->start + size - 1; + resource_set_range(res, fw_addr, size); res->flags &= ~IORESOURCE_UNSET; root = pci_find_parent_resource(dev, res); @@ -463,7 +462,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size) if (ret) return ret; - res->end = res->start + pci_rebar_size_to_bytes(size) - 1; + resource_set_size(res, pci_rebar_size_to_bytes(size)); /* Check if the new config works by trying to assign everything. */ if (dev->bus->self) { @@ -475,7 +474,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size) error_resize: pci_rebar_set_size(dev, resno, old); - res->end = res->start + pci_rebar_size_to_bytes(old) - 1; + resource_set_size(res, pci_rebar_size_to_bytes(old)); return ret; } EXPORT_SYMBOL(pci_resize_resource); diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index 0f87cade10f7..ed645c7a4e4b 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -79,6 +79,7 @@ static void pci_slot_release(struct kobject *kobj) up_read(&pci_bus_sem); list_del(&slot->list); + pci_bus_put(slot->bus); kfree(slot); } @@ -261,7 +262,7 @@ placeholder: goto err; } - slot->bus = parent; + slot->bus = pci_bus_get(parent); slot->number = slot_nr; slot->kobj.kset = pci_slots_kset; @@ -269,6 +270,7 @@ placeholder: slot_name = make_slot_name(name); if (!slot_name) { err = -ENOMEM; + pci_bus_put(slot->bus); kfree(slot); goto err; } diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c new file mode 100644 index 000000000000..1e604fbbda65 --- /dev/null +++ b/drivers/pci/tph.c @@ -0,0 +1,547 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * TPH (TLP Processing Hints) support + * + * Copyright (C) 2024 Advanced Micro Devices, Inc. + * Eric Van Tassell <Eric.VanTassell@amd.com> + * Wei Huang <wei.huang2@amd.com> + */ +#include <linux/pci.h> +#include <linux/pci-acpi.h> +#include <linux/msi.h> +#include <linux/bitfield.h> +#include <linux/pci-tph.h> + +#include "pci.h" + +/* System-wide TPH disabled */ +static bool pci_tph_disabled; + +#ifdef CONFIG_ACPI +/* + * The st_info struct defines the Steering Tag (ST) info returned by the + * firmware PCI ACPI _DSM method (rev=0x7, func=0xF, "_DSM to Query Cache + * Locality TPH Features"), as specified in the approved ECN for PCI Firmware + * Spec and available at https://members.pcisig.com/wg/PCI-SIG/document/15470. 
+ * + * @vm_st_valid: 8-bit ST for volatile memory is valid + * @vm_xst_valid: 16-bit extended ST for volatile memory is valid + * @vm_ph_ignore: 1 => PH was and will be ignored, 0 => PH should be supplied + * @vm_st: 8-bit ST for volatile mem + * @vm_xst: 16-bit extended ST for volatile mem + * @pm_st_valid: 8-bit ST for persistent memory is valid + * @pm_xst_valid: 16-bit extended ST for persistent memory is valid + * @pm_ph_ignore: 1 => PH was and will be ignored, 0 => PH should be supplied + * @pm_st: 8-bit ST for persistent mem + * @pm_xst: 16-bit extended ST for persistent mem + */ +union st_info { + struct { + u64 vm_st_valid : 1; + u64 vm_xst_valid : 1; + u64 vm_ph_ignore : 1; + u64 rsvd1 : 5; + u64 vm_st : 8; + u64 vm_xst : 16; + u64 pm_st_valid : 1; + u64 pm_xst_valid : 1; + u64 pm_ph_ignore : 1; + u64 rsvd2 : 5; + u64 pm_st : 8; + u64 pm_xst : 16; + }; + u64 value; +}; + +static u16 tph_extract_tag(enum tph_mem_type mem_type, u8 req_type, + union st_info *info) +{ + switch (req_type) { + case PCI_TPH_REQ_TPH_ONLY: /* 8-bit tag */ + switch (mem_type) { + case TPH_MEM_TYPE_VM: + if (info->vm_st_valid) + return info->vm_st; + break; + case TPH_MEM_TYPE_PM: + if (info->pm_st_valid) + return info->pm_st; + break; + } + break; + case PCI_TPH_REQ_EXT_TPH: /* 16-bit tag */ + switch (mem_type) { + case TPH_MEM_TYPE_VM: + if (info->vm_xst_valid) + return info->vm_xst; + break; + case TPH_MEM_TYPE_PM: + if (info->pm_xst_valid) + return info->pm_xst; + break; + } + break; + default: + return 0; + } + + return 0; +} + +#define TPH_ST_DSM_FUNC_INDEX 0xF +static acpi_status tph_invoke_dsm(acpi_handle handle, u32 cpu_uid, + union st_info *st_out) +{ + union acpi_object arg3[3], in_obj, *out_obj; + + if (!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 7, + BIT(TPH_ST_DSM_FUNC_INDEX))) + return AE_ERROR; + + /* DWORD: feature ID (0 for processor cache ST query) */ + arg3[0].integer.type = ACPI_TYPE_INTEGER; + arg3[0].integer.value = 0; + + /* DWORD: target UID */ + arg3[1].integer.type = ACPI_TYPE_INTEGER; + arg3[1].integer.value = cpu_uid; + + /* QWORD: properties, all 0's */ + arg3[2].integer.type = ACPI_TYPE_INTEGER; + arg3[2].integer.value = 0; + + in_obj.type = ACPI_TYPE_PACKAGE; + in_obj.package.count = ARRAY_SIZE(arg3); + in_obj.package.elements = arg3; + + out_obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 7, + TPH_ST_DSM_FUNC_INDEX, &in_obj); + if (!out_obj) + return AE_ERROR; + + if (out_obj->type != ACPI_TYPE_BUFFER) { + ACPI_FREE(out_obj); + return AE_ERROR; + } + + st_out->value = *((u64 *)(out_obj->buffer.pointer)); + + ACPI_FREE(out_obj); + + return AE_OK; +} +#endif + +/* Update the TPH Requester Enable field of TPH Control Register */ +static void set_ctrl_reg_req_en(struct pci_dev *pdev, u8 req_type) +{ + u32 reg; + + pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, ®); + + reg &= ~PCI_TPH_CTRL_REQ_EN_MASK; + reg |= FIELD_PREP(PCI_TPH_CTRL_REQ_EN_MASK, req_type); + + pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, reg); +} + +static u8 get_st_modes(struct pci_dev *pdev) +{ + u32 reg; + + pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, ®); + reg &= PCI_TPH_CAP_ST_NS | PCI_TPH_CAP_ST_IV | PCI_TPH_CAP_ST_DS; + + return reg; +} + +static u32 get_st_table_loc(struct pci_dev *pdev) +{ + u32 reg; + + pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, ®); + + return FIELD_GET(PCI_TPH_CAP_LOC_MASK, reg); +} + +/* + * Return the size of ST table. If ST table is not in TPH Requester Extended + * Capability space, return 0. 
Otherwise return the ST Table Size + 1. + */ +static u16 get_st_table_size(struct pci_dev *pdev) +{ + u32 reg; + u32 loc; + + /* Check ST table location first */ + loc = get_st_table_loc(pdev); + + /* Convert loc to match with PCI_TPH_LOC_* defined in pci_regs.h */ + loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc); + if (loc != PCI_TPH_LOC_CAP) + return 0; + + pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, ®); + + return FIELD_GET(PCI_TPH_CAP_ST_MASK, reg) + 1; +} + +/* Return device's Root Port completer capability */ +static u8 get_rp_completer_type(struct pci_dev *pdev) +{ + struct pci_dev *rp; + u32 reg; + int ret; + + rp = pcie_find_root_port(pdev); + if (!rp) + return 0; + + ret = pcie_capability_read_dword(rp, PCI_EXP_DEVCAP2, ®); + if (ret) + return 0; + + return FIELD_GET(PCI_EXP_DEVCAP2_TPH_COMP_MASK, reg); +} + +/* Write ST to MSI-X vector control reg - Return 0 if OK, otherwise -errno */ +static int write_tag_to_msix(struct pci_dev *pdev, int msix_idx, u16 tag) +{ +#ifdef CONFIG_PCI_MSI + struct msi_desc *msi_desc = NULL; + void __iomem *vec_ctrl; + u32 val; + int err = 0; + + msi_lock_descs(&pdev->dev); + + /* Find the msi_desc entry with matching msix_idx */ + msi_for_each_desc(msi_desc, &pdev->dev, MSI_DESC_ASSOCIATED) { + if (msi_desc->msi_index == msix_idx) + break; + } + + if (!msi_desc) { + err = -ENXIO; + goto err_out; + } + + /* Get the vector control register (offset 0xc) pointed by msix_idx */ + vec_ctrl = pdev->msix_base + msix_idx * PCI_MSIX_ENTRY_SIZE; + vec_ctrl += PCI_MSIX_ENTRY_VECTOR_CTRL; + + val = readl(vec_ctrl); + val &= ~PCI_MSIX_ENTRY_CTRL_ST; + val |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag); + writel(val, vec_ctrl); + + /* Read back to flush the update */ + val = readl(vec_ctrl); + +err_out: + msi_unlock_descs(&pdev->dev); + return err; +#else + return -ENODEV; +#endif +} + +/* Write tag to ST table - Return 0 if OK, otherwise -errno */ +static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag) +{ + int st_table_size; + int offset; + + /* Check if index is out of bound */ + st_table_size = get_st_table_size(pdev); + if (index >= st_table_size) + return -ENXIO; + + offset = pdev->tph_cap + PCI_TPH_BASE_SIZEOF + index * sizeof(u16); + + return pci_write_config_word(pdev, offset, tag); +} + +/** + * pcie_tph_get_cpu_st() - Retrieve Steering Tag for a target memory associated + * with a specific CPU + * @pdev: PCI device + * @mem_type: target memory type (volatile or persistent RAM) + * @cpu_uid: associated CPU id + * @tag: Steering Tag to be returned + * + * Return the Steering Tag for a target memory that is associated with a + * specific CPU as indicated by cpu_uid. + * + * Return: 0 if success, otherwise negative value (-errno) + */ +int pcie_tph_get_cpu_st(struct pci_dev *pdev, enum tph_mem_type mem_type, + unsigned int cpu_uid, u16 *tag) +{ +#ifdef CONFIG_ACPI + struct pci_dev *rp; + acpi_handle rp_acpi_handle; + union st_info info; + + rp = pcie_find_root_port(pdev); + if (!rp || !rp->bus || !rp->bus->bridge) + return -ENODEV; + + rp_acpi_handle = ACPI_HANDLE(rp->bus->bridge); + + if (tph_invoke_dsm(rp_acpi_handle, cpu_uid, &info) != AE_OK) { + *tag = 0; + return -EINVAL; + } + + *tag = tph_extract_tag(mem_type, pdev->tph_req_type, &info); + + pci_dbg(pdev, "get steering tag: mem_type=%s, cpu_uid=%d, tag=%#04x\n", + (mem_type == TPH_MEM_TYPE_VM) ? 
"volatile" : "persistent", + cpu_uid, *tag); + + return 0; +#else + return -ENODEV; +#endif +} +EXPORT_SYMBOL(pcie_tph_get_cpu_st); + +/** + * pcie_tph_set_st_entry() - Set Steering Tag in the ST table entry + * @pdev: PCI device + * @index: ST table entry index + * @tag: Steering Tag to be written + * + * Figure out the proper location of ST table, either in the MSI-X table or + * in the TPH Extended Capability space, and write the Steering Tag into + * the ST entry pointed by index. + * + * Return: 0 if success, otherwise negative value (-errno) + */ +int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag) +{ + u32 loc; + int err = 0; + + if (!pdev->tph_cap) + return -EINVAL; + + if (!pdev->tph_enabled) + return -EINVAL; + + /* No need to write tag if device is in "No ST Mode" */ + if (pdev->tph_mode == PCI_TPH_ST_NS_MODE) + return 0; + + /* + * Disable TPH before updating ST to avoid potential instability as + * cautioned in PCIe r6.2, sec 6.17.3, "ST Modes of Operation" + */ + set_ctrl_reg_req_en(pdev, PCI_TPH_REQ_DISABLE); + + loc = get_st_table_loc(pdev); + /* Convert loc to match with PCI_TPH_LOC_* */ + loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc); + + switch (loc) { + case PCI_TPH_LOC_MSIX: + err = write_tag_to_msix(pdev, index, tag); + break; + case PCI_TPH_LOC_CAP: + err = write_tag_to_st_table(pdev, index, tag); + break; + default: + err = -EINVAL; + } + + if (err) { + pcie_disable_tph(pdev); + return err; + } + + set_ctrl_reg_req_en(pdev, pdev->tph_mode); + + pci_dbg(pdev, "set steering tag: %s table, index=%d, tag=%#04x\n", + (loc == PCI_TPH_LOC_MSIX) ? "MSI-X" : "ST", index, tag); + + return 0; +} +EXPORT_SYMBOL(pcie_tph_set_st_entry); + +/** + * pcie_disable_tph - Turn off TPH support for device + * @pdev: PCI device + * + * Return: none + */ +void pcie_disable_tph(struct pci_dev *pdev) +{ + if (!pdev->tph_cap) + return; + + if (!pdev->tph_enabled) + return; + + pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, 0); + + pdev->tph_mode = 0; + pdev->tph_req_type = 0; + pdev->tph_enabled = 0; +} +EXPORT_SYMBOL(pcie_disable_tph); + +/** + * pcie_enable_tph - Enable TPH support for device using a specific ST mode + * @pdev: PCI device + * @mode: ST mode to enable. Current supported modes include: + * + * - PCI_TPH_ST_NS_MODE: NO ST Mode + * - PCI_TPH_ST_IV_MODE: Interrupt Vector Mode + * - PCI_TPH_ST_DS_MODE: Device Specific Mode + * + * Check whether the mode is actually supported by the device before enabling + * and return an error if not. Additionally determine what types of requests, + * TPH or extended TPH, can be issued by the device based on its TPH requester + * capability and the Root Port's completer capability. 
+ * + * Return: 0 on success, otherwise negative value (-errno) + */ +int pcie_enable_tph(struct pci_dev *pdev, int mode) +{ + u32 reg; + u8 dev_modes; + u8 rp_req_type; + + /* Honor "notph" kernel parameter */ + if (pci_tph_disabled) + return -EINVAL; + + if (!pdev->tph_cap) + return -EINVAL; + + if (pdev->tph_enabled) + return -EBUSY; + + /* Sanitize and check ST mode compatibility */ + mode &= PCI_TPH_CTRL_MODE_SEL_MASK; + dev_modes = get_st_modes(pdev); + if (!((1 << mode) & dev_modes)) + return -EINVAL; + + pdev->tph_mode = mode; + + /* Get req_type supported by device and its Root Port */ + pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, ®); + if (FIELD_GET(PCI_TPH_CAP_EXT_TPH, reg)) + pdev->tph_req_type = PCI_TPH_REQ_EXT_TPH; + else + pdev->tph_req_type = PCI_TPH_REQ_TPH_ONLY; + + rp_req_type = get_rp_completer_type(pdev); + + /* Final req_type is the smallest value of two */ + pdev->tph_req_type = min(pdev->tph_req_type, rp_req_type); + + if (pdev->tph_req_type == PCI_TPH_REQ_DISABLE) + return -EINVAL; + + /* Write them into TPH control register */ + pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, ®); + + reg &= ~PCI_TPH_CTRL_MODE_SEL_MASK; + reg |= FIELD_PREP(PCI_TPH_CTRL_MODE_SEL_MASK, pdev->tph_mode); + + reg &= ~PCI_TPH_CTRL_REQ_EN_MASK; + reg |= FIELD_PREP(PCI_TPH_CTRL_REQ_EN_MASK, pdev->tph_req_type); + + pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, reg); + + pdev->tph_enabled = 1; + + return 0; +} +EXPORT_SYMBOL(pcie_enable_tph); + +void pci_restore_tph_state(struct pci_dev *pdev) +{ + struct pci_cap_saved_state *save_state; + int num_entries, i, offset; + u16 *st_entry; + u32 *cap; + + if (!pdev->tph_cap) + return; + + if (!pdev->tph_enabled) + return; + + save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_TPH); + if (!save_state) + return; + + /* Restore control register and all ST entries */ + cap = &save_state->cap.data[0]; + pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, *cap++); + st_entry = (u16 *)cap; + offset = PCI_TPH_BASE_SIZEOF; + num_entries = get_st_table_size(pdev); + for (i = 0; i < num_entries; i++) { + pci_write_config_word(pdev, pdev->tph_cap + offset, + *st_entry++); + offset += sizeof(u16); + } +} + +void pci_save_tph_state(struct pci_dev *pdev) +{ + struct pci_cap_saved_state *save_state; + int num_entries, i, offset; + u16 *st_entry; + u32 *cap; + + if (!pdev->tph_cap) + return; + + if (!pdev->tph_enabled) + return; + + save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_TPH); + if (!save_state) + return; + + /* Save control register */ + cap = &save_state->cap.data[0]; + pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, cap++); + + /* Save all ST entries in extended capability structure */ + st_entry = (u16 *)cap; + offset = PCI_TPH_BASE_SIZEOF; + num_entries = get_st_table_size(pdev); + for (i = 0; i < num_entries; i++) { + pci_read_config_word(pdev, pdev->tph_cap + offset, + st_entry++); + offset += sizeof(u16); + } +} + +void pci_no_tph(void) +{ + pci_tph_disabled = true; + + pr_info("PCIe TPH is disabled\n"); +} + +void pci_tph_init(struct pci_dev *pdev) +{ + int num_entries; + u32 save_size; + + pdev->tph_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_TPH); + if (!pdev->tph_cap) + return; + + num_entries = get_st_table_size(pdev); + save_size = sizeof(u32) + num_entries * sizeof(u16); + pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_TPH, save_size); +} diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 61e7ae524b1f..d3f9686e26e7 100644 --- 
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 61e7ae524b1f..d3f9686e26e7 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -220,6 +220,15 @@ config DEVFREQ_THERMAL
 
 	  If you want this support, you should say Y here.
 
+config PCIE_THERMAL
+	bool "PCIe cooling support"
+	depends on PCIEPORTBUS
+	help
+	  This implements PCIe cooling mechanism through bandwidth reduction
+	  for PCIe devices.
+
+	  If you want this support, you should say Y here.
+
 config THERMAL_EMULATION
 	bool "Thermal emulation mode support"
 	help
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 41c4d56beb40..210c16c91461 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -31,6 +31,8 @@ thermal_sys-$(CONFIG_CPU_IDLE_THERMAL)	+= cpuidle_cooling.o
 # devfreq cooling
 thermal_sys-$(CONFIG_DEVFREQ_THERMAL)	+= devfreq_cooling.o
 
+thermal_sys-$(CONFIG_PCIE_THERMAL)	+= pcie_cooling.o
+
 obj-$(CONFIG_K3_THERMAL)	+= k3_bandgap.o k3_j72xx_bandgap.o
 # platform thermal drivers
 obj-y				+= broadcom/
diff --git a/drivers/thermal/pcie_cooling.c b/drivers/thermal/pcie_cooling.c
new file mode 100644
index 000000000000..a876d64f1582
--- /dev/null
+++ b/drivers/thermal/pcie_cooling.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe cooling device
+ *
+ * Copyright (C) 2023-2024 Intel Corporation
+ */
+
+#include <linux/build_bug.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci-bwctrl.h>
+#include <linux/slab.h>
+#include <linux/sprintf.h>
+#include <linux/thermal.h>
+
+#define COOLING_DEV_TYPE_PREFIX		"PCIe_Port_Link_Speed_"
+
+static int pcie_cooling_get_max_level(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+	struct pci_dev *port = cdev->devdata;
+
+	/* cooling state 0 is same as the maximum PCIe speed */
+	*state = port->subordinate->max_bus_speed - PCIE_SPEED_2_5GT;
+
+	return 0;
+}
+
+static int pcie_cooling_get_cur_level(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+	struct pci_dev *port = cdev->devdata;
+
+	/* cooling state 0 is same as the maximum PCIe speed */
+	*state = cdev->max_state - (port->subordinate->cur_bus_speed - PCIE_SPEED_2_5GT);
+
+	return 0;
+}
+
+static int pcie_cooling_set_cur_level(struct thermal_cooling_device *cdev, unsigned long state)
+{
+	struct pci_dev *port = cdev->devdata;
+	enum pci_bus_speed speed;
+
+	/* cooling state 0 is same as the maximum PCIe speed */
+	speed = (cdev->max_state - state) + PCIE_SPEED_2_5GT;
+
+	return pcie_set_target_speed(port, speed, true);
+}
+
+static struct thermal_cooling_device_ops pcie_cooling_ops = {
+	.get_max_state = pcie_cooling_get_max_level,
+	.get_cur_state = pcie_cooling_get_cur_level,
+	.set_cur_state = pcie_cooling_set_cur_level,
+};
+
+struct thermal_cooling_device *pcie_cooling_device_register(struct pci_dev *port)
+{
+	char *name __free(kfree) =
+		kasprintf(GFP_KERNEL, COOLING_DEV_TYPE_PREFIX "%s", pci_name(port));
+	if (!name)
+		return ERR_PTR(-ENOMEM);
+
+	return thermal_cooling_device_register(name, port, &pcie_cooling_ops);
+}
+
+void pcie_cooling_device_unregister(struct thermal_cooling_device *cdev)
+{
+	thermal_cooling_device_unregister(cdev);
+}
+
+/* For bus_speed <-> state arithmetic */
+static_assert(PCIE_SPEED_2_5GT + 1 == PCIE_SPEED_5_0GT);
+static_assert(PCIE_SPEED_5_0GT + 1 == PCIE_SPEED_8_0GT);
+static_assert(PCIE_SPEED_8_0GT + 1 == PCIE_SPEED_16_0GT);
+static_assert(PCIE_SPEED_16_0GT + 1 == PCIE_SPEED_32_0GT);
+static_assert(PCIE_SPEED_32_0GT + 1 == PCIE_SPEED_64_0GT);
+
+MODULE_AUTHOR("Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>");
+MODULE_DESCRIPTION("PCIe cooling driver");
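A minimal sketch of how a caller could hook this cooling device up, based only on the two signatures above: register it against a downstream port and unregister it on removal. Cooling state 0 maps to the port's maximum supported link speed, so the deepest state corresponds to 2.5 GT/s. The caller structure, the place the pointer is stored, and the assumption that the declarations come from <linux/pci-bwctrl.h> (which pcie_cooling.c includes) are hypothetical.

/* Illustrative sketch, not part of the patch */
static struct thermal_cooling_device *demo_cdev;

static int demo_port_attach(struct pci_dev *port)
{
	demo_cdev = pcie_cooling_device_register(port);
	if (IS_ERR(demo_cdev))
		return PTR_ERR(demo_cdev);

	return 0;
}

static void demo_port_detach(struct pci_dev *port)
{
	pcie_cooling_device_unregister(demo_cdev);
}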
+MODULE_DESCRIPTION("PCIe cooling driver"); diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c index 8bab2aedc499..6d99a02dd439 100644 --- a/drivers/tty/serial/rp2.c +++ b/drivers/tty/serial/rp2.c @@ -698,7 +698,6 @@ static int rp2_probe(struct pci_dev *pdev, const struct firmware *fw; struct rp2_card *card; struct rp2_uart_port *ports; - void __iomem * const *bars; int rc; card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL); @@ -711,13 +710,16 @@ static int rp2_probe(struct pci_dev *pdev, if (rc) return rc; - rc = pcim_iomap_regions_request_all(pdev, 0x03, DRV_NAME); + rc = pcim_request_all_regions(pdev, DRV_NAME); if (rc) return rc; - bars = pcim_iomap_table(pdev); - card->bar0 = bars[0]; - card->bar1 = bars[1]; + card->bar0 = pcim_iomap(pdev, 0, 0); + if (!card->bar0) + return -ENOMEM; + card->bar1 = pcim_iomap(pdev, 1, 0); + if (!card->bar1) + return -ENOMEM; card->pdev = pdev; rp2_decode_cap(id, &card->n_ports, &card->smpte); |