Diffstat (limited to 'drivers/pci/pci.c')
 drivers/pci/pci.c | 287 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 185 insertions(+), 102 deletions(-)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 59e0949fb079..8cf0941f70d1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -946,30 +946,67 @@ void pci_request_acs(void)
}
static const char *disable_acs_redir_param;
+static const char *config_acs_param;
-/**
- * pci_disable_acs_redir - disable ACS redirect capabilities
- * @dev: the PCI device
- *
- * For only devices specified in the disable_acs_redir parameter.
- */
-static void pci_disable_acs_redir(struct pci_dev *dev)
+struct pci_acs {
+ u16 cap;
+ u16 ctrl;
+ u16 fw_ctrl;
+};
+
+static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
+ const char *p, u16 mask, u16 flags)
{
+ char *delimit;
int ret = 0;
- const char *p;
- int pos;
- u16 ctrl;
- if (!disable_acs_redir_param)
+ if (!p)
return;
- p = disable_acs_redir_param;
while (*p) {
+ if (!mask) {
+ /* Check for ACS flags */
+ delimit = strstr(p, "@");
+ if (delimit) {
+ int end;
+ u32 shift = 0;
+
+ end = delimit - p - 1;
+
+ while (end > -1) {
+ if (*(p + end) == '0') {
+ mask |= 1 << shift;
+ shift++;
+ end--;
+ } else if (*(p + end) == '1') {
+ mask |= 1 << shift;
+ flags |= 1 << shift;
+ shift++;
+ end--;
+ } else if ((*(p + end) == 'x') || (*(p + end) == 'X')) {
+ shift++;
+ end--;
+ } else {
+ pci_err(dev, "Invalid ACS flags... Ignoring\n");
+ return;
+ }
+ }
+ p = delimit + 1;
+ } else {
+ pci_err(dev, "ACS Flags missing\n");
+ return;
+ }
+ }
+
+ if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR |
+ PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) {
+ pci_err(dev, "Invalid ACS flags specified\n");
+ return;
+ }
+
ret = pci_dev_str_match(dev, p, &p);
if (ret < 0) {
- pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
- disable_acs_redir_param);
-
+ pr_info_once("PCI: Can't parse ACS command line parameter\n");
break;
} else if (ret == 1) {
/* Found a match */
@@ -989,56 +1026,38 @@ static void pci_disable_acs_redir(struct pci_dev *dev)
if (!pci_dev_specific_disable_acs_redir(dev))
return;
- pos = dev->acs_cap;
- if (!pos) {
- pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
- return;
- }
+ pci_dbg(dev, "ACS mask = %#06x\n", mask);
+ pci_dbg(dev, "ACS flags = %#06x\n", flags);
- pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+ /* If mask is 0 then we copy the bit from the firmware setting. */
+ caps->ctrl = (caps->ctrl & ~mask) | (caps->fw_ctrl & mask);
+ caps->ctrl |= flags;
- /* P2P Request & Completion Redirect */
- ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
-
- pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
-
- pci_info(dev, "disabled ACS redirect\n");
+ pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl);
}
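
The flag string accepted by the parser above is scanned right to left: the character just before the '@' maps to bit 0 (PCI_ACS_SV) and each character further left to the next ACS control bit, with '1' forcing a bit on, '0' forcing it off, and 'x'/'X' leaving it untouched; the text after the '@' is a device specifier handed to pci_dev_str_match(). As a hypothetical example (the device address is purely illustrative), a boot line of

    pci=config_acs=1x@0000:00:1c.0

produces mask = 0x0002 and flags = 0x0002 for a matching device: only the Translation Blocking bit is overridden, and it is forced on, while Source Validation ('x') is left alone.
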
/**
* pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
* @dev: the PCI device
+ * @caps: default ACS controls
*/
-static void pci_std_enable_acs(struct pci_dev *dev)
+static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
{
- int pos;
- u16 cap;
- u16 ctrl;
-
- pos = dev->acs_cap;
- if (!pos)
- return;
-
- pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
- pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
-
/* Source Validation */
- ctrl |= (cap & PCI_ACS_SV);
+ caps->ctrl |= (caps->cap & PCI_ACS_SV);
/* P2P Request Redirect */
- ctrl |= (cap & PCI_ACS_RR);
+ caps->ctrl |= (caps->cap & PCI_ACS_RR);
/* P2P Completion Redirect */
- ctrl |= (cap & PCI_ACS_CR);
+ caps->ctrl |= (caps->cap & PCI_ACS_CR);
/* Upstream Forwarding */
- ctrl |= (cap & PCI_ACS_UF);
+ caps->ctrl |= (caps->cap & PCI_ACS_UF);
/* Enable Translation Blocking for external devices and noats */
if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
- ctrl |= (cap & PCI_ACS_TB);
-
- pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+ caps->ctrl |= (caps->cap & PCI_ACS_TB);
}
/**
@@ -1047,23 +1066,33 @@ static void pci_std_enable_acs(struct pci_dev *dev)
*/
static void pci_enable_acs(struct pci_dev *dev)
{
- if (!pci_acs_enable)
- goto disable_acs_redir;
+ struct pci_acs caps;
+ int pos;
+
+ pos = dev->acs_cap;
+ if (!pos)
+ return;
- if (!pci_dev_specific_enable_acs(dev))
- goto disable_acs_redir;
+ pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap);
+ pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
+ caps.fw_ctrl = caps.ctrl;
- pci_std_enable_acs(dev);
+ /* If an iommu is present we start with kernel default caps */
+ if (pci_acs_enable) {
+ if (pci_dev_specific_enable_acs(dev))
+ pci_std_enable_acs(dev, &caps);
+ }
-disable_acs_redir:
/*
- * Note: pci_disable_acs_redir() must be called even if ACS was not
- * enabled by the kernel because it may have been enabled by
- * platform firmware. So if we are told to disable it, we should
- * always disable it after setting the kernel's default
- * preferences.
+ * Always apply caps from the command line, even if there is no iommu.
+ * Trust that the admin has a reason to change the ACS settings.
*/
- pci_disable_acs_redir(dev);
+ __pci_config_acs(dev, &caps, disable_acs_redir_param,
+ PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC,
+ ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC));
+ __pci_config_acs(dev, &caps, config_acs_param, 0, 0);
+
+ pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl);
}
/**
@@ -2218,12 +2247,6 @@ void pci_disable_enabled_device(struct pci_dev *dev)
*/
void pci_disable_device(struct pci_dev *dev)
{
- struct pci_devres *dr;
-
- dr = find_pci_dr(dev);
- if (dr)
- dr->enabled = 0;
-
dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
"disabling already-disabled device");
@@ -3872,7 +3895,15 @@ EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
*/
void pci_release_region(struct pci_dev *pdev, int bar)
{
- struct pci_devres *dr;
+ /*
+ * This is done for backwards compatibility, because the old PCI devres
+ * API had a mode in which the function became managed if it had been
+ * enabled with pcim_enable_device() instead of pci_enable_device().
+ */
+ if (pci_is_managed(pdev)) {
+ pcim_release_region(pdev, bar);
+ return;
+ }
if (pci_resource_len(pdev, bar) == 0)
return;
@@ -3882,10 +3913,6 @@ void pci_release_region(struct pci_dev *pdev, int bar)
else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
release_mem_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
-
- dr = find_pci_dr(pdev);
- if (dr)
- dr->region_mask &= ~(1 << bar);
}
EXPORT_SYMBOL(pci_release_region);
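
The comment added at the top of pci_release_region() describes the backwards-compatibility rule: once a driver has enabled the device with pcim_enable_device(), the plain request/release calls silently become device-managed. A minimal sketch of that hybrid flow, assuming a hypothetical driver name and BAR 0:

static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int ret;

        /* Managed enable: from here on the "hybrid" functions behave as managed. */
        ret = pcim_enable_device(pdev);
        if (ret)
                return ret;

        /*
         * Because of the pcim_enable_device() above, this request is
         * released automatically when the driver unbinds.
         */
        return pci_request_region(pdev, 0, "mydrv");
}

New code should not rely on this behaviour; as the NOTE blocks added further down state, drivers that want managed cleanup should call the pcim_* helpers directly.
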
@@ -3896,6 +3923,8 @@ EXPORT_SYMBOL(pci_release_region);
* @res_name: Name to be associated with resource.
* @exclusive: whether the region access is exclusive or not
*
+ * Returns: 0 on success, negative error code on failure.
+ *
* Mark the PCI region associated with PCI device @pdev BAR @bar as
* being reserved by owner @res_name. Do not access any
* address inside the PCI regions unless this call returns
@@ -3911,7 +3940,12 @@ EXPORT_SYMBOL(pci_release_region);
static int __pci_request_region(struct pci_dev *pdev, int bar,
const char *res_name, int exclusive)
{
- struct pci_devres *dr;
+ if (pci_is_managed(pdev)) {
+ if (exclusive == IORESOURCE_EXCLUSIVE)
+ return pcim_request_region_exclusive(pdev, bar, res_name);
+
+ return pcim_request_region(pdev, bar, res_name);
+ }
if (pci_resource_len(pdev, bar) == 0)
return 0;
@@ -3927,10 +3961,6 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
goto err_out;
}
- dr = find_pci_dr(pdev);
- if (dr)
- dr->region_mask |= 1 << bar;
-
return 0;
err_out:
@@ -3945,6 +3975,8 @@ err_out:
* @bar: BAR to be reserved
* @res_name: Name to be associated with resource
*
+ * Returns: 0 on success, negative error code on failure.
+ *
* Mark the PCI region associated with PCI device @pdev BAR @bar as
* being reserved by owner @res_name. Do not access any
* address inside the PCI regions unless this call returns
@@ -3952,6 +3984,11 @@ err_out:
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
@@ -4002,6 +4039,13 @@ err_out:
* @pdev: PCI device whose resources are to be reserved
* @bars: Bitmask of BARs to be requested
* @res_name: Name to be associated with resource
+ *
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
const char *res_name)
@@ -4010,6 +4054,19 @@ int pci_request_selected_regions(struct pci_dev *pdev, int bars,
}
EXPORT_SYMBOL(pci_request_selected_regions);
+/**
+ * pci_request_selected_regions_exclusive - Request regions exclusively
+ * @pdev: PCI device to request regions from
+ * @bars: bit mask of BARs to request
+ * @res_name: name to be associated with the requests
+ *
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
+ */
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
const char *res_name)
{
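
Both selected-regions helpers above take @bars as a bitmask of BAR numbers rather than a single BAR index. A short usage sketch (owner name hypothetical) that reserves only BARs 0 and 2:

        int ret;

        /* Bits 0 and 2 of the mask select BAR 0 and BAR 2. */
        ret = pci_request_selected_regions(pdev, BIT(0) | BIT(2), "mydrv");
        if (ret)
                return ret;
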
@@ -4027,7 +4084,6 @@ EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
* successful call to pci_request_regions(). Call this function only
* after all use of the PCI regions has ceased.
*/
-
void pci_release_regions(struct pci_dev *pdev)
{
pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
@@ -4046,6 +4102,11 @@ EXPORT_SYMBOL(pci_release_regions);
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
@@ -4059,6 +4120,8 @@ EXPORT_SYMBOL(pci_request_regions);
* @pdev: PCI device whose resources are to be reserved
* @res_name: Name to be associated with resource.
*
+ * Returns: 0 on success, negative error code on failure.
+ *
* Mark all PCI regions associated with PCI device @pdev as being reserved
* by owner @res_name. Do not access any address inside the PCI regions
* unless this call returns successfully.
@@ -4068,6 +4131,11 @@ EXPORT_SYMBOL(pci_request_regions);
*
* Returns 0 on success, or %EBUSY on error. A warning message is also
* printed on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
@@ -4399,11 +4467,22 @@ void pci_disable_parity(struct pci_dev *dev)
* @enable: boolean: whether to enable or disable PCI INTx
*
* Enables/disables PCI INTx for device @pdev
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use pcim_intx() instead.
*/
void pci_intx(struct pci_dev *pdev, int enable)
{
u16 pci_command, new;
+ /* Preserve the "hybrid" behavior for backwards compatibility */
+ if (pci_is_managed(pdev)) {
+ WARN_ON_ONCE(pcim_intx(pdev, enable) != 0);
+ return;
+ }
+
pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
if (enable)
@@ -4411,17 +4490,8 @@ void pci_intx(struct pci_dev *pdev, int enable)
else
new = pci_command | PCI_COMMAND_INTX_DISABLE;
- if (new != pci_command) {
- struct pci_devres *dr;
-
+ if (new != pci_command)
pci_write_config_word(pdev, PCI_COMMAND, new);
-
- dr = find_pci_dr(pdev);
- if (dr && !dr->restore_intx) {
- dr->restore_intx = 1;
- dr->orig_intx = !enable;
- }
- }
}
EXPORT_SYMBOL_GPL(pci_intx);
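
The hunk above preserves pci_intx()'s historical devres side effect by routing managed devices through pcim_intx(): a driver that was enabled with pcim_enable_device() keeps getting its original INTx state restored on unbind. The call site itself does not change; illustratively:

        /*
         * After pcim_enable_device(), this is dispatched to pcim_intx(),
         * so the previous INTx setting can be restored automatically
         * when the driver detaches.
         */
        pci_intx(pdev, 0);      /* mask legacy INTx */

Drivers that want the managed behaviour explicitly should call pcim_intx() instead, per the NOTE in the kernel-doc.
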
@@ -4753,7 +4823,7 @@ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
*/
int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
{
- struct pci_dev *child;
+ struct pci_dev *child __free(pci_dev_put) = NULL;
int delay;
if (pci_dev_is_disconnected(dev))
@@ -4782,8 +4852,8 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
return 0;
}
- child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
- bus_list);
+ child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
+ struct pci_dev, bus_list));
up_read(&pci_bus_sem);
/*
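
The __free(pci_dev_put) annotation above comes from the kernel's scope-based cleanup support (<linux/cleanup.h>): pci_dev_put() now runs automatically on every return path once @child goes out of scope, which is what makes it safe to take a reference with pci_dev_get() under pci_bus_sem and keep using @child after up_read(). A minimal illustration of the same pattern (helper name hypothetical):

static void example_peek_first_child(struct pci_bus *bus)
{
        struct pci_dev *child __free(pci_dev_put) = NULL;       /* put on scope exit */

        down_read(&pci_bus_sem);
        if (!list_empty(&bus->devices))
                child = pci_dev_get(list_first_entry(&bus->devices,
                                                     struct pci_dev, bus_list));
        up_read(&pci_bus_sem);

        if (child)
                pci_dbg(child, "reference still valid after dropping the lock\n");
}       /* pci_dev_put(child) runs here automatically */
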
@@ -4884,6 +4954,9 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
lock_map_assert_held(&dev->cfg_access_lock);
+ if (!dev->block_cfg_access)
+ pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n",
+ __builtin_return_address(0));
pcibios_reset_secondary_bus(dev);
return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
@@ -5442,10 +5515,12 @@ static void pci_bus_lock(struct pci_bus *bus)
{
struct pci_dev *dev;
+ pci_dev_lock(bus->self);
list_for_each_entry(dev, &bus->devices, bus_list) {
- pci_dev_lock(dev);
if (dev->subordinate)
pci_bus_lock(dev->subordinate);
+ else
+ pci_dev_lock(dev);
}
}
@@ -5457,8 +5532,10 @@ static void pci_bus_unlock(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
+ pci_dev_unlock(bus->self);
}
/* Return 1 on successful lock, 0 on contention */
@@ -5466,15 +5543,15 @@ static int pci_bus_trylock(struct pci_bus *bus)
{
struct pci_dev *dev;
+ if (!pci_dev_trylock(bus->self))
+ return 0;
+
list_for_each_entry(dev, &bus->devices, bus_list) {
- if (!pci_dev_trylock(dev))
- goto unlock;
if (dev->subordinate) {
- if (!pci_bus_trylock(dev->subordinate)) {
- pci_dev_unlock(dev);
+ if (!pci_bus_trylock(dev->subordinate))
goto unlock;
- }
- }
+ } else if (!pci_dev_trylock(dev))
+ goto unlock;
}
return 1;
@@ -5482,8 +5559,10 @@ unlock:
list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
+ pci_dev_unlock(bus->self);
return 0;
}
@@ -5515,9 +5594,10 @@ static void pci_slot_lock(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
- pci_dev_lock(dev);
if (dev->subordinate)
pci_bus_lock(dev->subordinate);
+ else
+ pci_dev_lock(dev);
}
}
@@ -5543,14 +5623,13 @@ static int pci_slot_trylock(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
- if (!pci_dev_trylock(dev))
- goto unlock;
if (dev->subordinate) {
if (!pci_bus_trylock(dev->subordinate)) {
pci_dev_unlock(dev);
goto unlock;
}
- }
+ } else if (!pci_dev_trylock(dev))
+ goto unlock;
}
return 1;
@@ -5561,7 +5640,8 @@ unlock:
continue;
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
return 0;
}
@@ -6840,6 +6920,8 @@ static int __init pci_setup(char *str)
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
disable_acs_redir_param = str + 18;
+ } else if (!strncmp(str, "config_acs=", 11)) {
+ config_acs_param = str + 11;
} else {
pr_err("PCI: Unknown option `%s'\n", str);
}
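
This wires the new option into the existing pci= early parameter next to disable_acs_redir=. pci_setup() splits the pci= string on commas, so both options can be given together, and multiple devices inside one option are separated by ';'. A hypothetical combined boot line (addresses purely illustrative):

    pci=disable_acs_redir=0000:00:1c.0;0000:00:1c.4,config_acs=1x@0000:01:00.0
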
@@ -6864,6 +6946,7 @@ static int __init pci_realloc_setup_params(void)
resource_alignment_param = kstrdup(resource_alignment_param,
GFP_KERNEL);
disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
+ config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);
return 0;
}