author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:46 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:46 +0000
commit    7f3a4257159dea8e7ef66d1a539dc6df708b8ed3 (patch)
tree      bcc69b5f4609f348fac49e2f59e210b29eaea783 /drivers/pci
parent    Adding upstream version 6.9.12. (diff)
download  linux-7f3a4257159dea8e7ef66d1a539dc6df708b8ed3.tar.xz
          linux-7f3a4257159dea8e7ef66d1a539dc6df708b8ed3.zip
Adding upstream version 6.10.3. (upstream/6.10.3)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/pci')
-rw-r--r-- drivers/pci/access.c                              |  40
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-ep.c  |   7
-rw-r--r-- drivers/pci/controller/dwc/pci-dra7xx.c           |   9
-rw-r--r-- drivers/pci/controller/dwc/pci-imx6.c             |  10
-rw-r--r-- drivers/pci/controller/dwc/pci-keystone.c         | 167
-rw-r--r-- drivers/pci/controller/dwc/pci-layerscape-ep.c    |   9
-rw-r--r-- drivers/pci/controller/dwc/pcie-artpec6.c         |  15
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-ep.c   | 147
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-plat.c |  11
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.h      |  14
-rw-r--r-- drivers/pci/controller/dwc/pcie-dw-rockchip.c     |   2
-rw-r--r-- drivers/pci/controller/dwc/pcie-keembay.c         |  18
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom-ep.c         |  10
-rw-r--r-- drivers/pci/controller/dwc/pcie-rcar-gen4.c       |  28
-rw-r--r-- drivers/pci/controller/dwc/pcie-tegra194.c        |   6
-rw-r--r-- drivers/pci/controller/dwc/pcie-uniphier-ep.c     |  15
-rw-r--r-- drivers/pci/controller/pci-hyperv.c               |   4
-rw-r--r-- drivers/pci/controller/pci-loongson.c             |  13
-rw-r--r-- drivers/pci/controller/pcie-mt7621.c              |   2
-rw-r--r-- drivers/pci/controller/pcie-rcar-ep.c             |   2
-rw-r--r-- drivers/pci/controller/pcie-rcar-host.c           |   6
-rw-r--r-- drivers/pci/controller/pcie-rockchip-ep.c         |   4
-rw-r--r-- drivers/pci/controller/pcie-rockchip.c            |   2
-rw-r--r-- drivers/pci/doe.c                                 |  12
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-test.c     |  94
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-vntb.c     |  19
-rw-r--r-- drivers/pci/endpoint/pci-ep-cfs.c                 |   9
-rw-r--r-- drivers/pci/endpoint/pci-epc-core.c               |  22
-rw-r--r-- drivers/pci/endpoint/pci-epf-core.c               |   9
-rw-r--r-- drivers/pci/hotplug/TODO                          |  12
-rw-r--r-- drivers/pci/msi/api.c                             |  58
-rw-r--r-- drivers/pci/msi/irqdomain.c                       |  59
-rw-r--r-- drivers/pci/msi/msi.c                             |  15
-rw-r--r-- drivers/pci/pci.c                                 | 129
-rw-r--r-- drivers/pci/pci.h                                 |   2
-rw-r--r-- drivers/pci/pcie/Kconfig                          |   2
-rw-r--r-- drivers/pci/pcie/aer_inject.c                     |   2
-rw-r--r-- drivers/pci/pcie/aspm.c                           | 182
-rw-r--r-- drivers/pci/pcie/err.c                            |  12
-rw-r--r-- drivers/pci/pcie/portdrv.c                        |   8
-rw-r--r-- drivers/pci/probe.c                               |   5
-rw-r--r-- drivers/pci/quirks.c                              |  20
-rw-r--r-- drivers/pci/setup-bus.c                           |   6
43 files changed, 703 insertions(+), 515 deletions(-)
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 6449056b57..b123da16b6 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -36,10 +36,13 @@ DEFINE_RAW_SPINLOCK(pci_lock);
int noinline pci_bus_read_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
- int res; \
unsigned long flags; \
u32 data = 0; \
- if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
+ int res; \
+ \
+ if (PCI_##size##_BAD) \
+ return PCIBIOS_BAD_REGISTER_NUMBER; \
+ \
pci_lock_config(flags); \
res = bus->ops->read(bus, devfn, pos, len, &data); \
if (res) \
@@ -47,6 +50,7 @@ int noinline pci_bus_read_config_##size \
else \
*value = (type)data; \
pci_unlock_config(flags); \
+ \
return res; \
}
@@ -54,12 +58,16 @@ int noinline pci_bus_read_config_##size \
int noinline pci_bus_write_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
- int res; \
unsigned long flags; \
- if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
+ int res; \
+ \
+ if (PCI_##size##_BAD) \
+ return PCIBIOS_BAD_REGISTER_NUMBER; \
+ \
pci_lock_config(flags); \
res = bus->ops->write(bus, devfn, pos, len, value); \
pci_unlock_config(flags); \
+ \
return res; \
}
@@ -216,24 +224,27 @@ static noinline void pci_wait_cfg(struct pci_dev *dev)
}
/* Returns 0 on success, negative values indicate error. */
-#define PCI_USER_READ_CONFIG(size, type) \
+#define PCI_USER_READ_CONFIG(size, type) \
int pci_user_read_config_##size \
(struct pci_dev *dev, int pos, type *val) \
{ \
- int ret = PCIBIOS_SUCCESSFUL; \
u32 data = -1; \
+ int ret; \
+ \
if (PCI_##size##_BAD) \
return -EINVAL; \
- raw_spin_lock_irq(&pci_lock); \
+ \
+ raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_cfg_access)) \
pci_wait_cfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
- pos, sizeof(type), &data); \
- raw_spin_unlock_irq(&pci_lock); \
+ pos, sizeof(type), &data); \
+ raw_spin_unlock_irq(&pci_lock); \
if (ret) \
PCI_SET_ERROR_RESPONSE(val); \
else \
*val = (type)data; \
+ \
return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
@@ -243,15 +254,18 @@ EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
int pci_user_write_config_##size \
(struct pci_dev *dev, int pos, type val) \
{ \
- int ret = PCIBIOS_SUCCESSFUL; \
+ int ret; \
+ \
if (PCI_##size##_BAD) \
return -EINVAL; \
- raw_spin_lock_irq(&pci_lock); \
+ \
+ raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_cfg_access)) \
pci_wait_cfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
- pos, sizeof(type), val); \
- raw_spin_unlock_irq(&pci_lock); \
+ pos, sizeof(type), val); \
+ raw_spin_unlock_irq(&pci_lock); \
+ \
return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 81c50dc64d..e0cc4560df 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -99,14 +99,11 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
- bool is_64bits = sz > SZ_2G;
+ bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
if (is_64bits && (bar & 1))
return -EINVAL;
- if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
- epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
-
if (is_64bits && is_prefetch)
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
else if (is_prefetch)
@@ -746,6 +743,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
spin_lock_init(&ep->lock);
+ pci_epc_init_notify(epc);
+
return 0;
free_epc_mem:
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 0e40667706..d2d17d37d3 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -467,6 +467,15 @@ static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
return ret;
}
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ dw_pcie_ep_init_notify(ep);
+
return 0;
}
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 99a60270b2..917c69edee 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -1123,6 +1123,16 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
dev_err(dev, "failed to initialize endpoint\n");
return ret;
}
+
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ dw_pcie_ep_init_notify(ep);
+
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 844de44187..cd0e0022f9 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -245,8 +245,68 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
.irq_unmask = ks_pcie_msi_unmask,
};
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (!(val & DBI_CS2));
+}
+
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (val & DBI_CS2);
+}
+
static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ /* Configure and set up BAR0 */
+ ks_pcie_set_dbi_mode(ks_pcie);
+
+ /* Enable BAR0 */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ /*
+ * For BAR0, just setting bus address for inbound writes (MSI) should
+ * be sufficient. Use physical address to avoid any conflicts.
+ */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+
pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
return dw_pcie_allocate_domains(pp);
}
@@ -340,59 +400,22 @@ static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
-/**
- * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
- * PCIe host controller driver information.
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- val |= DBI_CS2;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-
- do {
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- } while (!(val & DBI_CS2));
-}
-
-/**
- * ks_pcie_clear_dbi_mode() - Disable DBI mode
- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
- * PCIe host controller driver information.
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- val &= ~DBI_CS2;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-
- do {
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- } while (val & DBI_CS2);
-}
-
-static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
u32 val;
u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
- u64 start, end;
+ struct resource_entry *entry;
struct resource *mem;
+ u64 start, end;
int i;
- mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
+ entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ mem = entry->res;
start = mem->start;
end = mem->end;
@@ -403,7 +426,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
ks_pcie_clear_dbi_mode(ks_pcie);
if (ks_pcie->is_am6)
- return;
+ return 0;
val = ilog2(OB_WIN_SIZE);
ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
@@ -420,6 +443,8 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
val |= OB_XLAT_EN_VAL;
ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ return 0;
}
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
@@ -445,44 +470,10 @@ static struct pci_ops ks_child_pcie_ops = {
.write = pci_generic_config_write,
};
-/**
- * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
- * @bus: A pointer to the PCI bus structure.
- *
- * This sets BAR0 to enable inbound access for MSI_IRQ register
- */
-static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
-{
- struct dw_pcie_rp *pp = bus->sysdata;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- if (!pci_is_root_bus(bus))
- return 0;
-
- /* Configure and set up BAR0 */
- ks_pcie_set_dbi_mode(ks_pcie);
-
- /* Enable BAR0 */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
-
- ks_pcie_clear_dbi_mode(ks_pcie);
-
- /*
- * For BAR0, just setting bus address for inbound writes (MSI) should
- * be sufficient. Use physical address to avoid any conflicts.
- */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
-
- return 0;
-}
-
static struct pci_ops ks_pcie_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
- .add_bus = ks_pcie_v3_65_add_bus,
};
/**
@@ -814,7 +805,10 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
return ret;
ks_pcie_stop_link(pci);
- ks_pcie_setup_rc_app_regs(ks_pcie);
+ ret = ks_pcie_setup_rc_app_regs(ks_pcie);
+ if (ret)
+ return ret;
+
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
pci->dbi_base + PCI_IO_BASE);
@@ -1286,6 +1280,15 @@ static int ks_pcie_probe(struct platform_device *pdev)
ret = dw_pcie_ep_init(&pci->ep);
if (ret < 0)
goto err_get_sync;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ goto err_ep_init;
+ }
+
+ dw_pcie_ep_init_notify(&pci->ep);
+
break;
default:
dev_err(dev, "INVALID device type %d\n", mode);
@@ -1295,6 +1298,8 @@ static int ks_pcie_probe(struct platform_device *pdev)
return 0;
+err_ep_init:
+ dw_pcie_ep_deinit(&pci->ep);
err_get_sync:
pm_runtime_put(dev);
pm_runtime_disable(dev);
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 1f6ee1460e..7dde6d5fa4 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -279,6 +279,15 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ dw_pcie_ep_init_notify(&pci->ep);
+
return ls_pcie_ep_interrupt_init(pcie, pdev);
}
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 9ed0a9ba76..a4630b9248 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -441,7 +441,20 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ dw_pcie_ep_init_notify(&pci->ep);
+
+ break;
default:
dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index c43a1479de..769e848246 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -15,6 +15,10 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+/**
+ * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
+ * @ep: DWC EP device
+ */
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
@@ -23,6 +27,10 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+/**
+ * dw_pcie_ep_init_notify - Notify EPF drivers about EPC initialization complete
+ * @ep: DWC EP device
+ */
void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
@@ -31,6 +39,14 @@ void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
+/**
+ * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
+ * the endpoint function
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ *
+ * Return: struct dw_pcie_ep_func if success, NULL otherwise.
+ */
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
@@ -61,6 +77,11 @@ static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
dw_pcie_dbi_ro_wr_dis(pci);
}
+/**
+ * dw_pcie_ep_reset_bar - Reset endpoint BAR
+ * @pci: DWC PCI device
+ * @bar: BAR number of the endpoint
+ */
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
u8 func_no, funcs;
@@ -140,7 +161,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
if (!ep->bar_to_atu[bar])
free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
else
- free_win = ep->bar_to_atu[bar];
+ free_win = ep->bar_to_atu[bar] - 1;
if (free_win >= pci->num_ib_windows) {
dev_err(pci->dev, "No free inbound window\n");
@@ -154,7 +175,11 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
return ret;
}
- ep->bar_to_atu[bar] = free_win;
+ /*
+ * Always increment free_win before assignment, since value 0 is used to identify
+ * unallocated mapping.
+ */
+ ep->bar_to_atu[bar] = free_win + 1;
set_bit(free_win, ep->ib_window_map);
return 0;
@@ -191,7 +216,10 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
- u32 atu_index = ep->bar_to_atu[bar];
+ u32 atu_index = ep->bar_to_atu[bar] - 1;
+
+ if (!ep->bar_to_atu[bar])
+ return;
__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
@@ -440,6 +468,13 @@ static const struct pci_epc_ops epc_ops = {
.get_features = dw_pcie_ep_get_features,
};
+/**
+ * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint
+ *
+ * Return: 0 if success, errono otherwise.
+ */
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -451,6 +486,14 @@ int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);
+/**
+ * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errono otherwise.
+ */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
@@ -500,6 +543,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);
+/**
+ * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using Doorbell
+ * method
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
@@ -519,6 +571,14 @@ int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+/**
+ * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
@@ -566,22 +626,42 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
-void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset
+ * @ep: DWC EP device
+ *
+ * Cleans up the DWC EP specific resources like eDMA etc... after fundamental
+ * reset like PERST#. Note that this API is only applicable for drivers
+ * supporting PERST# or any other methods of fundamental reset.
+ */
+void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct pci_epc *epc = ep->epc;
dw_pcie_edma_remove(pci);
+ ep->epc->init_complete = false;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
+
+/**
+ * dw_pcie_ep_deinit - Deinitialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Deinitialize the endpoint device. EPC device is not destroyed since that will
+ * be taken care by Devres.
+ */
+void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ dw_pcie_ep_cleanup(ep);
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);
pci_epc_mem_exit(epc);
-
- if (ep->ops->deinit)
- ep->ops->deinit(ep);
}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_exit);
+EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);
static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
{
@@ -601,7 +681,15 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
return 0;
}
-int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
+ * @ep: DWC EP device
+ *
+ * Initialize the registers (CSRs) specific to DWC EP. This API should be called
+ * only when the endpoint receives an active refclk (either from host or
+ * generated locally).
+ */
+int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct dw_pcie_ep_func *ep_func;
@@ -721,8 +809,17 @@ err_remove_edma:
return ret;
}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);
+/**
+ * dw_pcie_ep_init - Initialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Initialize the endpoint device. Allocate resources and create the EPC
+ * device with the endpoint framework.
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
int ret;
@@ -732,7 +829,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
- const struct pci_epc_features *epc_features;
INIT_LIST_HEAD(&ep->func_list);
@@ -767,7 +863,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->page_size);
if (ret < 0) {
dev_err(dev, "Failed to initialize address space\n");
- goto err_ep_deinit;
+ return ret;
}
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
@@ -778,36 +874,11 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
goto err_exit_epc_mem;
}
- if (ep->ops->get_features) {
- epc_features = ep->ops->get_features(ep);
- if (epc_features->core_init_notifier)
- return 0;
- }
-
- /*
- * NOTE:- Avoid accessing the hardware (Ex:- DBI space) before this
- * step as platforms that implement 'core_init_notifier' feature may
- * not have the hardware ready (i.e. core initialized) for access
- * (Ex: tegra194). Any hardware access on such platforms result
- * in system hang.
- */
- ret = dw_pcie_ep_init_complete(ep);
- if (ret)
- goto err_free_epc_mem;
-
return 0;
-err_free_epc_mem:
- pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
- epc->mem->window.page_size);
-
err_exit_epc_mem:
pci_epc_mem_exit(epc);
-err_ep_deinit:
- if (ep->ops->deinit)
- ep->ops->deinit(ep);
-
return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 778588b4be..8490c5d6ff 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -145,6 +145,17 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ }
+
+ dw_pcie_ep_init_notify(&pci->ep);
+
break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 26dae48374..f8e5431a20 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -333,7 +333,6 @@ struct dw_pcie_rp {
struct dw_pcie_ep_ops {
void (*pre_init)(struct dw_pcie_ep *ep);
void (*init)(struct dw_pcie_ep *ep);
- void (*deinit)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
unsigned int type, u16 interrupt_num);
const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
@@ -670,9 +669,10 @@ static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
#ifdef CONFIG_PCIE_DW_EP
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
-int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep);
void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
-void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
+void dw_pcie_ep_deinit(struct dw_pcie_ep *ep);
+void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep);
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
@@ -693,7 +693,7 @@ static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return 0;
}
-static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
return 0;
}
@@ -702,7 +702,11 @@ static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
{
}
-static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+static inline void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
+{
+}
+
+static inline void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
}
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index d6842141d3..a909e42b42 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -240,7 +240,7 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev,
return PTR_ERR(rockchip->apb_base);
rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_HIGH);
+ GPIOD_OUT_LOW);
if (IS_ERR(rockchip->rst_gpio))
return PTR_ERR(rockchip->rst_gpio);
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 5e8e54f597..98bbc83182 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -396,6 +396,7 @@ static int keembay_pcie_probe(struct platform_device *pdev)
struct keembay_pcie *pcie;
struct dw_pcie *pci;
enum dw_pcie_device_mode mode;
+ int ret;
data = device_get_match_data(dev);
if (!data)
@@ -430,11 +431,26 @@ static int keembay_pcie_probe(struct platform_device *pdev)
return -ENODEV;
pci->ep.ops = &keembay_pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ dw_pcie_ep_init_notify(&pci->ep);
+
+ break;
default:
dev_err(dev, "Invalid device type %d\n", pcie->mode);
return -ENODEV;
}
+
+ return 0;
}
static const struct keembay_pcie_of_data keembay_pcie_rc_of_data = {
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 36e5e80cd2..50b1635e3c 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -463,7 +463,7 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
PARF_INT_ALL_LINK_UP | PARF_INT_ALL_EDMA;
writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);
- ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
+ ret = dw_pcie_ep_init_registers(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
goto err_disable_resources;
@@ -500,13 +500,8 @@ err_disable_resources:
static void qcom_pcie_perst_assert(struct dw_pcie *pci)
{
struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
- struct device *dev = pci->dev;
-
- if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
- dev_dbg(dev, "Link is already disabled\n");
- return;
- }
+ dw_pcie_ep_cleanup(&pci->ep);
qcom_pcie_disable_resources(pcie_ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
}
@@ -774,7 +769,6 @@ static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
static const struct pci_epc_features qcom_pcie_epc_features = {
.linkup_notifier = true,
- .core_init_notifier = true,
.msi_capable = true,
.msix_capable = false,
.align = SZ_4K,
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index 0be760ed42..cfeccc2f9e 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -352,11 +352,8 @@ static void rcar_gen4_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_ep_reset_bar(pci, bar);
}
-static void rcar_gen4_pcie_ep_deinit(struct dw_pcie_ep *ep)
+static void rcar_gen4_pcie_ep_deinit(struct rcar_gen4_pcie *rcar)
{
- struct dw_pcie *dw = to_dw_pcie_from_ep(ep);
- struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
-
writel(0, rcar->base + PCIEDMAINTSTSEN);
rcar_gen4_pcie_common_deinit(rcar);
}
@@ -410,7 +407,6 @@ static unsigned int rcar_gen4_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
static const struct dw_pcie_ep_ops pcie_ep_ops = {
.pre_init = rcar_gen4_pcie_ep_pre_init,
.init = rcar_gen4_pcie_ep_init,
- .deinit = rcar_gen4_pcie_ep_deinit,
.raise_irq = rcar_gen4_pcie_ep_raise_irq,
.get_features = rcar_gen4_pcie_ep_get_features,
.get_dbi_offset = rcar_gen4_pcie_ep_get_dbi_offset,
@@ -420,18 +416,36 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {
static int rcar_gen4_add_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
{
struct dw_pcie_ep *ep = &rcar->dw.ep;
+ struct device *dev = rcar->dw.dev;
+ int ret;
if (!IS_ENABLED(CONFIG_PCIE_RCAR_GEN4_EP))
return -ENODEV;
ep->ops = &pcie_ep_ops;
- return dw_pcie_ep_init(ep);
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ rcar_gen4_pcie_ep_deinit(rcar);
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ rcar_gen4_pcie_ep_deinit(rcar);
+ }
+
+ dw_pcie_ep_init_notify(ep);
+
+ return ret;
}
static void rcar_gen4_remove_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
{
- dw_pcie_ep_exit(&rcar->dw.ep);
+ dw_pcie_ep_deinit(&rcar->dw.ep);
+ rcar_gen4_pcie_ep_deinit(rcar);
}
/* Common */
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index e440c09d1d..4537313ef3 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -1715,6 +1715,8 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
if (ret)
dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
+ dw_pcie_ep_cleanup(&pcie->pci.ep);
+
reset_control_assert(pcie->core_rst);
tegra_pcie_disable_phy(pcie);
@@ -1895,7 +1897,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
- ret = dw_pcie_ep_init_complete(ep);
+ ret = dw_pcie_ep_init_registers(ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
goto fail_init_complete;
@@ -2004,7 +2006,6 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features tegra_pcie_epc_features = {
.linkup_notifier = true,
- .core_init_notifier = true,
.msi_capable = false,
.msix_capable = false,
.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
@@ -2014,6 +2015,7 @@ static const struct pci_epc_features tegra_pcie_epc_features = {
.bar[BAR_3] = { .type = BAR_RESERVED, },
.bar[BAR_4] = { .type = BAR_RESERVED, },
.bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
};
static const struct pci_epc_features*
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index 639bc2e124..a2b844268e 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -399,7 +399,20 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
return ret;
priv->pci.ep.ops = &uniphier_pcie_ep_ops;
- return dw_pcie_ep_init(&priv->pci.ep);
+ ret = dw_pcie_ep_init(&priv->pci.ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&priv->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&priv->pci.ep);
+ return ret;
+ }
+
+ dw_pcie_ep_init_notify(&priv->pci.ep);
+
+ return 0;
}
static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 5992280e81..cdd5be1602 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1130,8 +1130,8 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
PCI_CAPABILITY_LIST) {
/* ROM BARs are unimplemented */
*val = 0;
- } else if (where >= PCI_INTERRUPT_LINE && where + size <=
- PCI_INTERRUPT_PIN) {
+ } else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
+ (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
/*
* Interrupt Line and Interrupt PIN are hard-wired to zero
* because this front-end only supports message-signaled
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index 8b34ccff07..bc630ab8a2 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -163,6 +163,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_HDMI, loongson_pci_pin_quirk);
+static void loongson_pci_msi_quirk(struct pci_dev *dev)
+{
+ u16 val, class = dev->class >> 8;
+
+ if (class != PCI_CLASS_BRIDGE_HOST)
+ return;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &val);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, val);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT5, loongson_pci_msi_quirk);
+
static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
{
struct pci_config_window *cfg;
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index 79e225edb4..d97b956e6e 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -202,7 +202,7 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
struct mt7621_pcie_port *port;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- char name[10];
+ char name[11];
int err;
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index 05967c6c0b..047e2cef5a 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -542,6 +542,8 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev)
goto err_pm_put;
}
+ pci_epc_init_notify(epc);
+
return 0;
err_pm_put:
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 996077ab7c..c01efc6ea6 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -78,7 +78,11 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
writel(L1IATN, pcie_base + PMCTLR);
ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
val & L1FAEG, 10, 1000);
- WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
+ if (ret) {
+ dev_warn_ratelimited(pcie_dev,
+ "Timeout waiting for L1 link state, ret=%d\n",
+ ret);
+ }
writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
}
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 79c3e7e9a2..1362745336 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -151,7 +151,7 @@ static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
- bool is_64bits = sz > SZ_2G;
+ bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
if (is_64bits && (bar & 1))
return -EINVAL;
@@ -607,6 +607,8 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE,
PCIE_CLIENT_CONFIG);
+ pci_epc_init_notify(epc);
+
return 0;
err_epc_mem_exit:
pci_epc_mem_exit(epc);
diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
index 0ef2e622d3..c07d7129f1 100644
--- a/drivers/pci/controller/pcie-rockchip.c
+++ b/drivers/pci/controller/pcie-rockchip.c
@@ -121,7 +121,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
if (rockchip->is_rc) {
rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep",
- GPIOD_OUT_HIGH);
+ GPIOD_OUT_LOW);
if (IS_ERR(rockchip->ep_gpio))
return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio),
"failed to get ep GPIO\n");
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index e3aab5edaf..652d63df9d 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -383,11 +383,13 @@ static void pci_doe_task_complete(struct pci_doe_task *task)
complete(task->private);
}
-static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
+static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u16 *vid,
u8 *protocol)
{
u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
- *index);
+ *index) |
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_VER,
+ (capver >= 2) ? 2 : 0);
__le32 request_pl_le = cpu_to_le32(request_pl);
__le32 response_pl_le;
u32 response_pl;
@@ -421,13 +423,17 @@ static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
{
u8 index = 0;
u8 xa_idx = 0;
+ u32 hdr = 0;
+
+ pci_read_config_dword(doe_mb->pdev, doe_mb->cap_offset, &hdr);
do {
int rc;
u16 vid;
u8 prot;
- rc = pci_doe_discovery(doe_mb, &index, &vid, &prot);
+ rc = pci_doe_discovery(doe_mb, PCI_EXT_CAP_VER(hdr), &index,
+ &vid, &prot);
if (rc)
return rc;
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index cd4ffb39dc..546d2a2795 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -690,50 +690,35 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct pci_epc *epc = epf->epc;
- struct pci_epf_bar *epf_bar;
int bar;
cancel_delayed_work(&epf_test->cmd_handler);
pci_epf_test_clean_dma_chan(epf_test);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
- epf_bar = &epf->bar[bar];
+ if (!epf_test->reg[bar])
+ continue;
- if (epf_test->reg[bar]) {
- pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
- epf_bar);
- pci_epf_free_space(epf, epf_test->reg[bar], bar,
- PRIMARY_INTERFACE);
- }
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
+ &epf->bar[bar]);
+ pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ PRIMARY_INTERFACE);
}
}
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
- int bar, add;
- int ret;
- struct pci_epf_bar *epf_bar;
+ int bar, ret;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
- const struct pci_epc_features *epc_features;
-
- epc_features = epf_test->epc_features;
-
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
- epf_bar = &epf->bar[bar];
- /*
- * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
- * if the specific implementation required a 64-bit BAR,
- * even if we only requested a 32-bit BAR.
- */
- add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
- if (epc_features->bar[bar].type == BAR_RESERVED)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!epf_test->reg[bar])
continue;
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
- epf_bar);
+ &epf->bar[bar]);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
@@ -750,19 +735,12 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct pci_epf_header *header = epf->header;
- const struct pci_epc_features *epc_features;
+ const struct pci_epc_features *epc_features = epf_test->epc_features;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
- bool msix_capable = false;
- bool msi_capable = true;
+ bool linkup_notifier = false;
int ret;
- epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
- if (epc_features) {
- msix_capable = epc_features->msix_capable;
- msi_capable = epc_features->msi_capable;
- }
-
if (epf->vfunc_no <= 1) {
ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
if (ret) {
@@ -775,7 +753,7 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
if (ret)
return ret;
- if (msi_capable) {
+ if (epc_features->msi_capable) {
ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
epf->msi_interrupts);
if (ret) {
@@ -784,7 +762,7 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
}
- if (msix_capable) {
+ if (epc_features->msix_capable) {
ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
epf->msix_interrupts,
epf_test->test_reg_bar,
@@ -795,6 +773,10 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
}
+ linkup_notifier = epc_features->linkup_notifier;
+ if (!linkup_notifier)
+ queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
+
return 0;
}
@@ -817,14 +799,13 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct device *dev = &epf->dev;
- struct pci_epf_bar *epf_bar;
size_t msix_table_size = 0;
size_t test_reg_bar_size;
size_t pba_size = 0;
bool msix_capable;
void *base;
- int bar, add;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ enum pci_barno bar;
const struct pci_epc_features *epc_features;
size_t test_reg_size;
@@ -849,16 +830,14 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
epf_test->reg[test_reg_bar] = base;
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
- epf_bar = &epf->bar[bar];
- add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
+ for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
+ bar = pci_epc_get_next_free_bar(epc_features, bar);
+ if (bar == NO_BAR)
+ break;
if (bar == test_reg_bar)
continue;
- if (epc_features->bar[bar].type == BAR_RESERVED)
- continue;
-
base = pci_epf_alloc_space(epf, bar_size[bar], bar,
epc_features, PRIMARY_INTERFACE);
if (!base)
@@ -870,19 +849,6 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
return 0;
}
-static void pci_epf_configure_bar(struct pci_epf *epf,
- const struct pci_epc_features *epc_features)
-{
- struct pci_epf_bar *epf_bar;
- int i;
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- epf_bar = &epf->bar[i];
- if (epc_features->bar[i].only_64bit)
- epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
- }
-}
-
static int pci_epf_test_bind(struct pci_epf *epf)
{
int ret;
@@ -890,8 +856,6 @@ static int pci_epf_test_bind(struct pci_epf *epf)
const struct pci_epc_features *epc_features;
enum pci_barno test_reg_bar = BAR_0;
struct pci_epc *epc = epf->epc;
- bool linkup_notifier = false;
- bool core_init_notifier = false;
if (WARN_ON_ONCE(!epc))
return -EINVAL;
@@ -902,12 +866,9 @@ static int pci_epf_test_bind(struct pci_epf *epf)
return -EOPNOTSUPP;
}
- linkup_notifier = epc_features->linkup_notifier;
- core_init_notifier = epc_features->core_init_notifier;
test_reg_bar = pci_epc_get_first_free_bar(epc_features);
if (test_reg_bar < 0)
return -EINVAL;
- pci_epf_configure_bar(epf, epc_features);
epf_test->test_reg_bar = test_reg_bar;
epf_test->epc_features = epc_features;
@@ -916,21 +877,12 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (ret)
return ret;
- if (!core_init_notifier) {
- ret = pci_epf_test_core_init(epf);
- if (ret)
- return ret;
- }
-
epf_test->dma_supported = true;
ret = pci_epf_test_init_dma_chan(epf_test);
if (ret)
epf_test->dma_supported = false;
- if (!linkup_notifier && !core_init_notifier)
- queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
-
return 0;
}
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index 8e779eecd6..874cb097b0 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -799,8 +799,9 @@ err_config_interrupt:
*/
static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
{
- epf_ntb_db_bar_clear(ntb);
epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+ epf_ntb_db_bar_clear(ntb);
+ epf_ntb_config_sspad_bar_clear(ntb);
}
#define EPF_NTB_R(_name) \
@@ -1018,8 +1019,10 @@ static int vpci_scan_bus(void *sysdata)
struct epf_ntb *ndev = sysdata;
vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
- if (vpci_bus)
- pr_err("create pci bus\n");
+ if (!vpci_bus) {
+ pr_err("create pci bus failed\n");
+ return -EINVAL;
+ }
pci_bus_add_devices(vpci_bus);
@@ -1335,13 +1338,19 @@ static int epf_ntb_bind(struct pci_epf *epf)
ret = pci_register_driver(&vntb_pci_driver);
if (ret) {
dev_err(dev, "failure register vntb pci driver\n");
- goto err_bar_alloc;
+ goto err_epc_cleanup;
}
- vpci_scan_bus(ntb);
+ ret = vpci_scan_bus(ntb);
+ if (ret)
+ goto err_unregister;
return 0;
+err_unregister:
+ pci_unregister_driver(&vntb_pci_driver);
+err_epc_cleanup:
+ epf_ntb_epc_cleanup(ntb);
err_bar_alloc:
epf_ntb_config_spad_bar_free(ntb);
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 0ea64e24ed..3b21e28f9b 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -64,6 +64,9 @@ static int pci_secondary_epc_epf_link(struct config_item *epf_item,
return ret;
}
+ /* Send any pending EPC initialization complete to the EPF driver */
+ pci_epc_notify_pending_init(epc, epf);
+
return 0;
}
@@ -125,6 +128,9 @@ static int pci_primary_epc_epf_link(struct config_item *epf_item,
return ret;
}
+ /* Send any pending EPC initialization complete to the EPF driver */
+ pci_epc_notify_pending_init(epc, epf);
+
return 0;
}
@@ -230,6 +236,9 @@ static int pci_epc_epf_link(struct config_item *epc_item,
return ret;
}
+ /* Send any pending EPC initialization complete to the EPF driver */
+ pci_epc_notify_pending_init(epc, epf);
+
return 0;
}
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index da3fc0795b..47d27ec743 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -748,11 +748,33 @@ void pci_epc_init_notify(struct pci_epc *epc)
epf->event_ops->core_init(epf);
mutex_unlock(&epf->lock);
}
+ epc->init_complete = true;
mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
/**
+ * pci_epc_notify_pending_init() - Notify the pending EPC device initialization
+ * complete to the EPF device
+ * @epc: the EPC device whose core initialization is pending to be notified
+ * @epf: the EPF device to be notified
+ *
+ * Invoke to notify the pending EPC device initialization complete to the EPF
+ * device. This is used to deliver the notification if the EPC initialization
+ * got completed before the EPF driver bind.
+ */
+void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
+{
+ if (epc->init_complete) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->core_init)
+ epf->event_ops->core_init(epf);
+ mutex_unlock(&epf->lock);
+ }
+}
+EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);
+
+/**
* pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
* the BME event from the Root complex
* @epc: the EPC device that received the BME event
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 0a28a0b091..323f2a60ab 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -255,6 +255,8 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
* @type: Identifies if the allocation is for primary EPC or secondary EPC
*
* Invoke to allocate memory for the PCI EPF register space.
+ * Flag PCI_BASE_ADDRESS_MEM_TYPE_64 will automatically get set if the BAR
+ * can only be a 64-bit BAR, or if the requested size is larger than 2 GB.
*/
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
const struct pci_epc_features *epc_features,
@@ -304,9 +306,10 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
epf_bar[bar].addr = space;
epf_bar[bar].size = size;
epf_bar[bar].barno = bar;
- epf_bar[bar].flags |= upper_32_bits(size) ?
- PCI_BASE_ADDRESS_MEM_TYPE_64 :
- PCI_BASE_ADDRESS_MEM_TYPE_32;
+ if (upper_32_bits(size) || epc_features->bar[bar].only_64bit)
+ epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+ else
+ epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_32;
return space;
}
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
index fdb8dd6ea2..9d428b0ea5 100644
--- a/drivers/pci/hotplug/TODO
+++ b/drivers/pci/hotplug/TODO
@@ -6,6 +6,8 @@ cpcihp:
->set_power callbacks in struct cpci_hp_controller_ops. Why were they
introduced? Can they be removed from the struct?
+* Returned code from pci_hp_add_bridge() is not checked.
+
cpqphp:
* The driver spawns a kthread cpqhp_event_thread() which is woken by the
@@ -16,6 +18,8 @@ cpqphp:
* A large portion of cpqphp_ctrl.c and cpqphp_pci.c concerns resource
management. Doesn't this duplicate functionality in the core?
+* Returned code from pci_hp_add_bridge() is not checked.
+
ibmphp:
* Implementations of hotplug_slot_ops callbacks such as get_adapter_present()
@@ -43,13 +47,7 @@ ibmphp:
* A large portion of ibmphp_res.c and ibmphp_pci.c concerns resource
management. Doesn't this duplicate functionality in the core?
-sgi_hotplug:
-
-* Several functions access the pci_slot member in struct hotplug_slot even
- though pci_hotplug.h declares it private. See sn_hp_destroy() for an
- example. Either the pci_slot member should no longer be declared private
- or sgi_hotplug should store a pointer to it in struct slot. Probably the
- former.
+* Returned code from pci_hp_add_bridge() is not checked.
shpchp:
diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
index be679aa5db..b956ce591f 100644
--- a/drivers/pci/msi/api.c
+++ b/drivers/pci/msi/api.c
@@ -213,8 +213,8 @@ EXPORT_SYMBOL(pci_disable_msix);
* * %PCI_IRQ_MSIX Allow trying MSI-X vector allocations
* * %PCI_IRQ_MSI Allow trying MSI vector allocations
*
- * * %PCI_IRQ_LEGACY Allow trying legacy INTx interrupts, if
- * and only if @min_vecs == 1
+ * * %PCI_IRQ_INTX Allow trying INTx interrupts, if and
+ * only if @min_vecs == 1
*
* * %PCI_IRQ_AFFINITY Auto-manage IRQs affinity by spreading
* the vectors around available CPUs
@@ -279,8 +279,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
return nvecs;
}
- /* use legacy IRQ if allowed */
- if (flags & PCI_IRQ_LEGACY) {
+ /* use INTx IRQ if allowed */
+ if (flags & PCI_IRQ_INTX) {
if (min_vecs == 1 && dev->irq) {
/*
* Invoke the affinity spreading logic to ensure that
@@ -366,56 +366,6 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
EXPORT_SYMBOL(pci_irq_get_affinity);
/**
- * pci_ims_alloc_irq - Allocate an interrupt on a PCI/IMS interrupt domain
- * @dev: The PCI device to operate on
- * @icookie: Pointer to an IMS implementation specific cookie for this
- * IMS instance (PASID, queue ID, pointer...).
- * The cookie content is copied into the MSI descriptor for the
- * interrupt chip callbacks or domain specific setup functions.
- * @affdesc: Optional pointer to an interrupt affinity descriptor
- *
- * There is no index for IMS allocations as IMS is an implementation
- * specific storage and does not have any direct associations between
- * index, which might be a pure software construct, and device
- * functionality. This association is established by the driver either via
- * the index - if there is a hardware table - or in case of purely software
- * managed IMS implementation the association happens via the
- * irq_write_msi_msg() callback of the implementation specific interrupt
- * chip, which utilizes the provided @icookie to store the MSI message in
- * the appropriate place.
- *
- * Return: A struct msi_map
- *
- * On success msi_map::index contains the allocated index (>= 0) and
- * msi_map::virq the allocated Linux interrupt number (> 0).
- *
- * On fail msi_map::index contains the error code and msi_map::virq
- * is set to 0.
- */
-struct msi_map pci_ims_alloc_irq(struct pci_dev *dev, union msi_instance_cookie *icookie,
- const struct irq_affinity_desc *affdesc)
-{
- return msi_domain_alloc_irq_at(&dev->dev, MSI_SECONDARY_DOMAIN, MSI_ANY_INDEX,
- affdesc, icookie);
-}
-EXPORT_SYMBOL_GPL(pci_ims_alloc_irq);
-
-/**
- * pci_ims_free_irq - Allocate an interrupt on a PCI/IMS interrupt domain
- * which was allocated via pci_ims_alloc_irq()
- * @dev: The PCI device to operate on
- * @map: A struct msi_map describing the interrupt to free as
- * returned from pci_ims_alloc_irq()
- */
-void pci_ims_free_irq(struct pci_dev *dev, struct msi_map map)
-{
- if (WARN_ON_ONCE(map.index < 0 || map.virq <= 0))
- return;
- msi_domain_free_irqs_range(&dev->dev, MSI_SECONDARY_DOMAIN, map.index, map.index);
-}
-EXPORT_SYMBOL_GPL(pci_ims_free_irq);
-
-/**
* pci_free_irq_vectors() - Free previously allocated IRQs for a device
* @dev: the PCI device to operate on
*
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index cfd84a899c..03d2dd2579 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -355,65 +355,6 @@ bool pci_msi_domain_supports(struct pci_dev *pdev, unsigned int feature_mask,
return (supported & feature_mask) == feature_mask;
}
-/**
- * pci_create_ims_domain - Create a secondary IMS domain for a PCI device
- * @pdev: The PCI device to operate on
- * @template: The MSI info template which describes the domain
- * @hwsize: The size of the hardware entry table or 0 if the domain
- * is purely software managed
- * @data: Optional pointer to domain specific data to be stored
- * in msi_domain_info::data
- *
- * Return: True on success, false otherwise
- *
- * An IMS domain is expected to have the following constraints:
- * - The index space is managed by the core code
- *
- * - There is no requirement for consecutive index ranges
- *
- * - The interrupt chip must provide the following callbacks:
- * - irq_mask()
- * - irq_unmask()
- * - irq_write_msi_msg()
- *
- * - The interrupt chip must provide the following optional callbacks
- * when the irq_mask(), irq_unmask() and irq_write_msi_msg() callbacks
- * cannot operate directly on hardware, e.g. in the case that the
- * interrupt message store is in queue memory:
- * - irq_bus_lock()
- * - irq_bus_unlock()
- *
- * These callbacks are invoked from preemptible task context and are
- * allowed to sleep. In this case the mandatory callbacks above just
- * store the information. The irq_bus_unlock() callback is supposed
- * to make the change effective before returning.
- *
- * - Interrupt affinity setting is handled by the underlying parent
- * interrupt domain and communicated to the IMS domain via
- * irq_write_msi_msg().
- *
- * The domain is automatically destroyed when the PCI device is removed.
- */
-bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
- unsigned int hwsize, void *data)
-{
- struct irq_domain *domain = dev_get_msi_domain(&pdev->dev);
-
- if (!domain || !irq_domain_is_msi_parent(domain))
- return false;
-
- if (template->info.bus_token != DOMAIN_BUS_PCI_DEVICE_IMS ||
- !(template->info.flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS) ||
- !(template->info.flags & MSI_FLAG_FREE_MSI_DESCS) ||
- !template->chip.irq_mask || !template->chip.irq_unmask ||
- !template->chip.irq_write_msi_msg || template->chip.irq_set_affinity)
- return false;
-
- return msi_create_device_irq_domain(&pdev->dev, MSI_SECONDARY_DOMAIN, template,
- hwsize, data, NULL);
-}
-EXPORT_SYMBOL_GPL(pci_create_ims_domain);
-
/*
* Users of the generic MSI infrastructure expect a device to have a single ID,
* so with DMA aliases we have to pick the least-worst compromise. Devices with
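The checks in the removed pci_create_ims_domain() translate into a fairly rigid template shape. A sketch of a template that would have passed them (my_ims_mask, my_ims_unmask, my_ims_write_msg, hwsize and drvdata are hypothetical names, purely illustrative):

	static const struct msi_domain_template my_ims_template = {
		.chip = {
			.name			= "my-ims",
			.irq_mask		= my_ims_mask,		/* required */
			.irq_unmask		= my_ims_unmask,	/* required */
			.irq_write_msi_msg	= my_ims_write_msg,	/* required */
			/* no .irq_set_affinity: affinity is handled by the parent domain */
		},
		.info = {
			.bus_token	= DOMAIN_BUS_PCI_DEVICE_IMS,
			.flags		= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
					  MSI_FLAG_FREE_MSI_DESCS,
		},
	};

	/* registered once per device, e.g. from probe: */
	if (!pci_create_ims_domain(pdev, &my_ims_template, hwsize, drvdata))
		return -ENODEV;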
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index d9d3f57c6f..3a45879d85 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -86,9 +86,11 @@ static int pcim_setup_msi_release(struct pci_dev *dev)
return 0;
ret = devm_add_action(&dev->dev, pcim_msi_release, dev);
- if (!ret)
- dev->is_msi_managed = true;
- return ret;
+ if (ret)
+ return ret;
+
+ dev->is_msi_managed = true;
+ return 0;
}
/*
@@ -99,9 +101,10 @@ static int pci_setup_msi_context(struct pci_dev *dev)
{
int ret = msi_setup_device_data(&dev->dev);
- if (!ret)
- ret = pcim_setup_msi_release(dev);
- return ret;
+ if (ret)
+ return ret;
+
+ return pcim_setup_msi_release(dev);
}
/*
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index cbbf197df8..dff09e4892 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -142,8 +142,8 @@ enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
* the dfl or actual value as it sees fit. Don't forget this is
* measured in 32-bit words, not bytes.
*/
-u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
-u8 pci_cache_line_size;
+u8 pci_dfl_cache_line_size __ro_after_init = L1_CACHE_BYTES >> 2;
+u8 pci_cache_line_size __ro_after_init;
/*
* If we set up a device for bus mastering, we need to check the latency
@@ -2116,20 +2116,6 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
}
/**
- * pci_enable_device_io - Initialize a device for use with IO space
- * @dev: PCI device to be initialized
- *
- * Initialize device before it's used by a driver. Ask low-level code
- * to enable I/O resources. Wake up the device if it was suspended.
- * Beware, this function can fail.
- */
-int pci_enable_device_io(struct pci_dev *dev)
-{
- return pci_enable_device_flags(dev, IORESOURCE_IO);
-}
-EXPORT_SYMBOL(pci_enable_device_io);
-
-/**
* pci_enable_device_mem - Initialize a device for use with Memory space
* @dev: PCI device to be initialized
*
@@ -4642,9 +4628,10 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
/*
* Ensure the updated LNKCTL parameters are used during link
- * training by checking that there is no ongoing link training to
- * avoid LTSSM race as recommended in Implementation Note at the
- * end of PCIe r6.0.1 sec 7.5.3.7.
+ * training by checking that there is no ongoing link training that
+ * may have started before link parameters were changed, so as to
+ * avoid LTSSM race as recommended in Implementation Note at the end
+ * of PCIe r6.1 sec 7.5.3.7.
*/
rc = pcie_wait_for_link_status(pdev, true, false);
if (rc)
@@ -4766,7 +4753,7 @@ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
*/
int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
{
- struct pci_dev *child;
+ struct pci_dev *child __free(pci_dev_put) = NULL;
int delay;
if (pci_dev_is_disconnected(dev))
@@ -4795,8 +4782,8 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
return 0;
}
- child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
- bus_list);
+ child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
+ struct pci_dev, bus_list));
up_read(&pci_bus_sem);
/*
@@ -4944,16 +4931,96 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
}
+static u16 cxl_port_dvsec(struct pci_dev *dev)
+{
+ return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL,
+ PCI_DVSEC_CXL_PORT);
+}
+
+static bool cxl_sbr_masked(struct pci_dev *dev)
+{
+ u16 dvsec, reg;
+ int rc;
+
+ dvsec = cxl_port_dvsec(dev);
+ if (!dvsec)
+ return false;
+
+ rc = pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
+ if (rc || PCI_POSSIBLE_ERROR(reg))
+ return false;
+
+ /*
+ * Per CXL spec r3.1, sec 8.1.5.2, when "Unmask SBR" is 0, the SBR
+ * bit in Bridge Control has no effect. When 1, the Port generates
+ * hot reset when the SBR bit is set to 1.
+ */
+ if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR)
+ return false;
+
+ return true;
+}
+
static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
{
+ struct pci_dev *bridge = pci_upstream_bridge(dev);
int rc;
+ /*
+ * If "dev" is below a CXL port that has SBR control masked, SBR
+ * won't do anything, so return error.
+ */
+ if (bridge && cxl_sbr_masked(bridge)) {
+ if (probe)
+ return 0;
+
+ return -ENOTTY;
+ }
+
rc = pci_dev_reset_slot_function(dev, probe);
if (rc != -ENOTTY)
return rc;
return pci_parent_bus_reset(dev, probe);
}
+static int cxl_reset_bus_function(struct pci_dev *dev, bool probe)
+{
+ struct pci_dev *bridge;
+ u16 dvsec, reg, val;
+ int rc;
+
+ bridge = pci_upstream_bridge(dev);
+ if (!bridge)
+ return -ENOTTY;
+
+ dvsec = cxl_port_dvsec(bridge);
+ if (!dvsec)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
+ if (rc)
+ return -ENOTTY;
+
+ if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) {
+ val = reg;
+ } else {
+ val = reg | PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR;
+ pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
+ val);
+ }
+
+ rc = pci_reset_bus_function(dev, probe);
+
+ if (reg != val)
+ pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
+ reg);
+
+ return rc;
+}
+
void pci_dev_lock(struct pci_dev *dev)
{
/* block PM suspend, driver probe, etc. */
@@ -5038,6 +5105,7 @@ static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
{ pci_af_flr, .name = "af_flr" },
{ pci_pm_reset, .name = "pm" },
{ pci_reset_bus_function, .name = "bus" },
+ { cxl_reset_bus_function, .name = "cxl_bus" },
};
static ssize_t reset_method_show(struct device *dev,
@@ -5262,11 +5330,20 @@ void pci_init_reset_methods(struct pci_dev *dev)
*/
int pci_reset_function(struct pci_dev *dev)
{
+ struct pci_dev *bridge;
int rc;
if (!pci_reset_supported(dev))
return -ENOTTY;
+ /*
+ * If there's no upstream bridge, no locking is needed since there is
+ * no upstream bridge configuration to hold consistent.
+ */
+ bridge = pci_upstream_bridge(dev);
+ if (bridge)
+ pci_dev_lock(bridge);
+
pci_dev_lock(dev);
pci_dev_save_and_disable(dev);
@@ -5275,6 +5352,9 @@ int pci_reset_function(struct pci_dev *dev)
pci_dev_restore(dev);
pci_dev_unlock(dev);
+ if (bridge)
+ pci_dev_unlock(bridge);
+
return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
@@ -6082,8 +6162,9 @@ EXPORT_SYMBOL(pcie_get_width_cap);
* and width, multiplying them, and applying encoding overhead. The result
* is in Mb/s, i.e., megabits/second of raw bandwidth.
*/
-u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width)
+static u32 pcie_bandwidth_capable(struct pci_dev *dev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
{
*speed = pcie_get_speed_cap(dev);
*width = pcie_get_width_cap(dev);
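A note on the __free(pci_dev_put) annotation introduced in pci_bridge_wait_for_secondary_bus() above: it uses the scope-based cleanup helpers from <linux/cleanup.h>, so the reference taken by pci_dev_get() is dropped automatically on every return path. A minimal sketch of the idiom (assumes a 'bridge' pci_dev with a populated subordinate bus; do_something() is a placeholder, not from this patch):

	struct pci_dev *child __free(pci_dev_put) = NULL;

	down_read(&pci_bus_sem);
	child = pci_dev_get(list_first_entry(&bridge->subordinate->devices,
					     struct pci_dev, bus_list));
	up_read(&pci_bus_sem);

	if (pci_dev_is_disconnected(child))
		return -ENODEV;		/* pci_dev_put(child) runs automatically here */

	return do_something(child);	/* ...and here, once the call has been evaluated */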
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 17fed18468..fd44565c47 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -293,8 +293,6 @@ void pci_bus_put(struct pci_bus *bus);
const char *pci_speed_string(enum pci_bus_speed speed);
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
-u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width);
void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
void pcie_report_downtraining(struct pci_dev *dev);
void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 8999fcebde..17919b99fa 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -47,7 +47,7 @@ config PCIEAER_INJECT
error injection can fake almost all kinds of errors with the
help of a user space helper tool aer-inject, which can be
gotten from:
- https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/
+ https://github.com/intel/aer-inject.git
config PCIEAER_CXL
bool "PCI Express CXL RAS support"
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index 2dab275d25..f81b2303bf 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -6,7 +6,7 @@
* trigger various real hardware errors. Software based error
* injection can fake almost all kinds of errors with the help of a
* user space helper tool aer-inject, which can be gotten from:
- * https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/
+ * https://github.com/intel/aer-inject.git
*
* Copyright 2009 Intel Corporation.
* Huang Ying <ying.huang@intel.com>
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 47761c7ef2..cee2365e54 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -8,6 +8,8 @@
*/
#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/math.h>
@@ -189,21 +191,18 @@ void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."
-/* Note: those are not register definitions */
-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
-#define ASPM_STATE_L1 (4) /* L1 state */
-#define ASPM_STATE_L1_1 (8) /* ASPM L1.1 state */
-#define ASPM_STATE_L1_2 (0x10) /* ASPM L1.2 state */
-#define ASPM_STATE_L1_1_PCIPM (0x20) /* PCI PM L1.1 state */
-#define ASPM_STATE_L1_2_PCIPM (0x40) /* PCI PM L1.2 state */
-#define ASPM_STATE_L1_SS_PCIPM (ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
-#define ASPM_STATE_L1_2_MASK (ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
-#define ASPM_STATE_L1SS (ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
- ASPM_STATE_L1_2_MASK)
-#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
-#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1 | \
- ASPM_STATE_L1SS)
+/* Note: these are not register definitions */
+#define PCIE_LINK_STATE_L0S_UP BIT(0) /* Upstream direction L0s state */
+#define PCIE_LINK_STATE_L0S_DW BIT(1) /* Downstream direction L0s state */
+static_assert(PCIE_LINK_STATE_L0S == (PCIE_LINK_STATE_L0S_UP | PCIE_LINK_STATE_L0S_DW));
+
+#define PCIE_LINK_STATE_L1_SS_PCIPM (PCIE_LINK_STATE_L1_1_PCIPM |\
+ PCIE_LINK_STATE_L1_2_PCIPM)
+#define PCIE_LINK_STATE_L1_2_MASK (PCIE_LINK_STATE_L1_2 |\
+ PCIE_LINK_STATE_L1_2_PCIPM)
+#define PCIE_LINK_STATE_L1SS (PCIE_LINK_STATE_L1_1 |\
+ PCIE_LINK_STATE_L1_1_PCIPM |\
+ PCIE_LINK_STATE_L1_2_MASK)
struct pcie_link_state {
struct pci_dev *pdev; /* Upstream component of the Link */
@@ -275,10 +274,10 @@ static int policy_to_aspm_state(struct pcie_link_state *link)
return 0;
case POLICY_POWERSAVE:
/* Enable ASPM L0s/L1 */
- return (ASPM_STATE_L0S | ASPM_STATE_L1);
+ return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
case POLICY_POWER_SUPERSAVE:
/* Enable Everything */
- return ASPM_STATE_ALL;
+ return PCIE_LINK_STATE_ASPM_ALL;
case POLICY_DEFAULT:
return link->aspm_default;
}
@@ -581,14 +580,14 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
latency_dw_l1 = calc_l1_latency(lnkcap_dw);
/* Check upstream direction L0s latency */
- if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
+ if ((link->aspm_capable & PCIE_LINK_STATE_L0S_UP) &&
(latency_up_l0s > acceptable_l0s))
- link->aspm_capable &= ~ASPM_STATE_L0S_UP;
+ link->aspm_capable &= ~PCIE_LINK_STATE_L0S_UP;
/* Check downstream direction L0s latency */
- if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
+ if ((link->aspm_capable & PCIE_LINK_STATE_L0S_DW) &&
(latency_dw_l0s > acceptable_l0s))
- link->aspm_capable &= ~ASPM_STATE_L0S_DW;
+ link->aspm_capable &= ~PCIE_LINK_STATE_L0S_DW;
/*
* Check L1 latency.
 * Every switch on the path to root complex needs 1
@@ -603,9 +602,9 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
* substate latencies (and hence do not do any check).
*/
latency = max_t(u32, latency_up_l1, latency_dw_l1);
- if ((link->aspm_capable & ASPM_STATE_L1) &&
+ if ((link->aspm_capable & PCIE_LINK_STATE_L1) &&
(latency + l1_switch_latency > acceptable_l1))
- link->aspm_capable &= ~ASPM_STATE_L1;
+ link->aspm_capable &= ~PCIE_LINK_STATE_L1;
l1_switch_latency += NSEC_PER_USEC;
link = link->parent;
@@ -741,13 +740,13 @@ static void aspm_l1ss_init(struct pcie_link_state *link)
child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
- link->aspm_support |= ASPM_STATE_L1_1;
+ link->aspm_support |= PCIE_LINK_STATE_L1_1;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
- link->aspm_support |= ASPM_STATE_L1_2;
+ link->aspm_support |= PCIE_LINK_STATE_L1_2;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
- link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
+ link->aspm_support |= PCIE_LINK_STATE_L1_1_PCIPM;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
- link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
+ link->aspm_support |= PCIE_LINK_STATE_L1_2_PCIPM;
if (parent_l1ss_cap)
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
@@ -757,15 +756,15 @@ static void aspm_l1ss_init(struct pcie_link_state *link)
&child_l1ss_ctl1);
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
- link->aspm_enabled |= ASPM_STATE_L1_1;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_1;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
- link->aspm_enabled |= ASPM_STATE_L1_2;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_2;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
- link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_1_PCIPM;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
- link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_2_PCIPM;
- if (link->aspm_support & ASPM_STATE_L1_2_MASK)
+ if (link->aspm_support & PCIE_LINK_STATE_L1_2_MASK)
aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap);
}
@@ -778,8 +777,8 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
if (blacklist) {
/* Set enabled/disable so that we will disable ASPM later */
- link->aspm_enabled = ASPM_STATE_ALL;
- link->aspm_disable = ASPM_STATE_ALL;
+ link->aspm_enabled = PCIE_LINK_STATE_ASPM_ALL;
+ link->aspm_disable = PCIE_LINK_STATE_ASPM_ALL;
return;
}
@@ -814,19 +813,19 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* support L0s.
*/
if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
- link->aspm_support |= ASPM_STATE_L0S;
+ link->aspm_support |= PCIE_LINK_STATE_L0S;
if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
- link->aspm_enabled |= ASPM_STATE_L0S_UP;
+ link->aspm_enabled |= PCIE_LINK_STATE_L0S_UP;
if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
- link->aspm_enabled |= ASPM_STATE_L0S_DW;
+ link->aspm_enabled |= PCIE_LINK_STATE_L0S_DW;
/* Setup L1 state */
if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
- link->aspm_support |= ASPM_STATE_L1;
+ link->aspm_support |= PCIE_LINK_STATE_L1;
if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
- link->aspm_enabled |= ASPM_STATE_L1;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1;
aspm_l1ss_init(link);
@@ -876,7 +875,7 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
* If needed, disable L1, and it gets enabled later
* in pcie_config_aspm_link().
*/
- if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
+ if (enable_req & (PCIE_LINK_STATE_L1_1 | PCIE_LINK_STATE_L1_2)) {
pcie_capability_clear_word(child, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPM_L1);
pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
@@ -884,13 +883,13 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
}
val = 0;
- if (state & ASPM_STATE_L1_1)
+ if (state & PCIE_LINK_STATE_L1_1)
val |= PCI_L1SS_CTL1_ASPM_L1_1;
- if (state & ASPM_STATE_L1_2)
+ if (state & PCIE_LINK_STATE_L1_2)
val |= PCI_L1SS_CTL1_ASPM_L1_2;
- if (state & ASPM_STATE_L1_1_PCIPM)
+ if (state & PCIE_LINK_STATE_L1_1_PCIPM)
val |= PCI_L1SS_CTL1_PCIPM_L1_1;
- if (state & ASPM_STATE_L1_2_PCIPM)
+ if (state & PCIE_LINK_STATE_L1_2_PCIPM)
val |= PCI_L1SS_CTL1_PCIPM_L1_2;
/* Enable what we need to enable */
@@ -916,29 +915,29 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
state &= (link->aspm_capable & ~link->aspm_disable);
/* Can't enable any substates if L1 is not enabled */
- if (!(state & ASPM_STATE_L1))
- state &= ~ASPM_STATE_L1SS;
+ if (!(state & PCIE_LINK_STATE_L1))
+ state &= ~PCIE_LINK_STATE_L1SS;
/* Spec says both ports must be in D0 before enabling PCI PM substates */
if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
- state &= ~ASPM_STATE_L1_SS_PCIPM;
- state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
+ state &= ~PCIE_LINK_STATE_L1_SS_PCIPM;
+ state |= (link->aspm_enabled & PCIE_LINK_STATE_L1_SS_PCIPM);
}
/* Nothing to do if the link is already in the requested state */
if (link->aspm_enabled == state)
return;
/* Convert ASPM state to upstream/downstream ASPM register state */
- if (state & ASPM_STATE_L0S_UP)
+ if (state & PCIE_LINK_STATE_L0S_UP)
dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
- if (state & ASPM_STATE_L0S_DW)
+ if (state & PCIE_LINK_STATE_L0S_DW)
upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
- if (state & ASPM_STATE_L1) {
+ if (state & PCIE_LINK_STATE_L1) {
upstream |= PCI_EXP_LNKCTL_ASPM_L1;
dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
}
- if (link->aspm_capable & ASPM_STATE_L1SS)
+ if (link->aspm_capable & PCIE_LINK_STATE_L1SS)
pcie_config_aspm_l1ss(link, state);
/*
@@ -947,11 +946,11 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
* upstream component first and then downstream, and vice
* versa for disabling ASPM L1. Spec doesn't mention L0S.
*/
- if (state & ASPM_STATE_L1)
+ if (state & PCIE_LINK_STATE_L1)
pcie_config_aspm_dev(parent, upstream);
list_for_each_entry(child, &linkbus->devices, bus_list)
pcie_config_aspm_dev(child, dwstream);
- if (!(state & ASPM_STATE_L1))
+ if (!(state & PCIE_LINK_STATE_L1))
pcie_config_aspm_dev(parent, upstream);
link->aspm_enabled = state;
@@ -1324,6 +1323,28 @@ static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
return bridge->link_state;
}
+static u8 pci_calc_aspm_disable_mask(int state)
+{
+ state &= ~PCIE_LINK_STATE_CLKPM;
+
+ /* L1 PM substates require L1 */
+ if (state & PCIE_LINK_STATE_L1)
+ state |= PCIE_LINK_STATE_L1SS;
+
+ return state;
+}
+
+static u8 pci_calc_aspm_enable_mask(int state)
+{
+ state &= ~PCIE_LINK_STATE_CLKPM;
+
+ /* L1 PM substates require L1 */
+ if (state & PCIE_LINK_STATE_L1SS)
+ state |= PCIE_LINK_STATE_L1;
+
+ return state;
+}
+
static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked)
{
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
@@ -1346,19 +1367,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked
if (!locked)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- if (state & PCIE_LINK_STATE_L0S)
- link->aspm_disable |= ASPM_STATE_L0S;
- if (state & PCIE_LINK_STATE_L1)
- /* L1 PM substates require L1 */
- link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
- if (state & PCIE_LINK_STATE_L1_1)
- link->aspm_disable |= ASPM_STATE_L1_1;
- if (state & PCIE_LINK_STATE_L1_2)
- link->aspm_disable |= ASPM_STATE_L1_2;
- if (state & PCIE_LINK_STATE_L1_1_PCIPM)
- link->aspm_disable |= ASPM_STATE_L1_1_PCIPM;
- if (state & PCIE_LINK_STATE_L1_2_PCIPM)
- link->aspm_disable |= ASPM_STATE_L1_2_PCIPM;
+ link->aspm_disable |= pci_calc_aspm_disable_mask(state);
pcie_config_aspm_link(link, policy_to_aspm_state(link));
if (state & PCIE_LINK_STATE_CLKPM)
@@ -1414,20 +1423,7 @@ static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
if (!locked)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- link->aspm_default = 0;
- if (state & PCIE_LINK_STATE_L0S)
- link->aspm_default |= ASPM_STATE_L0S;
- if (state & PCIE_LINK_STATE_L1)
- link->aspm_default |= ASPM_STATE_L1;
- /* L1 PM substates require L1 */
- if (state & PCIE_LINK_STATE_L1_1)
- link->aspm_default |= ASPM_STATE_L1_1 | ASPM_STATE_L1;
- if (state & PCIE_LINK_STATE_L1_2)
- link->aspm_default |= ASPM_STATE_L1_2 | ASPM_STATE_L1;
- if (state & PCIE_LINK_STATE_L1_1_PCIPM)
- link->aspm_default |= ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1;
- if (state & PCIE_LINK_STATE_L1_2_PCIPM)
- link->aspm_default |= ASPM_STATE_L1_2_PCIPM | ASPM_STATE_L1;
+ link->aspm_default = pci_calc_aspm_enable_mask(state);
pcie_config_aspm_link(link, policy_to_aspm_state(link));
link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
@@ -1563,12 +1559,12 @@ static ssize_t aspm_attr_store_common(struct device *dev,
if (state_enable) {
link->aspm_disable &= ~state;
/* need to enable L1 for substates */
- if (state & ASPM_STATE_L1SS)
- link->aspm_disable &= ~ASPM_STATE_L1;
+ if (state & PCIE_LINK_STATE_L1SS)
+ link->aspm_disable &= ~PCIE_LINK_STATE_L1;
} else {
link->aspm_disable |= state;
- if (state & ASPM_STATE_L1)
- link->aspm_disable |= ASPM_STATE_L1SS;
+ if (state & PCIE_LINK_STATE_L1)
+ link->aspm_disable |= PCIE_LINK_STATE_L1SS;
}
pcie_config_aspm_link(link, policy_to_aspm_state(link));
@@ -1582,12 +1578,12 @@ static ssize_t aspm_attr_store_common(struct device *dev,
#define ASPM_ATTR(_f, _s) \
static ssize_t _f##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
-{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_##_s); } \
+{ return aspm_attr_show_common(dev, attr, buf, PCIE_LINK_STATE_##_s); } \
\
static ssize_t _f##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t len) \
-{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_##_s); }
+{ return aspm_attr_store_common(dev, attr, buf, len, PCIE_LINK_STATE_##_s); }
ASPM_ATTR(l0s_aspm, L0S)
ASPM_ATTR(l1_aspm, L1)
@@ -1654,12 +1650,12 @@ static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
struct pci_dev *pdev = to_pci_dev(dev);
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
static const u8 aspm_state_map[] = {
- ASPM_STATE_L0S,
- ASPM_STATE_L1,
- ASPM_STATE_L1_1,
- ASPM_STATE_L1_2,
- ASPM_STATE_L1_1_PCIPM,
- ASPM_STATE_L1_2_PCIPM,
+ PCIE_LINK_STATE_L0S,
+ PCIE_LINK_STATE_L1,
+ PCIE_LINK_STATE_L1_1,
+ PCIE_LINK_STATE_L1_2,
+ PCIE_LINK_STATE_L1_1_PCIPM,
+ PCIE_LINK_STATE_L1_2_PCIPM,
};
if (aspm_disabled || !link)
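The two helpers added above centralize the L1/L1SS implication rules. A brief worked example (conceptual only, since the helpers are static to aspm.c; the exact bit encodings live in include/linux/pci.h):

	/* Disabling L1 must drag all L1 PM substates along with it: */
	u8 d = pci_calc_aspm_disable_mask(PCIE_LINK_STATE_L1);
	/* d == PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L1SS */

	/* Enabling any L1 substate as the default requires L1 itself: */
	u8 e = pci_calc_aspm_enable_mask(PCIE_LINK_STATE_L1_2_PCIPM);
	/* e == PCIE_LINK_STATE_L1_2_PCIPM | PCIE_LINK_STATE_L1 */

	/* Clock PM is filtered out by both helpers and handled separately: */
	u8 c = pci_calc_aspm_disable_mask(PCIE_LINK_STATE_CLKPM);
	/* c == 0 */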
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 705893b5f7..31090770ff 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -116,9 +116,7 @@ static int report_mmio_enabled(struct pci_dev *dev, void *data)
device_lock(&dev->dev);
pdrv = dev->driver;
- if (!pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->mmio_enabled)
+ if (!pdrv || !pdrv->err_handler || !pdrv->err_handler->mmio_enabled)
goto out;
err_handler = pdrv->err_handler;
@@ -137,9 +135,7 @@ static int report_slot_reset(struct pci_dev *dev, void *data)
device_lock(&dev->dev);
pdrv = dev->driver;
- if (!pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->slot_reset)
+ if (!pdrv || !pdrv->err_handler || !pdrv->err_handler->slot_reset)
goto out;
err_handler = pdrv->err_handler;
@@ -158,9 +154,7 @@ static int report_resume(struct pci_dev *dev, void *data)
device_lock(&dev->dev);
pdrv = dev->driver;
if (!pci_dev_set_io_state(dev, pci_channel_io_normal) ||
- !pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->resume)
+ !pdrv || !pdrv->err_handler || !pdrv->err_handler->resume)
goto out;
err_handler = pdrv->err_handler;
diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c
index 14a4b89a3b..bb65dfe434 100644
--- a/drivers/pci/pcie/portdrv.c
+++ b/drivers/pci/pcie/portdrv.c
@@ -187,15 +187,15 @@ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
* interrupt.
*/
if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi())
- goto legacy_irq;
+ goto intx_irq;
/* Try to use MSI-X or MSI if supported */
if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0)
return 0;
-legacy_irq:
- /* fall back to legacy IRQ */
- ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+intx_irq:
+ /* fall back to INTX IRQ */
+ ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_INTX);
if (ret < 0)
return -ENODEV;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 1325fbae2f..5fbabb4e34 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -95,7 +95,7 @@ static void release_pcibus_dev(struct device *dev)
kfree(pci_bus);
}
-static struct class pcibus_class = {
+static const struct class pcibus_class = {
.name = "pci_bus",
.dev_release = &release_pcibus_dev,
.dev_groups = pcibus_groups,
@@ -1482,6 +1482,9 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
}
out:
+ /* Clear errors in the Secondary Status Register */
+ pci_write_config_word(dev, PCI_SEC_STATUS, 0xffff);
+
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
pm_runtime_put(&dev->dev);
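Why the all-ones write added in pci_scan_bridge_extend() above works: the error bits in the bridge Secondary Status register are RW1C (write-1-to-clear), so a blanket 0xffff write acknowledges whatever was latched while the secondary bus was scanned and leaves the read-only status bits untouched. An annotated restatement, with that assumption spelled out:

	/* Secondary Status error bits are RW1C: writing all-ones clears any
	 * errors latched during the scan; read-only bits ignore the write. */
	pci_write_config_word(dev, PCI_SEC_STATUS, 0xffff);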
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index eff7f5df08..568410e64c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -6253,3 +6253,23 @@ static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev)
pdev->d3cold_delay = 1000;
}
DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec);
+
+#ifdef CONFIG_PCIEAER
+static void pci_mask_replay_timer_timeout(struct pci_dev *pdev)
+{
+ struct pci_dev *parent = pci_upstream_bridge(pdev);
+ u32 val;
+
+ if (!parent || !parent->aer_cap)
+ return;
+
+ pci_info(parent, "mask Replay Timer Timeout Correctable Errors due to %s hardware defect",
+ pci_name(pdev));
+
+ pci_read_config_dword(parent, parent->aer_cap + PCI_ERR_COR_MASK, &val);
+ val |= PCI_ERR_COR_REP_TIMER;
+ pci_write_config_dword(parent, parent->aer_cap + PCI_ERR_COR_MASK, val);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_GLI, 0x9750, pci_mask_replay_timer_timeout);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_GLI, 0x9755, pci_mask_replay_timer_timeout);
+#endif
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 909e6a7c3c..141d6b3195 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -829,11 +829,9 @@ static resource_size_t calculate_memsize(resource_size_t size,
size = min_size;
if (old_size == 1)
old_size = 0;
- if (size < old_size)
- size = old_size;
- size = ALIGN(max(size, add_size) + children_add_size, align);
- return size;
+ size = max(size, add_size) + children_add_size;
+ return ALIGN(max(size, old_size), align);
}
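The restructuring of calculate_memsize() is not purely cosmetic: children_add_size is now added before the comparison against old_size rather than after old_size has replaced size. A sketch of the behavioral difference, using hypothetical values and assuming min_size is already satisfied:

	/*
	 * Worked example: size = 1 MB, add_size = 0, children_add_size = 2 MB,
	 * old_size = 4 MB, align = 1 MB.
	 *
	 *   old code: size < old_size, so size = 4 MB;
	 *             return ALIGN(max(4 MB, 0) + 2 MB, 1 MB) = 6 MB
	 *   new code: size = max(1 MB, 0) + 2 MB = 3 MB;
	 *             return ALIGN(max(3 MB, 4 MB), 1 MB)     = 4 MB
	 *
	 * An already-sufficient assigned window is no longer inflated by the
	 * optional children_add_size headroom.
	 */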
resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,