Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Kconfig                               |    5
-rw-r--r--  drivers/pci/Makefile                              |    7
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-ep.c  |   14
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.h     |    6
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c             |  630
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.c         |   12
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape-ep.c    |    5
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c   |  120
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c |   21
-rw-r--r--  drivers/pci/controller/dwc/pcie-keembay.c         |    8
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c            |    1
-rw-r--r--  drivers/pci/controller/dwc/pcie-rcar-gen4.c       |    4
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c        |   13
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier-ep.c     |   15
-rw-r--r--  drivers/pci/controller/pcie-rcar-ep.c             |   14
-rw-r--r--  drivers/pci/controller/pcie-rockchip-ep.c         |    6
-rw-r--r--  drivers/pci/devres.c                              |  448
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-mhi.c      |   21
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-ntb.c      |    6
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c     |   21
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-vntb.c     |   19
-rw-r--r--  drivers/pci/endpoint/pci-epc-core.c               |   25
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c               |   20
-rw-r--r--  drivers/pci/hotplug/s390_pci_hpc.c                |   65
-rw-r--r--  drivers/pci/iomap.c                               |  177
-rw-r--r--  drivers/pci/irq.c                                 |  204
-rw-r--r--  drivers/pci/mmap.c                                |   29
-rw-r--r--  drivers/pci/of_property.c                         |    2
-rw-r--r--  drivers/pci/pci-driver.c                          |   16
-rw-r--r--  drivers/pci/pci-sysfs.c                           |  167
-rw-r--r--  drivers/pci/pci.c                                 |  521
-rw-r--r--  drivers/pci/pci.h                                 |   55
-rw-r--r--  drivers/pci/pcie/Makefile                         |    2
-rw-r--r--  drivers/pci/pcie/aer.c                            |   20
-rw-r--r--  drivers/pci/pcie/aspm.c                           |  268
-rw-r--r--  drivers/pci/pcie/dpc.c                            |   74
-rw-r--r--  drivers/pci/pcie/edr.c                            |   28
-rw-r--r--  drivers/pci/pcie/portdrv.h                        |    2
-rw-r--r--  drivers/pci/probe.c                               |   66
-rw-r--r--  drivers/pci/setup-irq.c                           |   64
40 files changed, 2024 insertions(+), 1177 deletions(-)
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 7414726262..d35001589d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -13,6 +13,11 @@ config FORCE_PCI
select HAVE_PCI
select PCI
+# select this to provide a generic PCI iomap
+# without PCI itself having to be defined
+config GENERIC_PCI_IOMAP
+ bool
+
menuconfig PCI
bool "PCI support"
depends on HAVE_PCI
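For context, CONFIG_GENERIC_PCI_IOMAP exists so that pci_iomap() has a definition even on configurations where PCI itself is disabled. A minimal sketch of the fallback pattern this enables (hedged: modeled on the stub layout in include/asm-generic/pci_iomap.h, which may differ in detail):

#ifdef CONFIG_PCI
/* Real implementation, now built from drivers/pci/iomap.c */
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
#elif defined(CONFIG_GENERIC_PCI_IOMAP)
/* PCI disabled: no PCI device can exist, so a NULL-returning stub suffices */
static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar,
				      unsigned long max)
{
	return NULL;
}
#endif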
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index cc8b4e01e2..1753020368 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -4,16 +4,17 @@
obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
remove.o pci.o pci-driver.o search.o \
- pci-sysfs.o rom.o setup-res.o irq.o vpd.o \
- setup-bus.o vc.o mmap.o setup-irq.o
+ rom.o setup-res.o irq.o vpd.o \
+ setup-bus.o vc.o mmap.o devres.o
obj-$(CONFIG_PCI) += msi/
obj-$(CONFIG_PCI) += pcie/
ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
-obj-$(CONFIG_SYSFS) += slot.o
+obj-$(CONFIG_SYSFS) += pci-sysfs.o slot.o
obj-$(CONFIG_ACPI) += pci-acpi.o
+obj-$(CONFIG_GENERIC_PCI_IOMAP) += iomap.o
endif
obj-$(CONFIG_OF) += of.o
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 2d0a8d78bf..81c50dc64d 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -565,7 +565,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
struct cdns_pcie *pcie = &ep->pcie;
struct device *dev = pcie->dev;
int max_epfs = sizeof(epc->function_num_map) * 8;
- int ret, value, epf;
+ int ret, epf, last_fn;
+ u32 reg, value;
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
@@ -573,6 +574,17 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
*/
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
+ /*
+ * The Next Function Number field in the ARI_CAP_AND_CTRL register of the
+ * last function should be 0, so clear it for the last function in use.
+ */
+ last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
+ reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
+ value = cdns_pcie_readl(pcie, reg);
+ value &= ~CDNS_PCIE_ARI_CAP_NFN_MASK;
+ cdns_pcie_writel(pcie, reg, value);
+
if (ep->quirk_disable_flr) {
for (epf = 0; epf < max_epfs; epf++) {
if (!(epc->function_num_map & BIT(epf)))
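A worked example of the new ARI bookkeeping, using the macros added to pcie-cadence.h below (illustrative arithmetic only):

/*
 * With physical functions 0..2 enabled, epc->function_num_map == 0x7 and
 * find_last_bit() returns 2, so the write above clears the Next Function
 * Number field in the register at
 *
 *	CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(2) = 0x144 + 2 * 0x1000 = 0x2144
 *
 * terminating the ARI next-function chain at function 2.
 */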
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 03b96798f8..7a66a2f815 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -131,6 +131,12 @@
#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
/*
+ * Endpoint PF Registers
+ */
+#define CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(fn) (0x144 + (fn) * 0x1000)
+#define CDNS_PCIE_ARI_CAP_NFN_MASK GENMASK(15, 8)
+
+/*
* Root Port Registers (PCI configuration space for the root port function)
*/
#define CDNS_PCIE_RP_BASE 0x00200000
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index dc2c036ab2..99a60270b2 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -42,6 +42,19 @@
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
+#define IMX95_PCIE_PHY_GEN_CTRL 0x0
+#define IMX95_PCIE_REF_USE_PAD BIT(17)
+
+#define IMX95_PCIE_SS_RW_REG_0 0xf0
+#define IMX95_PCIE_REF_CLKEN BIT(23)
+#define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9)
+
+#define IMX95_PE0_GEN_CTRL_1 0x1050
+#define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0)
+
+#define IMX95_PE0_GEN_CTRL_3 0x1058
+#define IMX95_PCIE_LTSSM_EN BIT(0)
+
#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
enum imx6_pcie_variants {
@@ -52,14 +65,29 @@ enum imx6_pcie_variants {
IMX8MQ,
IMX8MM,
IMX8MP,
+ IMX95,
IMX8MQ_EP,
IMX8MM_EP,
IMX8MP_EP,
+ IMX95_EP,
};
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+#define IMX6_PCIE_FLAG_HAS_PHYDRV BIT(3)
+#define IMX6_PCIE_FLAG_HAS_APP_RESET BIT(4)
+#define IMX6_PCIE_FLAG_HAS_PHY_RESET BIT(5)
+#define IMX6_PCIE_FLAG_HAS_SERDES BIT(6)
+#define IMX6_PCIE_FLAG_SUPPORT_64BIT BIT(7)
+
+#define imx6_check_flag(pci, val) (pci->drvdata->flags & val)
+
+#define IMX6_PCIE_MAX_CLKS 6
+
+#define IMX6_PCIE_MAX_INSTANCES 2
+
+struct imx6_pcie;
struct imx6_pcie_drvdata {
enum imx6_pcie_variants variant;
@@ -67,6 +95,14 @@ struct imx6_pcie_drvdata {
u32 flags;
int dbi_length;
const char *gpr;
+ const char * const *clk_names;
+ const u32 clks_cnt;
+ const u32 ltssm_off;
+ const u32 ltssm_mask;
+ const u32 mode_off[IMX6_PCIE_MAX_INSTANCES];
+ const u32 mode_mask[IMX6_PCIE_MAX_INSTANCES];
+ const struct pci_epc_features *epc_features;
+ int (*init_phy)(struct imx6_pcie *pcie);
};
struct imx6_pcie {
@@ -74,11 +110,7 @@ struct imx6_pcie {
int reset_gpio;
bool gpio_active_high;
bool link_is_up;
- struct clk *pcie_bus;
- struct clk *pcie_phy;
- struct clk *pcie_inbound_axi;
- struct clk *pcie;
- struct clk *pcie_aux;
+ struct clk_bulk_data clks[IMX6_PCIE_MAX_CLKS];
struct regmap *iomuxc_gpr;
u16 msi_ctrl;
u32 controller_id;
@@ -165,34 +197,44 @@ static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
+static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX95_PCIE_SS_RW_REG_0,
+ IMX95_PCIE_PHY_CR_PARA_SEL,
+ IMX95_PCIE_PHY_CR_PARA_SEL);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX95_PCIE_PHY_GEN_CTRL,
+ IMX95_PCIE_REF_USE_PAD, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX95_PCIE_SS_RW_REG_0,
+ IMX95_PCIE_REF_CLKEN,
+ IMX95_PCIE_REF_CLKEN);
+
+ return 0;
+}
+
static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
{
- unsigned int mask, val, mode;
+ const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ unsigned int mask, val, mode, id;
- if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
+ if (drvdata->mode == DW_PCIE_EP_TYPE)
mode = PCI_EXP_TYPE_ENDPOINT;
else
mode = PCI_EXP_TYPE_ROOT_PORT;
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MQ:
- case IMX8MQ_EP:
- if (imx6_pcie->controller_id == 1) {
- mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
- val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
- mode);
- } else {
- mask = IMX6Q_GPR12_DEVICE_TYPE;
- val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
- }
- break;
- default:
- mask = IMX6Q_GPR12_DEVICE_TYPE;
- val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
- break;
- }
+ id = imx6_pcie->controller_id;
+
+ /* If mode_mask[id] is zero, each controller has its own GPR */
+ if (!drvdata->mode_mask[id])
+ id = 0;
+
+ mask = drvdata->mode_mask[id];
+ val = mode << (ffs(mask) - 1);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
}
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
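The open-coded shift above is equivalent to the FIELD_PREP() calls it replaces, because ffs(mask) - 1 is the bit index of the mask's lowest set bit. A worked check against a mask defined in this file (illustrative):

/*
 * IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE == GENMASK(11, 8), so:
 *	ffs(mask) - 1 == 9 - 1 == 8
 *	mode << 8     == FIELD_PREP(GENMASK(11, 8), mode)
 */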
@@ -320,76 +362,66 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
return 0;
}
-static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx8mq_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- /*
- * The PHY initialization had been done in the PHY
- * driver, break here directly.
- */
- break;
- case IMX8MQ:
- case IMX8MQ_EP:
- /*
- * TODO: Currently this code assumes external
- * oscillator is being used
- */
+ /* TODO: Currently this code assumes external oscillator is being used */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_REF_USE_PAD,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD);
+ /*
+ * According to the datasheet, PCIE_VPH should be 1.8V. If PCIE_VPH is
+ * supplied at 3.3V, VREG_BYPASS must be cleared to zero.
+ */
+ if (imx6_pcie->vph && regulator_get_voltage(imx6_pcie->vph) > 3000000)
regmap_update_bits(imx6_pcie->iomuxc_gpr,
imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_REF_USE_PAD,
- IMX8MQ_GPR_PCIE_REF_USE_PAD);
- /*
- * Regarding the datasheet, the PCIE_VPH is suggested
- * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the
- * VREG_BYPASS should be cleared to zero.
- */
- if (imx6_pcie->vph &&
- regulator_get_voltage(imx6_pcie->vph) > 3000000)
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_VREG_BYPASS,
- 0);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_RX_EQ_MASK,
- IMX6SX_GPR12_PCIE_RX_EQ_2);
- fallthrough;
- default:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX8MQ_GPR_PCIE_VREG_BYPASS,
+ 0);
+
+ return 0;
+}
+
+static int imx7d_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+
+ return 0;
+}
+
+static int imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
- /* configure constant input signal to the pcie ctrl and phy */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
- break;
- }
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1,
+ imx6_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+ imx6_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+ imx6_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL,
+ imx6_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW,
+ imx6_pcie->tx_swing_low << 25);
+ return 0;
+}
- imx6_pcie_configure_type(imx6_pcie);
+static int imx6sx_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);
+
+ return imx6_pcie_init_phy(imx6_pcie);
}
static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
@@ -407,13 +439,18 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
{
- unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+ unsigned long phy_rate = 0;
int mult, div;
u16 val;
+ int i;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
return 0;
+ for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
+ if (strncmp(imx6_pcie->clks[i].id, "pcie_phy", 8) == 0)
+ phy_rate = clk_get_rate(imx6_pcie->clks[i].clk);
+
switch (phy_rate) {
case 125000000:
/*
@@ -550,19 +587,11 @@ static int imx6_pcie_attach_pd(struct device *dev)
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct device *dev = pci->dev;
unsigned int offset;
int ret = 0;
switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
- ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
- if (ret) {
- dev_err(dev, "unable to enable pcie_axi clock\n");
- break;
- }
-
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
break;
@@ -582,6 +611,8 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
break;
case IMX7D:
+ case IMX95:
+ case IMX95_EP:
break;
case IMX8MM:
case IMX8MM_EP:
@@ -589,12 +620,6 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
case IMX8MQ_EP:
case IMX8MP:
case IMX8MP_EP:
- ret = clk_prepare_enable(imx6_pcie->pcie_aux);
- if (ret) {
- dev_err(dev, "unable to enable pcie_aux clock\n");
- break;
- }
-
offset = imx6_pcie_grp_offset(imx6_pcie);
/*
* Set the override low and enabled
@@ -615,9 +640,6 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
{
switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
- break;
case IMX6QP:
case IMX6Q:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
@@ -631,14 +653,6 @@ static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
break;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MP:
- case IMX8MP_EP:
- clk_disable_unprepare(imx6_pcie->pcie_aux);
- break;
default:
break;
}
@@ -650,23 +664,9 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
struct device *dev = pci->dev;
int ret;
- ret = clk_prepare_enable(imx6_pcie->pcie_phy);
- if (ret) {
- dev_err(dev, "unable to enable pcie_phy clock\n");
+ ret = clk_bulk_prepare_enable(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ if (ret)
return ret;
- }
-
- ret = clk_prepare_enable(imx6_pcie->pcie_bus);
- if (ret) {
- dev_err(dev, "unable to enable pcie_bus clock\n");
- goto err_pcie_bus;
- }
-
- ret = clk_prepare_enable(imx6_pcie->pcie);
- if (ret) {
- dev_err(dev, "unable to enable pcie clock\n");
- goto err_pcie;
- }
ret = imx6_pcie_enable_ref_clk(imx6_pcie);
if (ret) {
@@ -679,11 +679,7 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
return 0;
err_ref_clk:
- clk_disable_unprepare(imx6_pcie->pcie);
-err_pcie:
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-err_pcie_bus:
- clk_disable_unprepare(imx6_pcie->pcie_phy);
+ clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
return ret;
}
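The conversions above all follow the standard clk_bulk idiom; a self-contained sketch of that pattern (hypothetical function name, not from this driver):

#include <linux/clk.h>

/* Caller fills clks[i].id with the clock names beforehand */
static int example_enable_clocks(struct device *dev,
				 struct clk_bulk_data *clks, int num)
{
	int ret;

	/* Looks up every clock in one call; fails if any one is missing */
	ret = devm_clk_bulk_get(dev, num, clks);
	if (ret)
		return ret;

	/* Prepares and enables in order, rolling back automatically on error */
	return clk_bulk_prepare_enable(num, clks);
}

The matching teardown is a single clk_bulk_disable_unprepare(num, clks), which is what imx6_pcie_clk_disable() now reduces to.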
@@ -691,25 +687,15 @@ err_pcie_bus:
static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
{
imx6_pcie_disable_ref_clk(imx6_pcie);
- clk_disable_unprepare(imx6_pcie->pcie);
- clk_disable_unprepare(imx6_pcie->pcie_bus);
- clk_disable_unprepare(imx6_pcie->pcie_phy);
+ clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
}
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
+ reset_control_assert(imx6_pcie->pciephy_reset);
+ reset_control_assert(imx6_pcie->apps_reset);
+
switch (imx6_pcie->drvdata->variant) {
- case IMX7D:
- case IMX8MQ:
- case IMX8MQ_EP:
- reset_control_assert(imx6_pcie->pciephy_reset);
- fallthrough;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
case IMX6SX:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
@@ -730,6 +716,8 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
break;
+ default:
+ break;
}
/* Some boards don't have PCIe reset GPIO. */
@@ -743,14 +731,10 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
struct dw_pcie *pci = imx6_pcie->pci;
struct device *dev = pci->dev;
+ reset_control_deassert(imx6_pcie->pciephy_reset);
+
switch (imx6_pcie->drvdata->variant) {
- case IMX8MQ:
- case IMX8MQ_EP:
- reset_control_deassert(imx6_pcie->pciephy_reset);
- break;
case IMX7D:
- reset_control_deassert(imx6_pcie->pciephy_reset);
-
/* Workaround for ERR010728, failure of PCI-e PLL VCO to
* oscillate, especially when cold. This turns off "Duty-cycle
* Corrector" and other mysterious undocumented things.
@@ -782,11 +766,7 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
usleep_range(200, 500);
break;
- case IMX6Q: /* Nothing to do */
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
+ default:
break;
}
@@ -824,48 +804,25 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
static void imx6_pcie_ltssm_enable(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
- switch (imx6_pcie->drvdata->variant) {
- case IMX6Q:
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2,
- IMX6Q_GPR12_PCIE_CTL_2);
- break;
- case IMX7D:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- reset_control_deassert(imx6_pcie->apps_reset);
- break;
- }
+ if (drvdata->ltssm_mask)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
+ drvdata->ltssm_mask);
+
+ reset_control_deassert(imx6_pcie->apps_reset);
}
static void imx6_pcie_ltssm_disable(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
- switch (imx6_pcie->drvdata->variant) {
- case IMX6Q:
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0);
- break;
- case IMX7D:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- }
+ if (drvdata->ltssm_mask)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off,
+ drvdata->ltssm_mask, 0);
+
+ reset_control_assert(imx6_pcie->apps_reset);
}
static int imx6_pcie_start_link(struct dw_pcie *pci)
@@ -977,7 +934,11 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
imx6_pcie_assert_core_reset(imx6_pcie);
- imx6_pcie_init_phy(imx6_pcie);
+
+ if (imx6_pcie->drvdata->init_phy)
+ imx6_pcie->drvdata->init_phy(imx6_pcie);
+
+ imx6_pcie_configure_type(imx6_pcie);
ret = imx6_pcie_clk_enable(imx6_pcie);
if (ret) {
@@ -1081,14 +1042,35 @@ static const struct pci_epc_features imx8m_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = false,
- .reserved_bar = 1 << BAR_1 | 1 << BAR_3,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
.align = SZ_64K,
};
+/*
+ * BAR# | Default BAR enable | Default BAR Type | Default BAR Size | BAR Sizing Scheme
+ * =================================================================================
+ * BAR0 | Enable             | 64-bit           | 1 MB             | Programmable Size
+ * BAR1 | Disable            | 32-bit           | 64 KB            | Fixed Size
+ * BAR2 | Enable             | 32-bit           | 1 MB             | Programmable Size
+ * BAR3 | Enable             | 32-bit           | 64 KB            | Programmable Size
+ * BAR4 | Enable             | 32-bit           | 1 MB             | Programmable Size
+ * BAR5 | Enable             | 32-bit           | 64 KB            | Programmable Size
+ *
+ * BAR1 should be disabled if BAR0 is 64-bit.
+ */
+static const struct pci_epc_features imx95_pcie_epc_features = {
+ .msi_capable = true,
+ .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
+ .align = SZ_4K,
+};
+
static const struct pci_epc_features*
imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
- return &imx8m_pcie_epc_features;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+ return imx6_pcie->drvdata->epc_features;
}
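On the consuming side, an endpoint function driver reaches these per-variant capabilities through pci_epc_get_features(); a hedged sketch of honoring the new BAR descriptors (helper and field names per the endpoint core in this series; the function itself is hypothetical):

#include <linux/pci-epc.h>

static int example_size_bar1(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			     size_t *size)
{
	const struct pci_epc_features *feat;

	feat = pci_epc_get_features(epc, func_no, vfunc_no);
	if (!feat)
		return -EOPNOTSUPP;

	if (feat->bar[BAR_1].type == BAR_RESERVED)
		return -EBUSY;				/* BAR1 may not be used */

	if (feat->bar[BAR_1].type == BAR_FIXED)
		*size = feat->bar[BAR_1].fixed_size;	/* size is dictated */

	if (feat->align)
		*size = ALIGN(*size, feat->align);

	return 0;
}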
static const struct dw_pcie_ep_ops pcie_ep_ops = {
@@ -1103,7 +1085,6 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
int ret;
unsigned int pcie_dbi2_offset;
struct dw_pcie_ep *ep;
- struct resource *res;
struct dw_pcie *pci = imx6_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
@@ -1122,14 +1103,20 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
pcie_dbi2_offset = SZ_4K;
break;
}
+
pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
- ep->page_size = SZ_64K;
+ /*
+ * FIXME: Ideally, the dbi2 base address should come from DT. But since only IMX95
+ * defines "dbi2" in DT, "dbi_base2" is set to NULL here for that platform alone so
+ * that the DWC core code can fetch it from DT. Once all platform DTs are fixed,
+ * this and the above "dbi_base2" setting should be removed.
+ */
+ if (device_property_match_string(dev, "reg-names", "dbi2") >= 0)
+ pci->dbi_base2 = NULL;
+
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_SUPPORT_64BIT))
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
ret = dw_pcie_ep_init(ep);
if (ret) {
@@ -1251,6 +1238,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
int ret;
u16 val;
+ int i;
imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
if (!imx6_pcie)
@@ -1304,81 +1292,48 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return imx6_pcie->reset_gpio;
}
- /* Fetch clocks */
- imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(imx6_pcie->pcie_bus))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
- "pcie_bus clock source missing or invalid\n");
+ if (imx6_pcie->drvdata->clks_cnt >= IMX6_PCIE_MAX_CLKS)
+ return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n");
- imx6_pcie->pcie = devm_clk_get(dev, "pcie");
- if (IS_ERR(imx6_pcie->pcie))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie),
- "pcie clock source missing or invalid\n");
+ for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
+ imx6_pcie->clks[i].id = imx6_pcie->drvdata->clk_names[i];
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
- "pcie_inbound_axi");
- if (IS_ERR(imx6_pcie->pcie_inbound_axi))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
- "pcie_inbound_axi clock missing or invalid\n");
- break;
- case IMX8MQ:
- case IMX8MQ_EP:
- imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(imx6_pcie->pcie_aux))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
- "pcie_aux clock source missing or invalid\n");
- fallthrough;
- case IMX7D:
- if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
- imx6_pcie->controller_id = 1;
+ /* Fetch clocks */
+ ret = devm_clk_bulk_get(dev, imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ if (ret)
+ return ret;
- imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
- "pciephy");
- if (IS_ERR(imx6_pcie->pciephy_reset)) {
- dev_err(dev, "Failed to get PCIEPHY reset control\n");
- return PTR_ERR(imx6_pcie->pciephy_reset);
- }
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHYDRV)) {
+ imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx6_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
+ "failed to get pcie phy\n");
+ }
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
- "apps");
- if (IS_ERR(imx6_pcie->apps_reset)) {
- dev_err(dev, "Failed to get PCIE APPS reset control\n");
- return PTR_ERR(imx6_pcie->apps_reset);
- }
- break;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(imx6_pcie->pcie_aux))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
- "pcie_aux clock source missing or invalid\n");
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
- "apps");
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_APP_RESET)) {
+ imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
if (IS_ERR(imx6_pcie->apps_reset))
return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
"failed to get pcie apps reset control\n");
+ }
- imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
- if (IS_ERR(imx6_pcie->phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
- "failed to get pcie phy\n");
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHY_RESET)) {
+ imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
+ if (IS_ERR(imx6_pcie->pciephy_reset))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pciephy_reset),
+ "Failed to get PCIEPHY reset control\n");
+ }
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ case IMX7D:
+ if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
+ imx6_pcie->controller_id = 1;
break;
default:
break;
}
- /* Don't fetch the pcie_phy clock, if it has abstract PHY driver */
- if (imx6_pcie->phy == NULL) {
- imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pcie_phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
- "pcie_phy clock source missing or invalid\n");
- }
-
/* Grab turnoff reset */
imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
@@ -1387,12 +1342,32 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->turnoff_reset);
}
+ if (imx6_pcie->drvdata->gpr) {
/* Grab GPR config register range */
- imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
- if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
- dev_err(dev, "unable to find iomuxc registers\n");
- return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ imx6_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
+ if (IS_ERR(imx6_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
+ "unable to find iomuxc registers\n");
+ }
+
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_SERDES)) {
+ void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app");
+
+ if (IS_ERR(off))
+ return dev_err_probe(dev, PTR_ERR(off),
+ "unable to find serdes registers\n");
+
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ imx6_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
+ if (IS_ERR(imx6_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
+ "unable to find iomuxc registers\n");
}
/* Grab PCIe PHY Tx Settings */
@@ -1469,6 +1444,11 @@ static void imx6_pcie_shutdown(struct platform_device *pdev)
imx6_pcie_assert_core_reset(imx6_pcie);
}
+static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"};
+static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
+static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
+static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};
+
static const struct imx6_pcie_drvdata drvdata[] = {
[IMX6Q] = {
.variant = IMX6Q,
@@ -1476,6 +1456,13 @@ static const struct imx6_pcie_drvdata drvdata[] = {
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .clk_names = imx6q_clks,
+ .clks_cnt = ARRAY_SIZE(imx6q_clks),
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx6_pcie_init_phy,
},
[IMX6SX] = {
.variant = IMX6SX,
@@ -1483,6 +1470,13 @@ static const struct imx6_pcie_drvdata drvdata[] = {
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .clk_names = imx6sx_clks,
+ .clks_cnt = ARRAY_SIZE(imx6sx_clks),
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx6sx_pcie_init_phy,
},
[IMX6QP] = {
.variant = IMX6QP,
@@ -1491,40 +1485,122 @@ static const struct imx6_pcie_drvdata drvdata[] = {
IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .clk_names = imx6q_clks,
+ .clks_cnt = ARRAY_SIZE(imx6q_clks),
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx6_pcie_init_phy,
},
[IMX7D] = {
.variant = IMX7D,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_HAS_APP_RESET |
+ IMX6_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx7d-iomuxc-gpr",
+ .clk_names = imx6q_clks,
+ .clks_cnt = ARRAY_SIZE(imx6q_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx7d_pcie_init_phy,
},
[IMX8MQ] = {
.variant = IMX8MQ,
+ .flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
+ IMX6_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx8mq-iomuxc-gpr",
+ .clk_names = imx8mq_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .mode_off[1] = IOMUXC_GPR12,
+ .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ .init_phy = imx8mq_pcie_init_phy,
},
[IMX8MM] = {
.variant = IMX8MM,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_HAS_PHYDRV |
+ IMX6_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mm-iomuxc-gpr",
+ .clk_names = imx8mm_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mm_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
},
[IMX8MP] = {
.variant = IMX8MP,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_HAS_PHYDRV |
+ IMX6_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mp-iomuxc-gpr",
+ .clk_names = imx8mm_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mm_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ },
+ [IMX95] = {
+ .variant = IMX95,
+ .flags = IMX6_PCIE_FLAG_HAS_SERDES,
+ .clk_names = imx8mq_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .ltssm_off = IMX95_PE0_GEN_CTRL_3,
+ .ltssm_mask = IMX95_PCIE_LTSSM_EN,
+ .mode_off[0] = IMX95_PE0_GEN_CTRL_1,
+ .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .init_phy = imx95_pcie_init_phy,
},
[IMX8MQ_EP] = {
.variant = IMX8MQ_EP,
+ .flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
+ IMX6_PCIE_FLAG_HAS_PHY_RESET,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mq-iomuxc-gpr",
+ .clk_names = imx8mq_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .mode_off[1] = IOMUXC_GPR12,
+ .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
+ .init_phy = imx8mq_pcie_init_phy,
},
[IMX8MM_EP] = {
.variant = IMX8MM_EP,
+ .flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mm-iomuxc-gpr",
+ .clk_names = imx8mm_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mm_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
},
[IMX8MP_EP] = {
.variant = IMX8MP_EP,
+ .flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mp-iomuxc-gpr",
+ .clk_names = imx8mm_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mm_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
+ },
+ [IMX95_EP] = {
+ .variant = IMX95_EP,
+ .flags = IMX6_PCIE_FLAG_HAS_SERDES |
+ IMX6_PCIE_FLAG_SUPPORT_64BIT,
+ .clk_names = imx8mq_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .ltssm_off = IMX95_PE0_GEN_CTRL_3,
+ .ltssm_mask = IMX95_PCIE_LTSSM_EN,
+ .mode_off[0] = IMX95_PE0_GEN_CTRL_1,
+ .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .init_phy = imx95_pcie_init_phy,
+ .epc_features = &imx95_pcie_epc_features,
+ .mode = DW_PCIE_EP_TYPE,
},
};
@@ -1536,9 +1612,11 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
+ { .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
+ { .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
{},
};
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index c0c62533a3..844de44187 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -924,12 +924,12 @@ static const struct pci_epc_features ks_pcie_am654_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
- .reserved_bar = 1 << BAR_0 | 1 << BAR_1,
- .bar_fixed_64bit = 1 << BAR_0,
- .bar_fixed_size[2] = SZ_1M,
- .bar_fixed_size[3] = SZ_64K,
- .bar_fixed_size[4] = 256,
- .bar_fixed_size[5] = SZ_1M,
+ .bar[BAR_0] = { .type = BAR_RESERVED, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, },
+ .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
.align = SZ_1M,
};
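Every endpoint driver in this series gets the same mechanical conversion: the three parallel fields reserved_bar, bar_fixed_64bit, and bar_fixed_size[] collapse into one array of per-BAR descriptors. Roughly, the refactored type looks like this (a paraphrased sketch of the pci-epc.h definitions introduced by this series, not a verbatim copy):

enum pci_epc_bar_type {
	BAR_PROGRAMMABLE = 0,	/* default: size negotiated at runtime */
	BAR_FIXED,		/* hardware dictates fixed_size */
	BAR_RESERVED,		/* the function driver must not use this BAR */
};

struct pci_epc_bar_desc {
	u64 fixed_size;			/* only meaningful for BAR_FIXED */
	enum pci_epc_bar_type type;
	bool only_64bit;		/* BAR must be configured as 64-bit */
};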
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 2e398494e7..1f6ee1460e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -250,7 +250,10 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = pcie->drvdata->dw_pcie_ops;
- ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4);
+ ls_epc->bar[BAR_2].only_64bit = true;
+ ls_epc->bar[BAR_3].type = BAR_RESERVED;
+ ls_epc->bar[BAR_4].only_64bit = true;
+ ls_epc->bar[BAR_5].type = BAR_RESERVED;
ls_epc->linkup_notifier = true;
pcie->pci = pci;
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 746a11dcb6..c43a1479de 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -604,11 +604,16 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ struct device *dev = pci->dev;
+ struct pci_epc *epc = ep->epc;
unsigned int offset, ptm_cap_base;
unsigned int nbars;
u8 hdr_type;
+ u8 func_no;
+ int i, ret;
+ void *addr;
u32 reg;
- int i;
hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
PCI_HEADER_TYPE_MASK;
@@ -619,6 +624,58 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
return -EIO;
}
+ dw_pcie_version_detect(pci);
+
+ dw_pcie_iatu_detect(pci);
+
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ return ret;
+
+ if (!ep->ib_window_map) {
+ ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ GFP_KERNEL);
+ if (!ep->ib_window_map)
+ goto err_remove_edma;
+ }
+
+ if (!ep->ob_window_map) {
+ ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+ GFP_KERNEL);
+ if (!ep->ob_window_map)
+ goto err_remove_edma;
+ }
+
+ if (!ep->outbound_addr) {
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
+ GFP_KERNEL);
+ if (!addr)
+ goto err_remove_edma;
+ ep->outbound_addr = addr;
+ }
+
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (ep_func)
+ continue;
+
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ goto err_remove_edma;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+
+ list_add_tail(&ep_func->list, &ep->func_list);
+ }
+
+ if (ep->ops->init)
+ ep->ops->init(ep);
+
offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
@@ -658,14 +715,17 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
+
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
int ret;
- void *addr;
- u8 func_no;
struct resource *res;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -673,7 +733,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
const struct pci_epc_features *epc_features;
- struct dw_pcie_ep_func *ep_func;
INIT_LIST_HEAD(&ep->func_list);
@@ -691,26 +750,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (ep->ops->pre_init)
ep->ops->pre_init(ep);
- dw_pcie_version_detect(pci);
-
- dw_pcie_iatu_detect(pci);
-
- ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
- GFP_KERNEL);
- if (!ep->ib_window_map)
- return -ENOMEM;
-
- ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
- GFP_KERNEL);
- if (!ep->ob_window_map)
- return -ENOMEM;
-
- addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
- GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
- ep->outbound_addr = addr;
-
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
dev_err(dev, "Failed to create epc device\n");
@@ -724,23 +763,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (ret < 0)
epc->max_functions = 1;
- for (func_no = 0; func_no < epc->max_functions; func_no++) {
- ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
- if (!ep_func)
- return -ENOMEM;
-
- ep_func->func_no = func_no;
- ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSI);
- ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSIX);
-
- list_add_tail(&ep_func->list, &ep->func_list);
- }
-
- if (ep->ops->init)
- ep->ops->init(ep);
-
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
if (ret < 0) {
@@ -756,25 +778,25 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
goto err_exit_epc_mem;
}
- ret = dw_pcie_edma_detect(pci);
- if (ret)
- goto err_free_epc_mem;
-
if (ep->ops->get_features) {
epc_features = ep->ops->get_features(ep);
if (epc_features->core_init_notifier)
return 0;
}
+ /*
+ * NOTE: Avoid accessing the hardware (e.g. DBI space) before this
+ * step, as platforms that implement the 'core_init_notifier' feature
+ * may not have the hardware ready (i.e. core initialized) for access
+ * (e.g. tegra194). Any hardware access on such platforms results in a
+ * system hang.
+ */
ret = dw_pcie_ep_init_complete(ep);
if (ret)
- goto err_remove_edma;
+ goto err_free_epc_mem;
return 0;
-err_remove_edma:
- dw_pcie_edma_remove(pci);
-
err_free_epc_mem:
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);
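The net effect of this hoisting is that every DBI access now sits in dw_pcie_ep_init_complete(), which core_init_notifier platforms call only once the core is powered and out of reset. A hedged sketch of that deferred path (the interrupt handler is hypothetical; dw_pcie_ep_init_complete() and pci_epc_init_notify() are the real entry points):

#include <linux/interrupt.h>

static irqreturn_t example_core_init_handler(int irq, void *arg)
{
	struct dw_pcie_ep *ep = arg;

	/* The core is now out of reset, so touching DBI is safe */
	if (dw_pcie_ep_init_complete(ep))
		return IRQ_HANDLED;

	/* Tell bound EPF drivers they may initialize the function */
	pci_epc_init_notify(ep->epc);

	return IRQ_HANDLED;
}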
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index d5fc31f834..d15a5c2d5b 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -328,7 +328,7 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
- u64 *msi_vaddr;
+ u64 *msi_vaddr = NULL;
int ret;
u32 ctrl, num_ctrls;
@@ -379,15 +379,20 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
* memory.
*/
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
- if (ret)
- dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+ if (!ret)
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
- msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
- GFP_KERNEL);
if (!msi_vaddr) {
- dev_err(dev, "Failed to alloc and map MSI data\n");
- dw_pcie_free_msi(pp);
- return -ENOMEM;
+ dev_warn(dev, "Failed to allocate 32-bit MSI address\n");
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
+ if (!msi_vaddr) {
+ dev_err(dev, "Failed to allocate MSI address\n");
+ dw_pcie_free_msi(pp);
+ return -ENOMEM;
+ }
}
return 0;
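The 32-bit-first policy matters because pp->msi_data is advertised to endpoints as the MSI target address, and devices with only 32-bit MSI support cannot write above 4 GB. For reference, the DWC host core programs the allocated address into the RC roughly like this (as in dw_pcie_setup_rc(); shown here for context, not part of this hunk):

	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(pp->msi_data));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(pp->msi_data));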
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 208d3b0ba1..5e8e54f597 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -312,8 +312,12 @@ static const struct pci_epc_features keembay_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
- .reserved_bar = BIT(BAR_1) | BIT(BAR_3) | BIT(BAR_5),
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
.align = SZ_16K,
};
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 23bb0eee5c..14772edcf0 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1679,6 +1679,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-x1e80100", .data = &cfg_1_9_0 },
{ }
};
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index e9166619b1..0be760ed42 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -383,7 +383,9 @@ static const struct pci_epc_features rcar_gen4_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = false,
- .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
.align = SZ_1M,
};
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 7afa9e9aab..e440c09d1d 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -2007,9 +2007,13 @@ static const struct pci_epc_features tegra_pcie_epc_features = {
.core_init_notifier = true,
.msi_capable = false,
.msix_capable = false,
- .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
- .bar_fixed_64bit = 1 << BAR_0,
- .bar_fixed_size[0] = SZ_1M,
+ .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
+ .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features*
@@ -2269,11 +2273,14 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
ret = tegra_pcie_config_ep(pcie, pdev);
if (ret < 0)
goto fail;
+ else
+ return 0;
break;
default:
dev_err(dev, "Invalid PCIe device type %d\n",
pcie->of_data->mode);
+ ret = -EINVAL;
}
fail:
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index 3fced0d3e8..639bc2e124 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -411,8 +411,12 @@ static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
.msi_capable = true,
.msix_capable = false,
.align = 1 << 16,
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
- .reserved_bar = BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
},
};
@@ -425,7 +429,12 @@ static const struct uniphier_pcie_ep_soc_data uniphier_nx1_data = {
.msi_capable = true,
.msix_capable = false,
.align = 1 << 12,
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
},
};
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index e6909271de..05967c6c0b 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -440,11 +440,15 @@ static const struct pci_epc_features rcar_pcie_epc_features = {
.msi_capable = true,
.msix_capable = false,
/* use 64-bit BARs so mark BAR[1,3,5] as reserved */
- .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
- .bar_fixed_64bit = 1 << BAR_0 | 1 << BAR_2 | 1 << BAR_4,
- .bar_fixed_size[0] = 128,
- .bar_fixed_size[2] = 256,
- .bar_fixed_size[4] = 256,
+ .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = 128,
+ .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = 256,
+ .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256,
+ .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features*
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index c9046e97a1..79c3e7e9a2 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -98,10 +98,8 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
/* All functions share the same vendor ID with function 0 */
if (fn == 0) {
- u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
- (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
-
- rockchip_pcie_write(rockchip, vid_regs,
+ rockchip_pcie_write(rockchip,
+ hdr->vendorid | hdr->subsys_vendor_id << 16,
PCIE_CORE_CONFIG_VENDOR);
}
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
new file mode 100644
index 0000000000..2c562b9eaf
--- /dev/null
+++ b/drivers/pci/devres.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
+#include <linux/pci.h>
+#include "pci.h"
+
+/*
+ * PCI iomap devres
+ */
+#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
+
+struct pcim_iomap_devres {
+ void __iomem *table[PCIM_IOMAP_MAX];
+};
+
+
+static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
+{
+ struct resource **res = ptr;
+
+ pci_unmap_iospace(*res);
+}
+
+/**
+ * devm_pci_remap_iospace - Managed pci_remap_iospace()
+ * @dev: Generic device to remap IO address for
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
+ *
+ * Managed pci_remap_iospace(). Map is automatically unmapped on driver
+ * detach.
+ */
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+ phys_addr_t phys_addr)
+{
+ const struct resource **ptr;
+ int error;
+
+ ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ error = pci_remap_iospace(res, phys_addr);
+ if (error) {
+ devres_free(ptr);
+ } else {
+ *ptr = res;
+ devres_add(dev, ptr);
+ }
+
+ return error;
+}
+EXPORT_SYMBOL(devm_pci_remap_iospace);
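A typical caller, for reference (a hedged sketch of a host-bridge probe path; the function is hypothetical, and pci_pio_to_address() is the usual way to turn an IORESOURCE_IO range into a physical CPU address):

static int example_map_io(struct device *dev, struct resource *res)
{
	phys_addr_t phys = pci_pio_to_address(res->start);

	/* The mapping is torn down automatically on driver detach */
	return devm_pci_remap_iospace(dev, res, phys);
}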
+
+/**
+ * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_pci_remap_cfgspace(struct device *dev,
+ resource_size_t offset,
+ resource_size_t size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = pci_remap_cfgspace(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_pci_remap_cfgspace);
+
+/**
+ * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
+ * @dev: generic device to handle the resource for
+ * @res: configuration space resource to be handled
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps with pci_remap_cfgspace() API that ensures the
+ * proper PCI configuration space memory attributes are guaranteed.
+ *
+ * All operations are managed and will be undone on driver detach.
+ *
+ * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure. Usage example::
+ *
+ * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
+ * if (IS_ERR(base))
+ * return PTR_ERR(base);
+ */
+void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
+ struct resource *res)
+{
+ resource_size_t size;
+ const char *name;
+ void __iomem *dest_ptr;
+
+ BUG_ON(!dev);
+
+ if (!res || resource_type(res) != IORESOURCE_MEM) {
+ dev_err(dev, "invalid resource\n");
+ return IOMEM_ERR_PTR(-EINVAL);
+ }
+
+ size = resource_size(res);
+
+ if (res->name)
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
+ res->name);
+ else
+ name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!name)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ if (!devm_request_mem_region(dev, res->start, size, name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return IOMEM_ERR_PTR(-EBUSY);
+ }
+
+ dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
+ if (!dest_ptr) {
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
+ devm_release_mem_region(dev, res->start, size);
+ dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
+ }
+
+ return dest_ptr;
+}
+EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
+
+/**
+ * pcim_set_mwi - a device-managed pci_set_mwi()
+ * @dev: the PCI device for which MWI is enabled
+ *
+ * Managed pci_set_mwi().
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int pcim_set_mwi(struct pci_dev *dev)
+{
+ struct pci_devres *dr;
+
+ dr = find_pci_dr(dev);
+ if (!dr)
+ return -ENOMEM;
+
+ dr->mwi = 1;
+ return pci_set_mwi(dev);
+}
+EXPORT_SYMBOL(pcim_set_mwi);
+
+
+static void pcim_release(struct device *gendev, void *res)
+{
+ struct pci_dev *dev = to_pci_dev(gendev);
+ struct pci_devres *this = res;
+ int i;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ if (this->region_mask & (1 << i))
+ pci_release_region(dev, i);
+
+ if (this->mwi)
+ pci_clear_mwi(dev);
+
+ if (this->restore_intx)
+ pci_intx(dev, this->orig_intx);
+
+ if (this->enabled && !this->pinned)
+ pci_disable_device(dev);
+}
+
+/*
+ * TODO: After the last four callers in pci.c are ported, find_pci_dr()
+ * needs to be made static again.
+ */
+struct pci_devres *find_pci_dr(struct pci_dev *pdev)
+{
+ if (pci_is_managed(pdev))
+ return devres_find(&pdev->dev, pcim_release, NULL, NULL);
+ return NULL;
+}
+
+static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
+{
+ struct pci_devres *dr, *new_dr;
+
+ dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
+ if (dr)
+ return dr;
+
+ new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
+ if (!new_dr)
+ return NULL;
+ return devres_get(&pdev->dev, new_dr, NULL, NULL);
+}
+
+/**
+ * pcim_enable_device - Managed pci_enable_device()
+ * @pdev: PCI device to be initialized
+ *
+ * Managed pci_enable_device().
+ */
+int pcim_enable_device(struct pci_dev *pdev)
+{
+ struct pci_devres *dr;
+ int rc;
+
+ dr = get_pci_dr(pdev);
+ if (unlikely(!dr))
+ return -ENOMEM;
+ if (dr->enabled)
+ return 0;
+
+ rc = pci_enable_device(pdev);
+ if (!rc) {
+ pdev->is_managed = 1;
+ dr->enabled = 1;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(pcim_enable_device);
+
+/**
+ * pcim_pin_device - Pin managed PCI device
+ * @pdev: PCI device to pin
+ *
+ * Pin managed PCI device @pdev. Pinned device won't be disabled on
+ * driver detach. @pdev must have been enabled with
+ * pcim_enable_device().
+ */
+void pcim_pin_device(struct pci_dev *pdev)
+{
+ struct pci_devres *dr;
+
+ dr = find_pci_dr(pdev);
+ WARN_ON(!dr || !dr->enabled);
+ if (dr)
+ dr->pinned = 1;
+}
+EXPORT_SYMBOL(pcim_pin_device);
+
+static void pcim_iomap_release(struct device *gendev, void *res)
+{
+ struct pci_dev *dev = to_pci_dev(gendev);
+ struct pcim_iomap_devres *this = res;
+ int i;
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (this->table[i])
+ pci_iounmap(dev, this->table[i]);
+}
+
+/**
+ * pcim_iomap_table - access iomap allocation table
+ * @pdev: PCI device to access iomap table for
+ *
+ * Access the iomap allocation table for @pdev. If the iomap table doesn't
+ * exist and @pdev is managed, it will be allocated. All iomaps
+ * recorded in the iomap table are automatically unmapped on driver
+ * detach.
+ *
+ * This function might sleep when the table is first allocated, but once
+ * allocated it can be called safely from any context and is guaranteed
+ * to succeed.
+ */
+void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
+{
+ struct pcim_iomap_devres *dr, *new_dr;
+
+ dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
+ if (dr)
+ return dr->table;
+
+ new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!new_dr)
+ return NULL;
+ dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
+ return dr->table;
+}
+EXPORT_SYMBOL(pcim_iomap_table);
+
+/**
+ * pcim_iomap - Managed pci_iomap()
+ * @pdev: PCI device to iomap for
+ * @bar: BAR to iomap
+ * @maxlen: Maximum length of iomap
+ *
+ * Managed pci_iomap(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+ void __iomem **tbl;
+
+ BUG_ON(bar >= PCIM_IOMAP_MAX);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
+ return NULL;
+
+ tbl[bar] = pci_iomap(pdev, bar, maxlen);
+ return tbl[bar];
+}
+EXPORT_SYMBOL(pcim_iomap);
+
+/**
+ * pcim_iounmap - Managed pci_iounmap()
+ * @pdev: PCI device to iounmap for
+ * @addr: Address to unmap
+ *
+ * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
+ */
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+ void __iomem **tbl;
+ int i;
+
+ pci_iounmap(pdev, addr);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ BUG_ON(!tbl);
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (tbl[i] == addr) {
+ tbl[i] = NULL;
+ return;
+ }
+ WARN_ON(1);
+}
+EXPORT_SYMBOL(pcim_iounmap);
+
+/**
+ * pcim_iomap_regions - Request and iomap PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to request and iomap
+ * @name: Name used when requesting regions
+ *
+ * Request and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
+{
+ void __iomem * const *iomap;
+ int i, rc;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ unsigned long len;
+
+ if (!(mask & (1 << i)))
+ continue;
+
+ rc = -EINVAL;
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ goto err_inval;
+
+ rc = pci_request_region(pdev, i, name);
+ if (rc)
+ goto err_inval;
+
+ rc = -ENOMEM;
+ if (!pcim_iomap(pdev, i, 0))
+ goto err_region;
+ }
+
+ return 0;
+
+ err_region:
+ pci_release_region(pdev, i);
+ err_inval:
+ while (--i >= 0) {
+ if (!(mask & (1 << i)))
+ continue;
+ pcim_iounmap(pdev, iomap[i]);
+ pci_release_region(pdev, i);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
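+
+/*
+ * Usage sketch (illustrative only): request and map BARs 0 and 2 in a
+ * single call, then fetch the cookies from the iomap table. DRV_NAME is
+ * assumed to be the driver's name string:
+ *
+ *	rc = pcim_iomap_regions(pdev, BIT(0) | BIT(2), DRV_NAME);
+ *	if (rc)
+ *		return rc;
+ *	ctrl = pcim_iomap_table(pdev)[0];
+ *	mem = pcim_iomap_table(pdev)[2];
+ */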
+
+/**
+ * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to iomap
+ * @name: Name used when requesting regions
+ *
+ * Request all PCI BARs and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
+ const char *name)
+{
+ int request_mask = ((1 << 6) - 1) & ~mask;
+ int rc;
+
+ rc = pci_request_selected_regions(pdev, request_mask, name);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, mask, name);
+ if (rc)
+ pci_release_selected_regions(pdev, request_mask);
+ return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions_request_all);
+
+/**
+ * pcim_iounmap_regions - Unmap and release PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to unmap and release
+ *
+ * Unmap and release regions specified by @mask.
+ */
+void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
+{
+ void __iomem * const *iomap;
+ int i;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return;
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++) {
+ if (!(mask & (1 << i)))
+ continue;
+
+ pcim_iounmap(pdev, iomap[i]);
+ pci_release_region(pdev, i);
+ }
+}
+EXPORT_SYMBOL(pcim_iounmap_regions);
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 1c3e4ea76b..2c54d80107 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -123,6 +123,22 @@ static const struct pci_epf_mhi_ep_info sm8450_info = {
.flags = MHI_EPF_USE_DMA,
};
+static struct pci_epf_header sa8775p_header = {
+ .vendorid = PCI_VENDOR_ID_QCOM,
+ .deviceid = 0x0306, /* FIXME: Update deviceid for sa8775p EP */
+ .baseclass_code = PCI_CLASS_OTHERS,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static const struct pci_epf_mhi_ep_info sa8775p_info = {
+ .config = &mhi_v1_config,
+ .epf_header = &sa8775p_header,
+ .bar_num = BAR_0,
+ .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
+ .msi_count = 32,
+ .mru = 0x8000,
+};
+
struct pci_epf_mhi {
const struct pci_epc_features *epc_features;
const struct pci_epf_mhi_ep_info *info;
@@ -913,8 +929,9 @@ static int pci_epf_mhi_probe(struct pci_epf *epf,
}
static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
- { .name = "sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
- { .name = "sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
+ { .name = "pci_epf_mhi_sa8775p", .driver_data = (kernel_ulong_t)&sa8775p_info },
+ { .name = "pci_epf_mhi_sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
+ { .name = "pci_epf_mhi_sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
{},
};
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index 0553946005..e01a98e74d 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -1012,13 +1012,13 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb,
epc_features = ntb_epc->epc_features;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
- size = epc_features->bar_fixed_size[barno];
+ size = epc_features->bar[barno].fixed_size;
align = epc_features->align;
peer_ntb_epc = ntb->epc[!type];
peer_epc_features = peer_ntb_epc->epc_features;
peer_barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
- peer_size = peer_epc_features->bar_fixed_size[peer_barno];
+ peer_size = peer_epc_features->bar[peer_barno].fixed_size;
/* Check if epc_features is populated incorrectly */
if ((!IS_ALIGNED(size, align)))
@@ -1067,7 +1067,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb,
else if (size < ctrl_size + spad_size)
return -EINVAL;
- base = pci_epf_alloc_space(epf, size, barno, align, type);
+ base = pci_epf_alloc_space(epf, size, barno, epc_features, type);
if (!base) {
dev_err(dev, "%s intf: Config/Status/SPAD alloc region fail\n",
pci_epc_interface_string(type));
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 18c80002d3..cd4ffb39dc 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -729,7 +729,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
*/
add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
- if (!!(epc_features->reserved_bar & (1 << bar)))
+ if (epc_features->bar[bar].type == BAR_RESERVED)
continue;
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
@@ -841,14 +841,8 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
- if (epc_features->bar_fixed_size[test_reg_bar]) {
- if (test_reg_size > bar_size[test_reg_bar])
- return -ENOMEM;
- test_reg_size = bar_size[test_reg_bar];
- }
-
base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
- epc_features->align, PRIMARY_INTERFACE);
+ epc_features, PRIMARY_INTERFACE);
if (!base) {
dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
@@ -862,12 +856,11 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
if (bar == test_reg_bar)
continue;
- if (!!(epc_features->reserved_bar & (1 << bar)))
+ if (epc_features->bar[bar].type == BAR_RESERVED)
continue;
base = pci_epf_alloc_space(epf, bar_size[bar], bar,
- epc_features->align,
- PRIMARY_INTERFACE);
+ epc_features, PRIMARY_INTERFACE);
if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
@@ -881,16 +874,12 @@ static void pci_epf_configure_bar(struct pci_epf *epf,
const struct pci_epc_features *epc_features)
{
struct pci_epf_bar *epf_bar;
- bool bar_fixed_64bit;
int i;
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
epf_bar = &epf->bar[i];
- bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
- if (bar_fixed_64bit)
+ if (epc_features->bar[i].only_64bit)
epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
- if (epc_features->bar_fixed_size[i])
- bar_size[i] = epc_features->bar_fixed_size[i];
}
}
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index 5b84821c0d..8e779eecd6 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -422,7 +422,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
epf->func_no,
epf->vfunc_no);
barno = ntb->epf_ntb_bar[BAR_CONFIG];
- size = epc_features->bar_fixed_size[barno];
+ size = epc_features->bar[barno].fixed_size;
align = epc_features->align;
if ((!IS_ALIGNED(size, align)))
@@ -446,7 +446,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
else if (size < ctrl_size + spad_size)
return -EINVAL;
- base = pci_epf_alloc_space(epf, size, barno, align, 0);
+ base = pci_epf_alloc_space(epf, size, barno, epc_features, 0);
if (!base) {
dev_err(dev, "Config/Status/SPAD alloc region fail\n");
return -ENOMEM;
@@ -527,7 +527,6 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
{
const struct pci_epc_features *epc_features;
- u32 align;
struct device *dev = &ntb->epf->dev;
int ret;
struct pci_epf_bar *epf_bar;
@@ -538,19 +537,9 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
epc_features = pci_epc_get_features(ntb->epf->epc,
ntb->epf->func_no,
ntb->epf->vfunc_no);
- align = epc_features->align;
-
- if (size < 128)
- size = 128;
-
- if (align)
- size = ALIGN(size, align);
- else
- size = roundup_pow_of_two(size);
-
barno = ntb->epf_ntb_bar[BAR_DB];
- mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0);
+ mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, epc_features, 0);
if (!mw_addr) {
dev_err(dev, "Failed to allocate OB address\n");
return -ENOMEM;
@@ -1269,7 +1258,7 @@ static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "Cannot set DMA mask\n");
- return -EINVAL;
+ return ret;
}
ret = ntb_register_device(&ndev->ntb);
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index dcd4e66430..da3fc0795b 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -87,7 +87,7 @@ EXPORT_SYMBOL_GPL(pci_epc_get);
* @epc_features: pci_epc_features structure that holds the reserved bar bitmap
*
* Invoke to get the first unreserved BAR that can be used by the endpoint
- * function. For any incorrect value in reserved_bar return '0'.
+ * function.
*/
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
@@ -102,32 +102,27 @@ EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
* @bar: the starting BAR number from where unreserved BAR should be searched
*
* Invoke to get the next unreserved BAR starting from @bar that can be used
- * for endpoint function. For any incorrect value in reserved_bar return '0'.
+ * for endpoint function.
*/
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
*epc_features, enum pci_barno bar)
{
- unsigned long free_bar;
+ int i;
if (!epc_features)
return BAR_0;
/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
- if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
+ if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
bar++;
- /* Find if the reserved BAR is also a 64-bit BAR */
- free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;
-
- /* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */
- free_bar <<= 1;
- free_bar |= epc_features->reserved_bar;
-
- free_bar = find_next_zero_bit(&free_bar, 6, bar);
- if (free_bar > 5)
- return NO_BAR;
+ for (i = bar; i < PCI_STD_NUM_BARS; i++) {
+ /* If the BAR is not reserved, return it. */
+ if (epc_features->bar[i].type != BAR_RESERVED)
+ return i;
+ }
- return free_bar;
+ return NO_BAR;
}
EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
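+
+/*
+ * Usage sketch (illustrative only): walking every unreserved BAR of an
+ * endpoint controller; setup_bar() is a hypothetical helper. Passing
+ * 'bar + 1' lets the helper skip the upper half of a 64-bit BAR:
+ *
+ *	enum pci_barno bar = pci_epc_get_first_free_bar(epc_features);
+ *
+ *	while (bar != NO_BAR) {
+ *		setup_bar(epf, bar);
+ *		bar = pci_epc_get_next_free_bar(epc_features, bar + 1);
+ *	}
+ */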
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 2c32de6679..0a28a0b091 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -17,7 +17,7 @@
static DEFINE_MUTEX(pci_epf_mutex);
-static struct bus_type pci_epf_bus_type;
+static const struct bus_type pci_epf_bus_type;
static const struct device_type pci_epf_type;
/**
@@ -251,14 +251,17 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
* @epf: the EPF device to whom allocate the memory
* @size: the size of the memory that has to be allocated
* @bar: the BAR number corresponding to the allocated register space
- * @align: alignment size for the allocation region
+ * @epc_features: the features provided by the EPC specific to this EPF
* @type: Identifies if the allocation is for primary EPC or secondary EPC
*
* Invoke to allocate memory for the PCI EPF register space.
*/
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
- size_t align, enum pci_epc_interface_type type)
+ const struct pci_epc_features *epc_features,
+ enum pci_epc_interface_type type)
{
+ u64 bar_fixed_size = epc_features->bar[bar].fixed_size;
+ size_t align = epc_features->align;
struct pci_epf_bar *epf_bar;
dma_addr_t phys_addr;
struct pci_epc *epc;
@@ -268,6 +271,15 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
if (size < 128)
size = 128;
+ if (epc_features->bar[bar].type == BAR_FIXED && bar_fixed_size) {
+ if (size > bar_fixed_size) {
+ dev_err(&epf->dev,
+ "requested BAR size is larger than fixed size\n");
+ return NULL;
+ }
+ size = bar_fixed_size;
+ }
+
if (align)
size = ALIGN(size, align);
else
@@ -507,7 +519,7 @@ static void pci_epf_device_remove(struct device *dev)
epf->driver = NULL;
}
-static struct bus_type pci_epf_bus_type = {
+static const struct bus_type pci_epf_bus_type = {
.name = "pci-epf",
.match = pci_epf_device_match,
.probe = pci_epf_device_probe,
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index a89b7de72d..7333b305f2 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -26,58 +26,79 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
hotplug_slot);
int rc;
- if (zdev->state != ZPCI_FN_STATE_STANDBY)
- return -EIO;
+ mutex_lock(&zdev->state_lock);
+ if (zdev->state != ZPCI_FN_STATE_STANDBY) {
+ rc = -EIO;
+ goto out;
+ }
rc = sclp_pci_configure(zdev->fid);
zpci_dbg(3, "conf fid:%x, rc:%d\n", zdev->fid, rc);
if (rc)
- return rc;
+ goto out;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
- return zpci_scan_configured_device(zdev, zdev->fh);
+ rc = zpci_scan_configured_device(zdev, zdev->fh);
+out:
+ mutex_unlock(&zdev->state_lock);
+ return rc;
}
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
- struct pci_dev *pdev;
+ struct pci_dev *pdev = NULL;
+ int rc;
- if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
- return -EIO;
+ mutex_lock(&zdev->state_lock);
+ if (zdev->state != ZPCI_FN_STATE_CONFIGURED) {
+ rc = -EIO;
+ goto out;
+ }
pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
if (pdev && pci_num_vf(pdev)) {
pci_dev_put(pdev);
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
- pci_dev_put(pdev);
- return zpci_deconfigure_device(zdev);
+ rc = zpci_deconfigure_device(zdev);
+out:
+ mutex_unlock(&zdev->state_lock);
+ if (pdev)
+ pci_dev_put(pdev);
+ return rc;
}
static int reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
{
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
+ int rc = -EIO;
- if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
- return -EIO;
/*
- * We can't take the zdev->lock as reset_slot may be called during
- * probing and/or device removal which already happens under the
- * zdev->lock. Instead the user should use the higher level
- * pci_reset_function() or pci_bus_reset() which hold the PCI device
- * lock preventing concurrent removal. If not using these functions
- * holding the PCI device lock is required.
+ * If we can't get the zdev->state_lock the device state is
+ * currently undergoing a transition and we bail out - just
+ * the same as if the device's state is not configured at all.
*/
+ if (!mutex_trylock(&zdev->state_lock))
+ return rc;
- /* As long as the function is configured we can reset */
- if (probe)
- return 0;
+ /* We can reset only if the function is configured */
+ if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
+ goto out;
+
+ if (probe) {
+ rc = 0;
+ goto out;
+ }
- return zpci_hot_reset_device(zdev);
+ rc = zpci_hot_reset_device(zdev);
+out:
+ mutex_unlock(&zdev->state_lock);
+ return rc;
}
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c
new file mode 100644
index 0000000000..c9725428e3
--- /dev/null
+++ b/drivers/pci/iomap.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implement the default iomap interfaces
+ *
+ * (C) Copyright 2004 Linus Torvalds
+ */
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#include <linux/export.h>
+
+/**
+ * pci_iomap_range - create a virtual mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
+ *
+ * Using this function you will get an __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will
+ * just do what you expect from them in the correct way.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR from offset to the end, pass %0 here.
+ */
+void __iomem *pci_iomap_range(struct pci_dev *dev,
+ int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (len <= offset || !start)
+ return NULL;
+ len -= offset;
+ start += offset;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+ if (flags & IORESOURCE_IO)
+ return __pci_ioport_map(dev, start, len);
+ if (flags & IORESOURCE_MEM)
+ return ioremap(start, len);
+ /* What? */
+ return NULL;
+}
+EXPORT_SYMBOL(pci_iomap_range);
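+
+/*
+ * Usage sketch (illustrative only): map just a 4K register window that
+ * starts 0x1000 bytes into BAR 1 instead of the whole BAR; offset and
+ * length here are hypothetical:
+ *
+ *	void __iomem *win = pci_iomap_range(pdev, 1, 0x1000, SZ_4K);
+ *
+ *	if (!win)
+ *		return -ENOMEM;
+ *	iowrite32(1, win);
+ */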
+
+/**
+ * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
+ *
+ * Using this function you will get an __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will
+ * just do what you expect from them in the correct way. When possible,
+ * write combining is used.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR from offset to the end, pass %0 here.
+ */
+void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
+ int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (flags & IORESOURCE_IO)
+ return NULL;
+
+ if (len <= offset || !start)
+ return NULL;
+
+ len -= offset;
+ start += offset;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+
+ if (flags & IORESOURCE_MEM)
+ return ioremap_wc(start, len);
+
+ /* What? */
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
+
+/**
+ * pci_iomap - create a virtual mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get an __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will
+ * just do what you expect from them in the correct way.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR without checking for its length first, pass %0 here.
+ */
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL(pci_iomap);
+
+/**
+ * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get an __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will
+ * just do what you expect from them in the correct way. When possible,
+ * write combining is used.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR without checking for its length first, pass %0 here.
+ */
+void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_wc_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc);
+
+/*
+ * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
+ * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
+ * the different IOMAP ranges.
+ *
+ * But if the architecture does not use the generic iomap code, and if
+ * it has _not_ defined its own private pci_iounmap function, we define
+ * it here.
+ *
+ * NOTE! This default implementation assumes that if the architecture
+ * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
+ * be fixed to the half-open range [PCI_IOBASE, PCI_IOBASE + IO_SPACE_LIMIT)
+ * and does not need unmapping with 'ioport_unmap()'.
+ *
+ * If you have different rules for your architecture, you need to
+ * implement your own pci_iounmap() that knows the rules for where
+ * and how IO vs MEM get mapped.
+ *
+ * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
+ * from legacy <asm-generic/io.h> header file behavior. In particular,
+ * it would seem to make sense to do the iounmap(p) for the non-IO-space
+ * case here regardless, but that's not what the old header file code
+ * did. Probably incorrectly, but this is meant to be bug-for-bug
+ * compatible.
+ */
+#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+ uintptr_t start = (uintptr_t) PCI_IOBASE;
+ uintptr_t addr = (uintptr_t) p;
+
+ if (addr >= start && addr < start + IO_SPACE_LIMIT)
+ return;
+#endif
+ iounmap(p);
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index 0050e8f681..4555630be9 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -8,9 +8,13 @@
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/errno.h>
#include <linux/export.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
+#include "pci.h"
+
/**
* pci_request_irq - allocate an interrupt line for a PCI device
* @dev: PCI device to operate on
@@ -74,3 +78,203 @@ void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id)
kfree(free_irq(pci_irq_vector(dev, nr), dev_id));
}
EXPORT_SYMBOL(pci_free_irq);
+
+/**
+ * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
+ * @dev: the PCI device
+ * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
+ *
+ * Perform INTx swizzling for a device behind one level of bridge. This is
+ * required by section 9.1 of the PCI-to-PCI bridge specification for devices
+ * behind bridges on add-in cards. For devices with ARI enabled, the slot
+ * number is always 0 (see the Implementation Note in section 2.2.8.1 of
+ * the PCI Express Base Specification, Revision 2.1)
+ */
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
+{
+ int slot;
+
+ if (pci_ari_enabled(dev->bus))
+ slot = 0;
+ else
+ slot = PCI_SLOT(dev->devfn);
+
+ return (((pin - 1) + slot) % 4) + 1;
+}
+
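+/*
+ * Worked example (illustrative): a device in slot 2 asserting INTB
+ * (pin 2) behind one bridge swizzles to (((2 - 1) + 2) % 4) + 1 = 4,
+ * i.e. the interrupt emerges as INTD on the bridge's upstream side.
+ */
+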
+int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
+{
+ u8 pin;
+
+ pin = dev->pin;
+ if (!pin)
+ return -1;
+
+ while (!pci_is_root_bus(dev->bus)) {
+ pin = pci_swizzle_interrupt_pin(dev, pin);
+ dev = dev->bus->self;
+ }
+ *bridge = dev;
+ return pin;
+}
+
+/**
+ * pci_common_swizzle - swizzle INTx all the way to root bridge
+ * @dev: the PCI device
+ * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
+ *
+ * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
+ * bridges all the way up to a PCI root bus.
+ */
+u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ u8 pin = *pinp;
+
+ while (!pci_is_root_bus(dev->bus)) {
+ pin = pci_swizzle_interrupt_pin(dev, pin);
+ dev = dev->bus->self;
+ }
+ *pinp = pin;
+ return PCI_SLOT(dev->devfn);
+}
+EXPORT_SYMBOL_GPL(pci_common_swizzle);
+
+void pci_assign_irq(struct pci_dev *dev)
+{
+ u8 pin;
+ u8 slot = -1;
+ int irq = 0;
+ struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus);
+
+ if (!(hbrg->map_irq)) {
+ pci_dbg(dev, "runtime IRQ mapping not provided by arch\n");
+ return;
+ }
+
+ /*
+ * If this device is not on the primary bus, we need to figure out
+ * which interrupt pin it will come in on. We know which slot it
+ * will come in on because that slot is where the bridge is. Each
+ * time the interrupt line passes through a PCI-PCI bridge we must
+ * apply the swizzle function.
+ */
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ /* Cope with illegal values. */
+ if (pin > 4)
+ pin = 1;
+
+ if (pin) {
+ /* Follow the chain of bridges, swizzling as we go. */
+ if (hbrg->swizzle_irq)
+ slot = (*(hbrg->swizzle_irq))(dev, &pin);
+
+ /*
+ * If a swizzling function is not used, map_irq() must
+ * ignore slot.
+ */
+ irq = (*(hbrg->map_irq))(dev, slot, pin);
+ if (irq == -1)
+ irq = 0;
+ }
+ dev->irq = irq;
+
+ pci_dbg(dev, "assign IRQ: got %d\n", dev->irq);
+
+ /*
+ * Always tell the device, so the driver knows the real IRQ to use;
+ * the device itself does not use it.
+ */
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
+{
+ struct pci_bus *bus = dev->bus;
+ bool mask_updated = true;
+ u32 cmd_status_dword;
+ u16 origcmd, newcmd;
+ unsigned long flags;
+ bool irq_pending;
+
+ /*
+ * We do a single dword read to retrieve both command and status.
+ * Document assumptions that make this possible.
+ */
+ BUILD_BUG_ON(PCI_COMMAND % 4);
+ BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
+
+ raw_spin_lock_irqsave(&pci_lock, flags);
+
+ bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
+
+ irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
+
+ /*
+ * Check interrupt status register to see whether our device
+ * triggered the interrupt (when masking) or the next IRQ is
+ * already pending (when unmasking).
+ */
+ if (mask != irq_pending) {
+ mask_updated = false;
+ goto done;
+ }
+
+ origcmd = cmd_status_dword;
+ newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
+ if (mask)
+ newcmd |= PCI_COMMAND_INTX_DISABLE;
+ if (newcmd != origcmd)
+ bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
+
+done:
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+
+ return mask_updated;
+}
+
+/**
+ * pci_check_and_mask_intx - mask INTx on pending interrupt
+ * @dev: the PCI device to operate on
+ *
+ * Check if the device dev has its INTx line asserted, mask it and return
+ * true in that case. False is returned if no interrupt was pending.
+ */
+bool pci_check_and_mask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, true);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
+
+/**
+ * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
+ * @dev: the PCI device to operate on
+ *
+ * Check if the device dev has its INTx line asserted, unmask it if not and
+ * return true. False is returned and the mask remains active if there was
+ * still an interrupt pending.
+ */
+bool pci_check_and_unmask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, false);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
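+
+/*
+ * Usage sketch (illustrative only): a hypothetical handler for a shared
+ * legacy INTx line that acknowledges from hardirq context and defers the
+ * real work, unmasking again once that work is done:
+ *
+ *	static irqreturn_t foo_irq(int irq, void *data)
+ *	{
+ *		struct foo *priv = data;
+ *
+ *		if (!pci_check_and_mask_intx(priv->pdev))
+ *			return IRQ_NONE;
+ *		schedule_work(&priv->work);
+ *		return IRQ_HANDLED;
+ *	}
+ */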
+
+/**
+ * pcibios_penalize_isa_irq - penalize an ISA IRQ
+ * @irq: ISA IRQ to penalize
+ * @active: IRQ active or not
+ *
+ * Permits the platform to provide architecture-specific functionality when
+ * penalizing ISA IRQs. This is the default implementation. Architecture
+ * implementations can override this.
+ */
+void __weak pcibios_penalize_isa_irq(int irq, int active) {}
+
+int __weak pcibios_alloc_irq(struct pci_dev *dev)
+{
+ return 0;
+}
+
+void __weak pcibios_free_irq(struct pci_dev *dev)
+{
+}
diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c
index 4504039056..8da3347a95 100644
--- a/drivers/pci/mmap.c
+++ b/drivers/pci/mmap.c
@@ -11,6 +11,8 @@
#include <linux/mm.h>
#include <linux/pci.h>
+#include "pci.h"
+
#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE
static const struct vm_operations_struct pci_phys_vm_ops = {
@@ -50,3 +52,30 @@ int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
}
#endif
+
+#if (defined(CONFIG_SYSFS) || defined(CONFIG_PROC_FS)) && \
+ (defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE))
+
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
+ enum pci_mmap_api mmap_api)
+{
+ resource_size_t pci_start = 0, pci_end;
+ unsigned long nr, start, size;
+
+ if (pci_resource_len(pdev, resno) == 0)
+ return 0;
+ nr = vma_pages(vma);
+ start = vma->vm_pgoff;
+ size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
+ if (mmap_api == PCI_MMAP_PROCFS) {
+ pci_resource_to_user(pdev, resno, &pdev->resource[resno],
+ &pci_start, &pci_end);
+ pci_start >>= PAGE_SHIFT;
+ }
+ if (start >= pci_start && start < pci_start + size &&
+ start + nr <= pci_start + size)
+ return 1;
+ return 0;
+}
+
+#endif
diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
index c2c7334152..03539e5053 100644
--- a/drivers/pci/of_property.c
+++ b/drivers/pci/of_property.c
@@ -238,6 +238,8 @@ static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs,
return 0;
int_map = kcalloc(map_sz, sizeof(u32), GFP_KERNEL);
+ if (!int_map)
+ return -ENOMEM;
mapp = int_map;
list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 9c59bf03d6..af2996d0d1 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -419,15 +419,6 @@ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
return error;
}
-int __weak pcibios_alloc_irq(struct pci_dev *dev)
-{
- return 0;
-}
-
-void __weak pcibios_free_irq(struct pci_dev *dev)
-{
-}
-
#ifdef CONFIG_PCI_IOV
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
@@ -1389,10 +1380,7 @@ static int pci_pm_runtime_idle(struct device *dev)
if (!pci_dev->driver)
return 0;
- if (!pm)
- return -ENOSYS;
-
- if (pm->runtime_idle)
+ if (pm && pm->runtime_idle)
return pm->runtime_idle(dev);
return 0;
@@ -1721,7 +1709,7 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
return 1;
}
-struct bus_type pcie_port_bus_type = {
+const struct bus_type pcie_port_bus_type = {
.name = "pci_express",
.match = pcie_port_bus_match,
};
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 2321fdfefd..40cfa71639 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1022,29 +1022,6 @@ void pci_remove_legacy_files(struct pci_bus *b)
#endif /* HAVE_PCI_LEGACY */
#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
-
-int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
- enum pci_mmap_api mmap_api)
-{
- unsigned long nr, start, size;
- resource_size_t pci_start = 0, pci_end;
-
- if (pci_resource_len(pdev, resno) == 0)
- return 0;
- nr = vma_pages(vma);
- start = vma->vm_pgoff;
- size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
- if (mmap_api == PCI_MMAP_PROCFS) {
- pci_resource_to_user(pdev, resno, &pdev->resource[resno],
- &pci_start, &pci_end);
- pci_start >>= PAGE_SHIFT;
- }
- if (start >= pci_start && start < pci_start + size &&
- start + nr <= pci_start + size)
- return 1;
- return 0;
-}
-
/**
* pci_mmap_resource - map a PCI resource into user memory space
* @kobj: kobject for mapping
@@ -1410,79 +1387,89 @@ static const struct attribute_group pci_dev_reset_attr_group = {
.is_visible = pci_dev_reset_attr_is_visible,
};
+static ssize_t __resource_resize_show(struct device *dev, int n, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t ret;
+
+ pci_config_pm_runtime_get(pdev);
+
+ ret = sysfs_emit(buf, "%016llx\n",
+ (u64)pci_rebar_get_possible_sizes(pdev, n));
+
+ pci_config_pm_runtime_put(pdev);
+
+ return ret;
+}
+
+static ssize_t __resource_resize_store(struct device *dev, int n,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned long size, flags;
+ int ret, i;
+ u16 cmd;
+
+ if (kstrtoul(buf, 0, &size) < 0)
+ return -EINVAL;
+
+ device_lock(dev);
+ if (dev->driver) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ pci_config_pm_runtime_get(pdev);
+
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
+ ret = aperture_remove_conflicting_pci_devices(pdev,
+ "resourceN_resize");
+ if (ret)
+ goto pm_put;
+ }
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_MEMORY);
+
+ flags = pci_resource_flags(pdev, n);
+
+ pci_remove_resource_files(pdev);
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (pci_resource_len(pdev, i) &&
+ pci_resource_flags(pdev, i) == flags)
+ pci_release_resource(pdev, i);
+ }
+
+ ret = pci_resize_resource(pdev, n, size);
+
+ pci_assign_unassigned_bus_resources(pdev->bus);
+
+ if (pci_create_resource_files(pdev))
+ pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");
+
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+pm_put:
+ pci_config_pm_runtime_put(pdev);
+unlock:
+ device_unlock(dev);
+
+ return ret ? ret : count;
+}
+
#define pci_dev_resource_resize_attr(n) \
static ssize_t resource##n##_resize_show(struct device *dev, \
struct device_attribute *attr, \
- char * buf) \
+ char *buf) \
{ \
- struct pci_dev *pdev = to_pci_dev(dev); \
- ssize_t ret; \
- \
- pci_config_pm_runtime_get(pdev); \
- \
- ret = sysfs_emit(buf, "%016llx\n", \
- (u64)pci_rebar_get_possible_sizes(pdev, n)); \
- \
- pci_config_pm_runtime_put(pdev); \
- \
- return ret; \
+ return __resource_resize_show(dev, n, buf); \
} \
- \
static ssize_t resource##n##_resize_store(struct device *dev, \
struct device_attribute *attr,\
const char *buf, size_t count)\
{ \
- struct pci_dev *pdev = to_pci_dev(dev); \
- unsigned long size, flags; \
- int ret, i; \
- u16 cmd; \
- \
- if (kstrtoul(buf, 0, &size) < 0) \
- return -EINVAL; \
- \
- device_lock(dev); \
- if (dev->driver) { \
- ret = -EBUSY; \
- goto unlock; \
- } \
- \
- pci_config_pm_runtime_get(pdev); \
- \
- if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) { \
- ret = aperture_remove_conflicting_pci_devices(pdev, \
- "resourceN_resize"); \
- if (ret) \
- goto pm_put; \
- } \
- \
- pci_read_config_word(pdev, PCI_COMMAND, &cmd); \
- pci_write_config_word(pdev, PCI_COMMAND, \
- cmd & ~PCI_COMMAND_MEMORY); \
- \
- flags = pci_resource_flags(pdev, n); \
- \
- pci_remove_resource_files(pdev); \
- \
- for (i = 0; i < PCI_STD_NUM_BARS; i++) { \
- if (pci_resource_len(pdev, i) && \
- pci_resource_flags(pdev, i) == flags) \
- pci_release_resource(pdev, i); \
- } \
- \
- ret = pci_resize_resource(pdev, n, size); \
- \
- pci_assign_unassigned_bus_resources(pdev->bus); \
- \
- if (pci_create_resource_files(pdev)) \
- pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");\
- \
- pci_write_config_word(pdev, PCI_COMMAND, cmd); \
-pm_put: \
- pci_config_pm_runtime_put(pdev); \
-unlock: \
- device_unlock(dev); \
- \
- return ret ? ret : count; \
+ return __resource_resize_store(dev, n, buf, count); \
} \
static DEVICE_ATTR_RW(resource##n##_resize)
@@ -1660,7 +1647,7 @@ static const struct attribute_group pcie_dev_attr_group = {
.is_visible = pcie_dev_attrs_are_visible,
};
-static const struct attribute_group *pci_dev_attr_groups[] = {
+const struct attribute_group *pci_dev_attr_groups[] = {
&pci_dev_attr_group,
&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
@@ -1677,7 +1664,3 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
#endif
NULL,
};
-
-const struct device_type pci_dev_type = {
- .groups = pci_dev_attr_groups,
-};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c3585229c1..cbbf197df8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -24,7 +24,6 @@
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
-#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
@@ -1068,6 +1067,34 @@ disable_acs_redir:
}
/**
+ * pcie_read_tlp_log - read TLP Header Log
+ * @dev: PCIe device
+ * @where: PCI Config offset of TLP Header Log
+ * @tlp_log: TLP Log structure to fill
+ *
+ * Fill @tlp_log from TLP Header Log registers, e.g., AER or DPC.
+ *
+ * Return: 0 on success and filled TLP Log structure, <0 on error.
+ */
+int pcie_read_tlp_log(struct pci_dev *dev, int where,
+ struct pcie_tlp_log *tlp_log)
+{
+ int i, ret;
+
+ memset(tlp_log, 0, sizeof(*tlp_log));
+
+ for (i = 0; i < 4; i++) {
+ ret = pci_read_config_dword(dev, where + i * 4,
+ &tlp_log->dw[i]);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcie_read_tlp_log);
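+
+/*
+ * Usage sketch (illustrative only): filling a log structure from the AER
+ * Header Log registers, mirroring what the aer.c hunk below now does:
+ *
+ *	struct pcie_tlp_log log;
+ *
+ *	if (!pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &log))
+ *		pci_err(dev, "TLP: %08x %08x %08x %08x\n",
+ *			log.dw[0], log.dw[1], log.dw[2], log.dw[3]);
+ */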
+
+/**
* pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
* @dev: PCI device to have its BARs restored
*
@@ -1250,6 +1277,11 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
for (;;) {
u32 id;
+ if (pci_dev_is_disconnected(dev)) {
+ pci_dbg(dev, "disconnected; not waiting\n");
+ return -ENOTTY;
+ }
+
pci_read_config_dword(dev, PCI_COMMAND, &id);
if (!PCI_POSSIBLE_ERROR(id))
break;
@@ -1649,25 +1681,10 @@ static int pci_save_pcie_state(struct pci_dev *dev)
pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
- return 0;
-}
-
-void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
-{
-#ifdef CONFIG_PCIEASPM
- struct pci_dev *bridge;
- u32 ctl;
+ pci_save_aspm_l1ss_state(dev);
+ pci_save_ltr_state(dev);
- bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->ltr_path) {
- pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
- if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
- pci_dbg(bridge, "re-enabling LTR\n");
- pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_LTR_EN);
- }
- }
-#endif
+ return 0;
}
static void pci_restore_pcie_state(struct pci_dev *dev)
@@ -1676,6 +1693,13 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
struct pci_cap_saved_state *save_state;
u16 *cap;
+ /*
+ * Restore max latencies (in the LTR capability) before enabling
+ * LTR itself in PCI_EXP_DEVCTL2.
+ */
+ pci_restore_ltr_state(dev);
+ pci_restore_aspm_l1ss_state(dev);
+
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
if (!save_state)
return;
@@ -1733,46 +1757,6 @@ static void pci_restore_pcix_state(struct pci_dev *dev)
pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}
-static void pci_save_ltr_state(struct pci_dev *dev)
-{
- int ltr;
- struct pci_cap_saved_state *save_state;
- u32 *cap;
-
- if (!pci_is_pcie(dev))
- return;
-
- ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
- if (!ltr)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
- if (!save_state) {
- pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
- return;
- }
-
- /* Some broken devices only support dword access to LTR */
- cap = &save_state->cap.data[0];
- pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
-}
-
-static void pci_restore_ltr_state(struct pci_dev *dev)
-{
- struct pci_cap_saved_state *save_state;
- int ltr;
- u32 *cap;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
- ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
- if (!save_state || !ltr)
- return;
-
- /* Some broken devices only support dword access to LTR */
- cap = &save_state->cap.data[0];
- pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
-}
-
/**
* pci_save_state - save the PCI configuration space of a device before
* suspending
@@ -1797,7 +1781,6 @@ int pci_save_state(struct pci_dev *dev)
if (i != 0)
return i;
- pci_save_ltr_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
@@ -1898,12 +1881,6 @@ void pci_restore_state(struct pci_dev *dev)
if (!dev->state_saved)
return;
- /*
- * Restore max latencies (in the LTR capability) before enabling
- * LTR itself (in the PCIe capability).
- */
- pci_restore_ltr_state(dev);
-
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
pci_restore_pri_state(dev);
@@ -2184,107 +2161,6 @@ int pci_enable_device(struct pci_dev *dev)
EXPORT_SYMBOL(pci_enable_device);
/*
- * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
- * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
- * there's no need to track it separately. pci_devres is initialized
- * when a device is enabled using managed PCI device enable interface.
- */
-struct pci_devres {
- unsigned int enabled:1;
- unsigned int pinned:1;
- unsigned int orig_intx:1;
- unsigned int restore_intx:1;
- unsigned int mwi:1;
- u32 region_mask;
-};
-
-static void pcim_release(struct device *gendev, void *res)
-{
- struct pci_dev *dev = to_pci_dev(gendev);
- struct pci_devres *this = res;
- int i;
-
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
- if (this->region_mask & (1 << i))
- pci_release_region(dev, i);
-
- if (this->mwi)
- pci_clear_mwi(dev);
-
- if (this->restore_intx)
- pci_intx(dev, this->orig_intx);
-
- if (this->enabled && !this->pinned)
- pci_disable_device(dev);
-}
-
-static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
-{
- struct pci_devres *dr, *new_dr;
-
- dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
- if (dr)
- return dr;
-
- new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
- if (!new_dr)
- return NULL;
- return devres_get(&pdev->dev, new_dr, NULL, NULL);
-}
-
-static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
-{
- if (pci_is_managed(pdev))
- return devres_find(&pdev->dev, pcim_release, NULL, NULL);
- return NULL;
-}
-
-/**
- * pcim_enable_device - Managed pci_enable_device()
- * @pdev: PCI device to be initialized
- *
- * Managed pci_enable_device().
- */
-int pcim_enable_device(struct pci_dev *pdev)
-{
- struct pci_devres *dr;
- int rc;
-
- dr = get_pci_dr(pdev);
- if (unlikely(!dr))
- return -ENOMEM;
- if (dr->enabled)
- return 0;
-
- rc = pci_enable_device(pdev);
- if (!rc) {
- pdev->is_managed = 1;
- dr->enabled = 1;
- }
- return rc;
-}
-EXPORT_SYMBOL(pcim_enable_device);
-
-/**
- * pcim_pin_device - Pin managed PCI device
- * @pdev: PCI device to pin
- *
- * Pin managed PCI device @pdev. Pinned device won't be disabled on
- * driver detach. @pdev must have been enabled with
- * pcim_enable_device().
- */
-void pcim_pin_device(struct pci_dev *pdev)
-{
- struct pci_devres *dr;
-
- dr = find_pci_dr(pdev);
- WARN_ON(!dr || !dr->enabled);
- if (dr)
- dr->pinned = 1;
-}
-EXPORT_SYMBOL(pcim_pin_device);
-
-/*
* pcibios_device_add - provide arch specific hooks when adding device dev
* @dev: the PCI device being added
*
@@ -2318,17 +2194,6 @@ void __weak pcibios_release_device(struct pci_dev *dev) {}
*/
void __weak pcibios_disable_device(struct pci_dev *dev) {}
-/**
- * pcibios_penalize_isa_irq - penalize an ISA IRQ
- * @irq: ISA IRQ to penalize
- * @active: IRQ active or not
- *
- * Permits the platform to provide architecture-specific functionality when
- * penalizing ISA IRQs. This is the default implementation. Architecture
- * implementations can override this.
- */
-void __weak pcibios_penalize_isa_irq(int irq, int active) {}
-
static void do_pci_disable_device(struct pci_dev *dev)
{
u16 pci_command;
@@ -2532,7 +2397,7 @@ static void pci_pme_list_scan(struct work_struct *work)
* course of the call.
*/
if (bdev) {
- bref = pm_runtime_get_if_active(bdev, true);
+ bref = pm_runtime_get_if_active(bdev);
if (!bref)
continue;
@@ -3102,6 +2967,18 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
},
},
+ {
+ /*
+ * Changing the power state of the root port the dGPU is connected to fails
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
+ */
+ .ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_BOARD_NAME, "1972"),
+ DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
+ },
+ },
#endif
{ }
};
@@ -3998,66 +3875,6 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
/**
- * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
- * @dev: the PCI device
- * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
- *
- * Perform INTx swizzling for a device behind one level of bridge. This is
- * required by section 9.1 of the PCI-to-PCI bridge specification for devices
- * behind bridges on add-in cards. For devices with ARI enabled, the slot
- * number is always 0 (see the Implementation Note in section 2.2.8.1 of
- * the PCI Express Base Specification, Revision 2.1)
- */
-u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
-{
- int slot;
-
- if (pci_ari_enabled(dev->bus))
- slot = 0;
- else
- slot = PCI_SLOT(dev->devfn);
-
- return (((pin - 1) + slot) % 4) + 1;
-}
-
-int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
-{
- u8 pin;
-
- pin = dev->pin;
- if (!pin)
- return -1;
-
- while (!pci_is_root_bus(dev->bus)) {
- pin = pci_swizzle_interrupt_pin(dev, pin);
- dev = dev->bus->self;
- }
- *bridge = dev;
- return pin;
-}
-
-/**
- * pci_common_swizzle - swizzle INTx all the way to root bridge
- * @dev: the PCI device
- * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTD, 4=INTD)
- *
- * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
- * bridges all the way up to a PCI root bus.
- */
-u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
-{
- u8 pin = *pinp;
-
- while (!pci_is_root_bus(dev->bus)) {
- pin = pci_swizzle_interrupt_pin(dev, pin);
- dev = dev->bus->self;
- }
- *pinp = pin;
- return PCI_SLOT(dev->devfn);
-}
-EXPORT_SYMBOL_GPL(pci_common_swizzle);
-
-/**
* pci_release_region - Release a PCI bar
* @pdev: PCI device whose resources were previously reserved by
* pci_request_region()
@@ -4353,8 +4170,8 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
if (res->end > IO_SPACE_LIMIT)
return -EINVAL;
- return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
- pgprot_device(PAGE_KERNEL));
+ return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+ pgprot_device(PAGE_KERNEL));
#else
/*
* This architecture does not have memory mapped I/O space,
@@ -4385,133 +4202,6 @@ void pci_unmap_iospace(struct resource *res)
}
EXPORT_SYMBOL(pci_unmap_iospace);
-static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
-{
- struct resource **res = ptr;
-
- pci_unmap_iospace(*res);
-}
-
-/**
- * devm_pci_remap_iospace - Managed pci_remap_iospace()
- * @dev: Generic device to remap IO address for
- * @res: Resource describing the I/O space
- * @phys_addr: physical address of range to be mapped
- *
- * Managed pci_remap_iospace(). Map is automatically unmapped on driver
- * detach.
- */
-int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
- phys_addr_t phys_addr)
-{
- const struct resource **ptr;
- int error;
-
- ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- error = pci_remap_iospace(res, phys_addr);
- if (error) {
- devres_free(ptr);
- } else {
- *ptr = res;
- devres_add(dev, ptr);
- }
-
- return error;
-}
-EXPORT_SYMBOL(devm_pci_remap_iospace);
-
-/**
- * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
- * @dev: Generic device to remap IO address for
- * @offset: Resource address to map
- * @size: Size of map
- *
- * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
- * detach.
- */
-void __iomem *devm_pci_remap_cfgspace(struct device *dev,
- resource_size_t offset,
- resource_size_t size)
-{
- void __iomem **ptr, *addr;
-
- ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- addr = pci_remap_cfgspace(offset, size);
- if (addr) {
- *ptr = addr;
- devres_add(dev, ptr);
- } else
- devres_free(ptr);
-
- return addr;
-}
-EXPORT_SYMBOL(devm_pci_remap_cfgspace);
-
-/**
- * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
- * @dev: generic device to handle the resource for
- * @res: configuration space resource to be handled
- *
- * Checks that a resource is a valid memory region, requests the memory
- * region and ioremaps with pci_remap_cfgspace() API that ensures the
- * proper PCI configuration space memory attributes are guaranteed.
- *
- * All operations are managed and will be undone on driver detach.
- *
- * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
- * on failure. Usage example::
- *
- * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
- * if (IS_ERR(base))
- * return PTR_ERR(base);
- */
-void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
- struct resource *res)
-{
- resource_size_t size;
- const char *name;
- void __iomem *dest_ptr;
-
- BUG_ON(!dev);
-
- if (!res || resource_type(res) != IORESOURCE_MEM) {
- dev_err(dev, "invalid resource\n");
- return IOMEM_ERR_PTR(-EINVAL);
- }
-
- size = resource_size(res);
-
- if (res->name)
- name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
- res->name);
- else
- name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
- if (!name)
- return IOMEM_ERR_PTR(-ENOMEM);
-
- if (!devm_request_mem_region(dev, res->start, size, name)) {
- dev_err(dev, "can't request region for resource %pR\n", res);
- return IOMEM_ERR_PTR(-EBUSY);
- }
-
- dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
- if (!dest_ptr) {
- dev_err(dev, "ioremap failed for resource %pR\n", res);
- devm_release_mem_region(dev, res->start, size);
- dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
- }
-
- return dest_ptr;
-}
-EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
-
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;
@@ -4662,27 +4352,6 @@ int pci_set_mwi(struct pci_dev *dev)
EXPORT_SYMBOL(pci_set_mwi);
/**
- * pcim_set_mwi - a device-managed pci_set_mwi()
- * @dev: the PCI device for which MWI is enabled
- *
- * Managed pci_set_mwi().
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int pcim_set_mwi(struct pci_dev *dev)
-{
- struct pci_devres *dr;
-
- dr = find_pci_dr(dev);
- if (!dr)
- return -ENOMEM;
-
- dr->mwi = 1;
- return pci_set_mwi(dev);
-}
-EXPORT_SYMBOL(pcim_set_mwi);
-
-/**
* pci_try_set_mwi - enables memory-write-invalidate PCI transaction
* @dev: the PCI device for which MWI is enabled
*
@@ -4770,78 +4439,6 @@ void pci_intx(struct pci_dev *pdev, int enable)
}
EXPORT_SYMBOL_GPL(pci_intx);
-static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
-{
- struct pci_bus *bus = dev->bus;
- bool mask_updated = true;
- u32 cmd_status_dword;
- u16 origcmd, newcmd;
- unsigned long flags;
- bool irq_pending;
-
- /*
- * We do a single dword read to retrieve both command and status.
- * Document assumptions that make this possible.
- */
- BUILD_BUG_ON(PCI_COMMAND % 4);
- BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
-
- raw_spin_lock_irqsave(&pci_lock, flags);
-
- bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
-
- irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
-
- /*
- * Check interrupt status register to see whether our device
- * triggered the interrupt (when masking) or the next IRQ is
- * already pending (when unmasking).
- */
- if (mask != irq_pending) {
- mask_updated = false;
- goto done;
- }
-
- origcmd = cmd_status_dword;
- newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
- if (mask)
- newcmd |= PCI_COMMAND_INTX_DISABLE;
- if (newcmd != origcmd)
- bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
-
-done:
- raw_spin_unlock_irqrestore(&pci_lock, flags);
-
- return mask_updated;
-}
-
-/**
- * pci_check_and_mask_intx - mask INTx on pending interrupt
- * @dev: the PCI device to operate on
- *
- * Check if the device dev has its INTx line asserted, mask it and return
- * true in that case. False is returned if no interrupt was pending.
- */
-bool pci_check_and_mask_intx(struct pci_dev *dev)
-{
- return pci_check_and_set_intx_mask(dev, true);
-}
-EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
-
-/**
- * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
- * @dev: the PCI device to operate on
- *
- * Check if the device dev has its INTx line asserted, unmask it if not and
- * return true. False is returned and the mask remains active if there was
- * still an interrupt pending.
- */
-bool pci_check_and_unmask_intx(struct pci_dev *dev)
-{
- return pci_check_and_set_intx_mask(dev, false);
-}
-EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
-
/**
* pci_wait_for_pending_transaction - wait for pending transaction
* @dev: the PCI device to operate on
@@ -5049,7 +4646,7 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
* avoid LTSSM race as recommended in Implementation Note at the
* end of PCIe r6.0.1 sec 7.5.3.7.
*/
- rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+ rc = pcie_wait_for_link_status(pdev, true, false);
if (rc)
return rc;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index bfc56f7bee..17fed18468 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -31,9 +31,6 @@ bool pcie_cap_has_rtctl(const struct pci_dev *dev);
/* Functions internal to the PCI core code */
-int pci_create_sysfs_dev_files(struct pci_dev *pdev);
-void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
-void pci_cleanup_rom(struct pci_dev *dev);
#ifdef CONFIG_DMI
extern const struct attribute_group pci_dev_smbios_attr_group;
#endif
@@ -97,7 +94,6 @@ void pci_msi_init(struct pci_dev *dev);
void pci_msix_init(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
-void pci_bridge_reconfigure_ltr(struct pci_dev *dev);
int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type);
static inline void pci_wakeup_event(struct pci_dev *dev)
@@ -152,7 +148,7 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
/* Functions for PCI Hotplug drivers to use */
int pci_hp_add_bridge(struct pci_dev *dev);
-#ifdef HAVE_PCI_LEGACY
+#if defined(CONFIG_SYSFS) && defined(HAVE_PCI_LEGACY)
void pci_create_legacy_files(struct pci_bus *bus);
void pci_remove_legacy_files(struct pci_bus *bus);
#else
@@ -185,10 +181,22 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
return (dev->no_d1d2 || parent_dstates);
}
+
+#ifdef CONFIG_SYSFS
+int pci_create_sysfs_dev_files(struct pci_dev *pdev);
+void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
extern const struct attribute_group *pci_dev_groups[];
+extern const struct attribute_group *pci_dev_attr_groups[];
extern const struct attribute_group *pcibus_groups[];
-extern const struct device_type pci_dev_type;
extern const struct attribute_group *pci_bus_groups[];
+#else
+static inline int pci_create_sysfs_dev_files(struct pci_dev *pdev) { return 0; }
+static inline void pci_remove_sysfs_dev_files(struct pci_dev *pdev) { }
+#define pci_dev_groups NULL
+#define pci_dev_attr_groups NULL
+#define pcibus_groups NULL
+#define pci_bus_groups NULL
+#endif
extern unsigned long pci_hotplug_io_size;
extern unsigned long pci_hotplug_mmio_size;
@@ -404,7 +412,7 @@ struct aer_err_info {
unsigned int status; /* COR/UNCOR Error Status */
unsigned int mask; /* COR/UNCOR Error Mask */
- struct aer_header_log_regs tlp; /* TLP Header */
+ struct pcie_tlp_log tlp; /* TLP Header */
};
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
@@ -563,16 +571,28 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
int pcie_retrain_link(struct pci_dev *pdev, bool use_lt);
+
+/* ASPM-related functionality we need even without CONFIG_PCIEASPM */
+void pci_save_ltr_state(struct pci_dev *dev);
+void pci_restore_ltr_state(struct pci_dev *dev);
+void pci_configure_aspm_l1ss(struct pci_dev *dev);
+void pci_save_aspm_l1ss_state(struct pci_dev *dev);
+void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
+
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+void pci_configure_ltr(struct pci_dev *pdev);
+void pci_bridge_reconfigure_ltr(struct pci_dev *pdev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
+static inline void pci_configure_ltr(struct pci_dev *pdev) { }
+static inline void pci_bridge_reconfigure_ltr(struct pci_dev *pdev) { }
#endif
#ifdef CONFIG_PCIE_ECRC
@@ -793,6 +813,27 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
#endif
/*
+ * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
+ * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
+ * there's no need to track it separately. pci_devres is initialized
+ * when a device is enabled using managed PCI device enable interface.
+ *
+ * TODO: Struct pci_devres and find_pci_dr() only need to be here because
+ * they're used in pci.c. Port or move these functions to devres.c and
+ * then remove them from here.
+ */
+struct pci_devres {
+ unsigned int enabled:1;
+ unsigned int pinned:1;
+ unsigned int orig_intx:1;
+ unsigned int restore_intx:1;
+ unsigned int mwi:1;
+ u32 region_mask;
+};
+
+struct pci_devres *find_pci_dr(struct pci_dev *pdev);
+
+/*
* Config Address for PCI Configuration Mechanism #1
*
* See PCI Local Bus Specification, Revision 3.0,
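For context on the TODO above: find_pci_dr() is conventionally a thin
lookup over the devres API. A minimal sketch, assuming the managed
release callback is named pcim_release() (name illustrative, not
confirmed by this diff):

	static void pcim_release(struct device *gendev, void *res)
	{
		/* hypothetical release callback paired with pcim_enable_device() */
	}

	struct pci_devres *find_pci_dr(struct pci_dev *pdev)
	{
		if (pci_is_managed(pdev))
			return devres_find(&pdev->dev, pcim_release, NULL, NULL);

		return NULL;
	}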
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 8de4ed5f98..6461aa93fe 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -6,7 +6,7 @@ pcieportdrv-y := portdrv.o rcec.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
-obj-$(CONFIG_PCIEASPM) += aspm.o
+obj-y += aspm.o
obj-$(CONFIG_PCIEAER) += aer.o err.o
obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
obj-$(CONFIG_PCIE_PME) += pme.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 05fc30bb51..ac6293c249 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -664,11 +664,10 @@ static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
}
}
-static void __print_tlp_header(struct pci_dev *dev,
- struct aer_header_log_regs *t)
+static void __print_tlp_header(struct pci_dev *dev, struct pcie_tlp_log *t)
{
pci_err(dev, " TLP Header: %08x %08x %08x %08x\n",
- t->dw0, t->dw1, t->dw2, t->dw3);
+ t->dw[0], t->dw[1], t->dw[2], t->dw[3]);
}
static void __aer_print_error(struct pci_dev *dev,
@@ -1210,7 +1209,7 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
int type = pci_pcie_type(dev);
int aer = dev->aer_cap;
- int temp;
+ u32 aercc;
/* Must reset in this function */
info->status = 0;
@@ -1241,19 +1240,12 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
return 0;
/* Get First Error Pointer */
- pci_read_config_dword(dev, aer + PCI_ERR_CAP, &temp);
- info->first_error = PCI_ERR_CAP_FEP(temp);
+ pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc);
+ info->first_error = PCI_ERR_CAP_FEP(aercc);
if (info->status & AER_LOG_TLP_MASKS) {
info->tlp_header_valid = 1;
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
+ pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &info->tlp);
}
}
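pcie_read_tlp_log() consolidates the four open-coded Header Log reads
above. A sketch of what such a helper plausibly looks like (the exact
signature and error handling in this series are not shown here):

	int pcie_read_tlp_log(struct pci_dev *dev, int where,
			      struct pcie_tlp_log *log)
	{
		int i, ret;

		/* The AER/DPC Header Log is four consecutive dwords */
		for (i = 0; i < 4; i++) {
			ret = pci_read_config_dword(dev, where + i * 4,
						    &log->dw[i]);
			if (ret)
				return pcibios_err_to_errno(ret);
		}

		return 0;
	}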
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index bc0bd86695..47761c7ef2 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -24,6 +24,166 @@
#include "../pci.h"
+void pci_save_ltr_state(struct pci_dev *dev)
+{
+ int ltr;
+ struct pci_cap_saved_state *save_state;
+ u32 *cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
+ if (!ltr)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
+ if (!save_state) {
+ pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
+ return;
+ }
+
+ /* Some broken devices only support dword access to LTR */
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
+}
+
+void pci_restore_ltr_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ int ltr;
+ u32 *cap;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
+ ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
+ if (!save_state || !ltr)
+ return;
+
+ /* Some broken devices only support dword access to LTR */
+ cap = &save_state->cap.data[0];
+ pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
+}
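Both helpers rely on a save buffer having been set aside at enumeration
time; the assumed pairing is a pci_add_ext_cap_save_buffer() call made
during capability setup, roughly:

	/* assumed allocation site, e.g. pci_allocate_cap_save_buffers() */
	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
					    2 * sizeof(u16));
	if (error)
		pci_err(dev, "unable to allocate suspend buffer for LTR\n");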
+
+void pci_configure_aspm_l1ss(struct pci_dev *pdev)
+{
+ int rc;
+
+ pdev->l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+
+ rc = pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_L1SS,
+ 2 * sizeof(u32));
+ if (rc)
+ pci_err(pdev, "unable to allocate ASPM L1SS save buffer (%pe)\n",
+ ERR_PTR(rc));
+}
+
+void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
+{
+ struct pci_cap_saved_state *save_state;
+ u16 l1ss = pdev->l1ss;
+ u32 *cap;
+
+ /*
+ * Save L1 substate configuration. The ASPM L0s/L1 configuration
+ * in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
+ */
+ if (!l1ss)
+ return;
+
+ save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
+ return;
+
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, cap++);
+}
+
+void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
+{
+ struct pci_cap_saved_state *pl_save_state, *cl_save_state;
+ struct pci_dev *parent = pdev->bus->self;
+ u32 *cap, pl_ctl1, pl_ctl2, pl_l1_2_enable;
+ u32 cl_ctl1, cl_ctl2, cl_l1_2_enable;
+ u16 clnkctl, plnkctl;
+
+ /*
+ * In case BIOS enabled L1.2 when resuming, we need to disable it first
+ * on the downstream component before the upstream. So, don't attempt to
+ * restore either until we are at the downstream component.
+ */
+ if (pcie_downstream_port(pdev) || !parent)
+ return;
+
+ if (!pdev->l1ss || !parent->l1ss)
+ return;
+
+ cl_save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ pl_save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
+ if (!cl_save_state || !pl_save_state)
+ return;
+
+ cap = &cl_save_state->cap.data[0];
+ cl_ctl2 = *cap++;
+ cl_ctl1 = *cap;
+ cap = &pl_save_state->cap.data[0];
+ pl_ctl2 = *cap++;
+ pl_ctl1 = *cap;
+
+ /* Make sure L0s/L1 are disabled before updating L1SS config */
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &clnkctl);
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &plnkctl);
+ if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
+ FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
+ clnkctl & ~PCI_EXP_LNKCTL_ASPMC);
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
+ plnkctl & ~PCI_EXP_LNKCTL_ASPMC);
+ }
+
+ /*
+ * Disable L1.2 on this downstream endpoint device first, followed
+ * by the upstream component.
+ */
+ pci_clear_and_set_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+
+ /*
+ * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD
+ * in PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
+ * enable bits, even though they're all in PCI_L1SS_CTL1.
+ */
+ pl_l1_2_enable = pl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ pl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
+ cl_l1_2_enable = cl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ cl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
+
+ /* Write back CTL2, then CTL1, with the L1.2 enable bits still cleared */
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, pl_ctl2);
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cl_ctl2);
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, pl_ctl1);
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cl_ctl1);
+
+ /* Then write back the enables */
+ if (pl_l1_2_enable || cl_l1_2_enable) {
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ pl_ctl1 | pl_l1_2_enable);
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
+ cl_ctl1 | cl_l1_2_enable);
+ }
+
+ /* Restore L0s/L1 if they were enabled */
+ if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
+ FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, plnkctl);
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, clnkctl);
+ }
+}
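pci_clear_and_set_config_dword(), used for the L1.2 disables above, is a
plain read-modify-write helper; a sketch, assuming it mirrors the
pcie_capability_clear_and_set_word() pattern:

	void pci_clear_and_set_config_dword(struct pci_dev *dev, int pos,
					    u32 clear, u32 set)
	{
		u32 val;

		pci_read_config_dword(dev, pos, &val);
		val &= ~clear;
		val |= set;
		pci_write_config_dword(dev, pos, val);
	}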
+
+#ifdef CONFIG_PCIEASPM
+
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
@@ -141,16 +301,42 @@ static int policy_to_clkpm_state(struct pcie_link_state *link)
return 0;
}
+static void pci_update_aspm_saved_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u16 *cap, lnkctl, aspm_ctl;
+
+ save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
+ if (!save_state)
+ return;
+
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnkctl);
+
+ /*
+ * Update ASPM and CLKREQ bits of LNKCTL in save_state. We only
+ * write PCI_EXP_LNKCTL_CCC during enumeration, so it shouldn't
+ * change after being captured in save_state.
+ */
+ aspm_ctl = lnkctl & (PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
+ lnkctl &= ~(PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
+
+ /* Depends on pci_save_pcie_state(): cap[1] is LNKCTL */
+ cap = (u16 *)&save_state->cap.data[0];
+ cap[1] = lnkctl | aspm_ctl;
+}
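The cap[1] index is only valid if pci_save_pcie_state() stores LNKCTL as
the second u16; that assumed save order (not shown in this diff) is
roughly:

	/* assumed register order in pci_save_pcie_state() */
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]); /* cap[0] */
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]); /* cap[1] */
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]); /* cap[2] */
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]); /* cap[3] */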
+
static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
struct pci_dev *child;
struct pci_bus *linkbus = link->pdev->subordinate;
u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;
- list_for_each_entry(child, &linkbus->devices, bus_list)
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_CLKREQ_EN,
val);
+ pci_update_aspm_saved_state(child);
+ }
link->clkpm_enabled = !!enable;
}
@@ -769,6 +955,12 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
pcie_config_aspm_dev(parent, upstream);
link->aspm_enabled = state;
+
+ /* Update latest ASPM configuration in saved context */
+ pci_save_aspm_l1ss_state(link->downstream);
+ pci_update_aspm_saved_state(link->downstream);
+ pci_save_aspm_l1ss_state(parent);
+ pci_update_aspm_saved_state(parent);
}
static void pcie_config_aspm_path(struct pcie_link_state *link)
@@ -938,6 +1130,78 @@ out:
up_read(&pci_bus_sem);
}
+void pci_bridge_reconfigure_ltr(struct pci_dev *pdev)
+{
+ struct pci_dev *bridge;
+ u32 ctl;
+
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge && bridge->ltr_path) {
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
+ if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
+ pci_dbg(bridge, "re-enabling LTR\n");
+ pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+ }
+ }
+}
+
+void pci_configure_ltr(struct pci_dev *pdev)
+{
+ struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
+ struct pci_dev *bridge;
+ u32 cap, ctl;
+
+ if (!pci_is_pcie(pdev))
+ return;
+
+ pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &cap);
+ if (!(cap & PCI_EXP_DEVCAP2_LTR))
+ return;
+
+ pcie_capability_read_dword(pdev, PCI_EXP_DEVCTL2, &ctl);
+ if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
+ pdev->ltr_path = 1;
+ return;
+ }
+
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge && bridge->ltr_path)
+ pdev->ltr_path = 1;
+
+ return;
+ }
+
+ if (!host->native_ltr)
+ return;
+
+ /*
+ * Software must not enable LTR in an Endpoint unless the Root
+ * Complex and all intermediate Switches indicate support for LTR.
+ * PCIe r4.0, sec 6.18.
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+ pdev->ltr_path = 1;
+ return;
+ }
+
+ /*
+ * If we're configuring a hot-added device, LTR was likely
+ * disabled in the upstream bridge, so re-enable it before enabling
+ * it in the new device.
+ */
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge && bridge->ltr_path) {
+ pci_bridge_reconfigure_ltr(pdev);
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+ pdev->ltr_path = 1;
+ }
+}
+
/* Recheck latencies and update aspm_capable for links under the root */
static void pcie_update_aspm_capable(struct pcie_link_state *root)
{
@@ -1447,3 +1711,5 @@ bool pcie_aspm_support_enabled(void)
{
return aspm_support_enabled;
}
+
+#endif /* CONFIG_PCIEASPM */
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index e5d7c12854..a668820696 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -190,7 +190,8 @@ out:
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, dpc_status, first_error;
- u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix;
+ u32 status, mask, sev, syserr, exc, log, prefix;
+ struct pcie_tlp_log tlp_log;
int i;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status);
@@ -216,16 +217,9 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
if (pdev->dpc_rp_log_size < 4)
goto clear_status;
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
- &dw0);
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4,
- &dw1);
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 8,
- &dw2);
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12,
- &dw3);
+ pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG, &tlp_log);
pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
- dw0, dw1, dw2, dw3);
+ tlp_log.dw[0], tlp_log.dw[1], tlp_log.dw[2], tlp_log.dw[3]);
if (pdev->dpc_rp_log_size < 5)
goto clear_status;
@@ -303,10 +297,70 @@ void dpc_process_error(struct pci_dev *pdev)
}
}
+static void pci_clear_surpdn_errors(struct pci_dev *pdev)
+{
+ if (pdev->dpc_rp_extensions)
+ pci_write_config_dword(pdev, pdev->dpc_cap +
+ PCI_EXP_DPC_RP_PIO_STATUS, ~0);
+
+ /*
+ * In practice, Surprise Down errors have been observed to set error
+ * bits in the Status Register as well as the Fatal Error Detected
+ * bit in the Device Status Register.
+ */
+ pci_write_config_word(pdev, PCI_STATUS, 0xffff);
+
+ pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_FED);
+}
+
+static void dpc_handle_surprise_removal(struct pci_dev *pdev)
+{
+ if (!pcie_wait_for_link(pdev, false)) {
+ pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");
+ goto out;
+ }
+
+ if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev))
+ goto out;
+
+ pci_aer_raw_clear_status(pdev);
+ pci_clear_surpdn_errors(pdev);
+
+ pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS,
+ PCI_EXP_DPC_STATUS_TRIGGER);
+
+out:
+ clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
+ wake_up_all(&dpc_completed_waitqueue);
+}
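The clear_bit()/wake_up_all() pair signals a waiter on PCI_DPC_RECOVERED;
a sketch of the assumed waiter side, modeled on the existing
pci_dpc_recovered() path (dpc_completed() and the 4 s timeout are taken
from that code, not from this diff):

	wait_event_timeout(dpc_completed_waitqueue, dpc_completed(pdev),
			   msecs_to_jiffies(4000));
	return test_and_clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);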
+
+static bool dpc_is_surprise_removal(struct pci_dev *pdev)
+{
+ u16 status;
+
+ if (!pdev->is_hotplug_bridge)
+ return false;
+
+ if (pci_read_config_word(pdev, pdev->aer_cap + PCI_ERR_UNCOR_STATUS,
+ &status))
+ return false;
+
+ return status & PCI_ERR_UNC_SURPDN;
+}
+
static irqreturn_t dpc_handler(int irq, void *context)
{
struct pci_dev *pdev = context;
+ /*
+ * According to PCIe r6.0 sec 6.7.6, errors are an expected side effect
+ * of async removal and should be ignored by software.
+ */
+ if (dpc_is_surprise_removal(pdev)) {
+ dpc_handle_surprise_removal(pdev);
+ return IRQ_HANDLED;
+ }
+
dpc_process_error(pdev);
/* We configure DPC so it only triggers on ERR_FATAL */
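For reference, the "Link Active" condition that pcie_wait_for_link()
polls in the surprise-removal path boils down to one Link Status read;
a sketch (helper name illustrative):

	static bool pcie_link_is_active(struct pci_dev *pdev)
	{
		u16 lnksta;

		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
		return lnksta & PCI_EXP_LNKSTA_DLLLA;
	}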
diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
index 5f4914d313..e86298dbbc 100644
--- a/drivers/pci/pcie/edr.c
+++ b/drivers/pci/pcie/edr.c
@@ -32,10 +32,10 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
int status = 0;
/*
- * Behavior when calling unsupported _DSM functions is undefined,
- * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
+ * Per PCI Firmware r3.3, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
+ * optional. Return success if it's not implemented.
*/
- if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
1ULL << EDR_PORT_DPC_ENABLE_DSM))
return 0;
@@ -46,12 +46,7 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
argv4.package.count = 1;
argv4.package.elements = &req;
- /*
- * Per Downstream Port Containment Related Enhancements ECN to PCI
- * Firmware Specification r3.2, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
- * optional. Return success if it's not implemented.
- */
- obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
EDR_PORT_DPC_ENABLE_DSM, &argv4);
if (!obj)
return 0;
@@ -85,8 +80,9 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
u16 port;
/*
- * Behavior when calling unsupported _DSM functions is undefined,
- * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
+ * If EDR_PORT_LOCATE_DSM is not implemented under the target of
+ * EDR, the target is the port that experienced the containment
+ * event (PCI Firmware r3.3, sec 4.6.13).
*/
if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
1ULL << EDR_PORT_LOCATE_DSM))
@@ -104,6 +100,16 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
}
/*
+ * Bit 31 represents the success/failure of the operation. If bit
+ * 31 is set, the operation failed.
+ */
+ if (obj->integer.value & BIT(31)) {
+ ACPI_FREE(obj);
+ pci_err(pdev, "Locate Port _DSM failed\n");
+ return NULL;
+ }
+
+ /*
* Firmware returns DPC port BDF details in following format:
* 15:8 = bus
* 7:3 = device
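Bits 2:0 carry the function number under the standard BDF encoding, so
decoding the _DSM result is a pair of shifts; a sketch (variable names
illustrative):

	u16 port = obj->integer.value & 0xffff;
	u8 bus = port >> 8;		/* 15:8 = bus */
	u8 devfn = port & 0xff;		/* 7:3 = device, 2:0 = function */

	found = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
					    bus, devfn);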
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 1f3803bde7..12c89ea031 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -96,7 +96,7 @@ struct pcie_port_service_driver {
int pcie_port_service_register(struct pcie_port_service_driver *new);
void pcie_port_service_unregister(struct pcie_port_service_driver *new);
-extern struct bus_type pcie_port_bus_type;
+extern const struct bus_type pcie_port_bus_type;
struct pci_dev;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index b7335be560..1325fbae2f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2209,67 +2209,6 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
}
}
-static void pci_configure_ltr(struct pci_dev *dev)
-{
-#ifdef CONFIG_PCIEASPM
- struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
- struct pci_dev *bridge;
- u32 cap, ctl;
-
- if (!pci_is_pcie(dev))
- return;
-
- /* Read L1 PM substate capabilities */
- dev->l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
-
- pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
- if (!(cap & PCI_EXP_DEVCAP2_LTR))
- return;
-
- pcie_capability_read_dword(dev, PCI_EXP_DEVCTL2, &ctl);
- if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
- dev->ltr_path = 1;
- return;
- }
-
- bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->ltr_path)
- dev->ltr_path = 1;
-
- return;
- }
-
- if (!host->native_ltr)
- return;
-
- /*
- * Software must not enable LTR in an Endpoint unless the Root
- * Complex and all intermediate Switches indicate support for LTR.
- * PCIe r4.0, sec 6.18.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_LTR_EN);
- dev->ltr_path = 1;
- return;
- }
-
- /*
- * If we're configuring a hot-added device, LTR was likely
- * disabled in the upstream bridge, so re-enable it before enabling
- * it in the new device.
- */
- bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->ltr_path) {
- pci_bridge_reconfigure_ltr(dev);
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_LTR_EN);
- dev->ltr_path = 1;
- }
-#endif
-}
-
static void pci_configure_eetlp_prefix(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_PASID
@@ -2320,6 +2259,7 @@ static void pci_configure_device(struct pci_dev *dev)
pci_configure_extended_tags(dev, NULL);
pci_configure_relaxed_ordering(dev);
pci_configure_ltr(dev);
+ pci_configure_aspm_l1ss(dev);
pci_configure_eetlp_prefix(dev);
pci_configure_serr(dev);
@@ -2357,6 +2297,10 @@ static void pci_release_dev(struct device *dev)
kfree(pci_dev);
}
+static const struct device_type pci_dev_type = {
+ .groups = pci_dev_attr_groups,
+};
+
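With the type now private to probe.c, the attach point is assumed to be
unchanged: pci_alloc_dev() sets it on each newly allocated device,
roughly:

	/* assumed hookup in pci_alloc_dev(), not shown in this hunk */
	dev->dev.type = &pci_dev_type;
	dev->bus = pci_bus_get(bus);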
struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
struct pci_dev *dev;
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
deleted file mode 100644
index cc7d26b015..0000000000
--- a/drivers/pci/setup-irq.c
+++ /dev/null
@@ -1,64 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support routines for initializing a PCI subsystem
- *
- * Extruded from code written by
- * Dave Rusling (david.rusling@reo.mts.dec.com)
- * David Mosberger (davidm@cs.arizona.edu)
- * David Miller (davem@redhat.com)
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/cache.h>
-#include "pci.h"
-
-void pci_assign_irq(struct pci_dev *dev)
-{
- u8 pin;
- u8 slot = -1;
- int irq = 0;
- struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus);
-
- if (!(hbrg->map_irq)) {
- pci_dbg(dev, "runtime IRQ mapping not provided by arch\n");
- return;
- }
-
- /*
- * If this device is not on the primary bus, we need to figure out
- * which interrupt pin it will come in on. We know which slot it
- * will come in on because that slot is where the bridge is. Each
- * time the interrupt line passes through a PCI-PCI bridge we must
- * apply the swizzle function.
- */
- pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
- /* Cope with illegal. */
- if (pin > 4)
- pin = 1;
-
- if (pin) {
- /* Follow the chain of bridges, swizzling as we go. */
- if (hbrg->swizzle_irq)
- slot = (*(hbrg->swizzle_irq))(dev, &pin);
-
- /*
- * If a swizzling function is not used, map_irq() must
- * ignore slot.
- */
- irq = (*(hbrg->map_irq))(dev, slot, pin);
- if (irq == -1)
- irq = 0;
- }
- dev->irq = irq;
-
- pci_dbg(dev, "assign IRQ: got %d\n", dev->irq);
-
- /*
- * Always tell the device, so the driver knows what is the real IRQ
- * to use; the device does not use it.
- */
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
-}
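(Per the diffstat, this logic appears to move to irq.c rather than being
dropped.) The "swizzle" the deleted comment describes is the standard
INTx rotation; the stock helper amounts to one line, sketched here for
reference:

	u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
	{
		return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
	}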