author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 18:50:12 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 18:50:12 +0000
commit    | 8665bd53f2f2e27e5511d90428cb3f60e6d0ce15 (patch)
tree      | 8d58900dc0ebd4a3011f92c128d2fe45bc7c4bf2 /drivers/pci
parent    | Adding debian version 6.7.12-1. (diff)
download  | linux-8665bd53f2f2e27e5511d90428cb3f60e6d0ce15.tar.xz
          | linux-8665bd53f2f2e27e5511d90428cb3f60e6d0ce15.zip
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/pci')
61 files changed, 1363 insertions, 679 deletions
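
Note: the first hunk below adds pci_clear_and_set_config_dword(), a read-modify-write helper for PCI config space. A minimal usage sketch follows; the register offset and field mask are made up for illustration and are not part of this commit.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/pci.h>

/* Hypothetical config register and field, for illustration only */
#define EXAMPLE_REG		0x100
#define EXAMPLE_FIELD_MASK	GENMASK(7, 4)

static void example_update_field(struct pci_dev *pdev, u32 field_val)
{
	/* One call instead of an open-coded read / mask / or / write of the dword */
	pci_clear_and_set_config_dword(pdev, EXAMPLE_REG, EXAMPLE_FIELD_MASK,
				       FIELD_PREP(EXAMPLE_FIELD_MASK, field_val));
}
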
diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 6554a2e89d..6449056b57 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -598,3 +598,15 @@ int pci_write_config_dword(const struct pci_dev *dev, int where, return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_write_config_dword); + +void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos, + u32 clear, u32 set) +{ + u32 val; + + pci_read_config_dword(dev, pos, &val); + val &= ~clear; + val |= set; + pci_write_config_dword(dev, pos, val); +} +EXPORT_SYMBOL(pci_clear_and_set_config_dword); diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig index 291d127113..1d5a70c905 100644 --- a/drivers/pci/controller/cadence/Kconfig +++ b/drivers/pci/controller/cadence/Kconfig @@ -47,6 +47,7 @@ config PCI_J721E config PCI_J721E_HOST bool "TI J721E PCIe controller (host mode)" + depends on ARCH_K3 || COMPILE_TEST depends on OF select PCIE_CADENCE_HOST select PCI_J721E @@ -57,6 +58,7 @@ config PCI_J721E_HOST config PCI_J721E_EP bool "TI J721E PCIe controller (endpoint mode)" + depends on ARCH_K3 || COMPILE_TEST depends on OF depends on PCI_ENDPOINT select PCIE_CADENCE_EP diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index 2c87e7728a..8571824601 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -42,18 +42,16 @@ enum link_status { }; #define J721E_MODE_RC BIT(7) -#define LANE_COUNT_MASK BIT(8) #define LANE_COUNT(n) ((n) << 8) #define GENERATION_SEL_MASK GENMASK(1, 0) -#define MAX_LANES 2 - struct j721e_pcie { struct cdns_pcie *cdns_pcie; struct clk *refclk; u32 mode; u32 num_lanes; + u32 max_lanes; void __iomem *user_cfg_base; void __iomem *intd_cfg_base; u32 linkdown_irq_regfield; @@ -71,6 +69,7 @@ struct j721e_pcie_data { unsigned int quirk_disable_flr:1; u32 linkdown_irq_regfield; unsigned int byte_access_allowed:1; + unsigned int max_lanes; }; static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset) @@ -206,11 +205,15 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie, { struct device *dev = pcie->cdns_pcie->dev; u32 lanes = pcie->num_lanes; + u32 mask = BIT(8); u32 val = 0; int ret; + if (pcie->max_lanes == 4) + mask = GENMASK(9, 8); + val = LANE_COUNT(lanes - 1); - ret = regmap_update_bits(syscon, offset, LANE_COUNT_MASK, val); + ret = regmap_update_bits(syscon, offset, mask, val); if (ret) dev_err(dev, "failed to set link count\n"); @@ -290,11 +293,13 @@ static const struct j721e_pcie_data j721e_pcie_rc_data = { .quirk_retrain_flag = true, .byte_access_allowed = false, .linkdown_irq_regfield = LINK_DOWN, + .max_lanes = 2, }; static const struct j721e_pcie_data j721e_pcie_ep_data = { .mode = PCI_MODE_EP, .linkdown_irq_regfield = LINK_DOWN, + .max_lanes = 2, }; static const struct j721e_pcie_data j7200_pcie_rc_data = { @@ -302,23 +307,41 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = { .quirk_detect_quiet_flag = true, .linkdown_irq_regfield = J7200_LINK_DOWN, .byte_access_allowed = true, + .max_lanes = 2, }; static const struct j721e_pcie_data j7200_pcie_ep_data = { .mode = PCI_MODE_EP, .quirk_detect_quiet_flag = true, .quirk_disable_flr = true, + .max_lanes = 2, }; static const struct j721e_pcie_data am64_pcie_rc_data = { .mode = PCI_MODE_RC, .linkdown_irq_regfield = J7200_LINK_DOWN, .byte_access_allowed = true, + .max_lanes = 1, }; static const struct j721e_pcie_data 
am64_pcie_ep_data = { .mode = PCI_MODE_EP, .linkdown_irq_regfield = J7200_LINK_DOWN, + .max_lanes = 1, +}; + +static const struct j721e_pcie_data j784s4_pcie_rc_data = { + .mode = PCI_MODE_RC, + .quirk_retrain_flag = true, + .byte_access_allowed = false, + .linkdown_irq_regfield = LINK_DOWN, + .max_lanes = 4, +}; + +static const struct j721e_pcie_data j784s4_pcie_ep_data = { + .mode = PCI_MODE_EP, + .linkdown_irq_regfield = LINK_DOWN, + .max_lanes = 4, }; static const struct of_device_id of_j721e_pcie_match[] = { @@ -346,6 +369,14 @@ static const struct of_device_id of_j721e_pcie_match[] = { .compatible = "ti,am64-pcie-ep", .data = &am64_pcie_ep_data, }, + { + .compatible = "ti,j784s4-pcie-host", + .data = &j784s4_pcie_rc_data, + }, + { + .compatible = "ti,j784s4-pcie-ep", + .data = &j784s4_pcie_ep_data, + }, {}, }; @@ -432,9 +463,13 @@ static int j721e_pcie_probe(struct platform_device *pdev) pcie->user_cfg_base = base; ret = of_property_read_u32(node, "num-lanes", &num_lanes); - if (ret || num_lanes > MAX_LANES) + if (ret || num_lanes > data->max_lanes) { + dev_warn(dev, "num-lanes property not provided or invalid, setting num-lanes to 1\n"); num_lanes = 1; + } + pcie->num_lanes = num_lanes; + pcie->max_lanes = data->max_lanes; if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48))) return -EINVAL; diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index 3142feb8ac..2d0a8d78bf 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -360,8 +360,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, writel(0, ep->irq_cpu_addr + offset); } -static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn, - u8 intx) +static int cdns_pcie_ep_send_intx_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn, + u8 intx) { u16 cmd; @@ -371,7 +371,7 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn, cdns_pcie_ep_assert_intx(ep, fn, intx, true); /* - * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq() + * The mdelay() value was taken from dra7xx_pcie_raise_intx_irq() */ mdelay(1); cdns_pcie_ep_assert_intx(ep, fn, intx, false); @@ -532,25 +532,24 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn, } static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; struct device *dev = pcie->dev; switch (type) { - case PCI_EPC_IRQ_LEGACY: + case PCI_IRQ_INTX: if (vfn > 0) { - dev_err(dev, "Cannot raise legacy interrupts for VF\n"); + dev_err(dev, "Cannot raise INTX interrupts for VF\n"); return -EINVAL; } - return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0); + return cdns_pcie_ep_send_intx_irq(ep, fn, vfn, 0); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_MSI: return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num); - case PCI_EPC_IRQ_MSIX: + case PCI_IRQ_MSIX: return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num); default: diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index 373cb50fcd..03b96798f8 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -347,16 +347,16 @@ struct cdns_pcie_epf { * @max_regions: maximum number of regions supported by hardware * 
@ob_region_map: bitmask of mapped outbound regions * @ob_addr: base addresses in the AXI bus where the outbound regions start - * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ + * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ * dedicated outbound regions is mapped. * @irq_cpu_addr: base address in the CPU space where a write access triggers - * the sending of a memory write (MSI) / normal message (legacy + * the sending of a memory write (MSI) / normal message (INTX * IRQ) TLP through the PCIe bus. - * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ + * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ * dedicated outbound region. * @irq_pci_fn: the latest PCI function that has updated the mapping of - * the MSI/legacy IRQ dedicated outbound region. - * @irq_pending: bitmask of asserted legacy IRQs. + * the MSI/INTX IRQ dedicated outbound region. + * @irq_pending: bitmask of asserted INTX IRQs. * @lock: spin lock to disable interrupts while modifying PCIe controller * registers fields (RMW) accessible by both remote RC and EP to * minimize time between read and write @@ -374,7 +374,7 @@ struct cdns_pcie_ep { u64 irq_pci_addr; u8 irq_pci_fn; u8 irq_pending; - /* protect writing to PCI_STATUS while raising legacy interrupts */ + /* protect writing to PCI_STATUS while raising INTX interrupts */ spinlock_t lock; struct cdns_pcie_epf *epf; unsigned int quirk_detect_quiet_flag:1; diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 5ac021dbd4..8afacc90c6 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -336,7 +336,7 @@ config PCI_EXYNOS config PCIE_FU740 bool "SiFive FU740 PCIe controller" depends on PCI_MSI - depends on SOC_SIFIVE || COMPILE_TEST + depends on ARCH_SIFIVE || COMPILE_TEST select PCIE_DW_HOST help Say Y here if you want PCIe controller support for the SiFive diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index b445ffe95e..0e40667706 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -371,7 +371,7 @@ static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { - .host_init = dra7xx_pcie_host_init, + .init = dra7xx_pcie_host_init, }; static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) @@ -386,7 +386,7 @@ static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) dra7xx_pcie_enable_wrapper_interrupts(dra7xx); } -static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx) +static void dra7xx_pcie_raise_intx_irq(struct dra7xx_pcie *dra7xx) { dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1); mdelay(1); @@ -404,16 +404,16 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx, } static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); switch (type) { - case PCI_EPC_IRQ_LEGACY: - dra7xx_pcie_raise_legacy_irq(dra7xx); + case PCI_IRQ_INTX: + dra7xx_pcie_raise_intx_irq(dra7xx); break; - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_MSI: dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num); break; default: @@ -436,7 +436,7 @@ dra7xx_pcie_get_features(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops pcie_ep_ops = { - .ep_init = 
dra7xx_pcie_ep_init, + .init = dra7xx_pcie_ep_init, .raise_irq = dra7xx_pcie_raise_irq, .get_features = dra7xx_pcie_get_features, }; diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index c6bede3469..a33fa98a25 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -268,7 +268,7 @@ static int exynos_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops exynos_pcie_host_ops = { - .host_init = exynos_pcie_host_init, + .init = exynos_pcie_host_init, }; static int exynos_add_pcie_port(struct exynos_pcie *ep, @@ -375,7 +375,7 @@ fail_probe: return ret; } -static int exynos_pcie_remove(struct platform_device *pdev) +static void exynos_pcie_remove(struct platform_device *pdev) { struct exynos_pcie *ep = platform_get_drvdata(pdev); @@ -385,8 +385,6 @@ static int exynos_pcie_remove(struct platform_device *pdev) phy_exit(ep->phy); exynos_pcie_deinit_clk_resources(ep); regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies); - - return 0; } static int exynos_pcie_suspend_noirq(struct device *dev) @@ -431,7 +429,7 @@ static const struct of_device_id exynos_pcie_of_match[] = { static struct platform_driver exynos_pcie_driver = { .probe = exynos_pcie_probe, - .remove = exynos_pcie_remove, + .remove_new = exynos_pcie_remove, .driver = { .name = "exynos-pcie", .of_match_table = exynos_pcie_of_match, diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 74703362ae..dc2c036ab2 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -1039,8 +1039,8 @@ static void imx6_pcie_host_exit(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops imx6_pcie_host_ops = { - .host_init = imx6_pcie_host_init, - .host_deinit = imx6_pcie_host_exit, + .init = imx6_pcie_host_init, + .deinit = imx6_pcie_host_exit, }; static const struct dw_pcie_ops dw_pcie_ops = { @@ -1058,17 +1058,16 @@ static void imx6_pcie_ep_init(struct dw_pcie_ep *ep) } static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return dw_pcie_ep_raise_legacy_irq(ep, func_no); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_INTX: + return dw_pcie_ep_raise_intx_irq(ep, func_no); + case PCI_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); - case PCI_EPC_IRQ_MSIX: + case PCI_IRQ_MSIX: return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); @@ -1093,7 +1092,7 @@ imx6_pcie_ep_get_features(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops pcie_ep_ops = { - .ep_init = imx6_pcie_ep_init, + .init = imx6_pcie_ep_init, .raise_irq = imx6_pcie_ep_raise_irq, .get_features = imx6_pcie_ep_get_features, }; diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index cf38365613..c0c62533a3 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -115,8 +115,7 @@ struct keystone_pcie { struct dw_pcie *pci; /* PCI Device ID */ u32 device_id; - int legacy_host_irqs[PCI_NUM_INTX]; - struct device_node *legacy_intc_np; + int intx_host_irqs[PCI_NUM_INTX]; int msi_host_irq; int num_lanes; @@ -124,7 +123,7 @@ struct keystone_pcie { struct phy **phy; struct device_link **link; struct device_node *msi_intc_np; - 
struct irq_domain *legacy_irq_domain; + struct irq_domain *intx_irq_domain; struct device_node *np; /* Application register space */ @@ -252,8 +251,8 @@ static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp) return dw_pcie_allocate_domains(pp); } -static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, - int offset) +static void ks_pcie_handle_intx_irq(struct keystone_pcie *ks_pcie, + int offset) { struct dw_pcie *pci = ks_pcie->pci; struct device *dev = pci->dev; @@ -263,7 +262,7 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, if (BIT(0) & pending) { dev_dbg(dev, ": irq: irq_offset %d", offset); - generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset); + generic_handle_domain_irq(ks_pcie->intx_irq_domain, offset); } /* EOI the INTx interrupt */ @@ -307,38 +306,37 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie) return IRQ_HANDLED; } -static void ks_pcie_ack_legacy_irq(struct irq_data *d) +static void ks_pcie_ack_intx_irq(struct irq_data *d) { } -static void ks_pcie_mask_legacy_irq(struct irq_data *d) +static void ks_pcie_mask_intx_irq(struct irq_data *d) { } -static void ks_pcie_unmask_legacy_irq(struct irq_data *d) +static void ks_pcie_unmask_intx_irq(struct irq_data *d) { } -static struct irq_chip ks_pcie_legacy_irq_chip = { - .name = "Keystone-PCI-Legacy-IRQ", - .irq_ack = ks_pcie_ack_legacy_irq, - .irq_mask = ks_pcie_mask_legacy_irq, - .irq_unmask = ks_pcie_unmask_legacy_irq, +static struct irq_chip ks_pcie_intx_irq_chip = { + .name = "Keystone-PCI-INTX-IRQ", + .irq_ack = ks_pcie_ack_intx_irq, + .irq_mask = ks_pcie_mask_intx_irq, + .irq_unmask = ks_pcie_unmask_intx_irq, }; -static int ks_pcie_init_legacy_irq_map(struct irq_domain *d, - unsigned int irq, - irq_hw_number_t hw_irq) +static int ks_pcie_init_intx_irq_map(struct irq_domain *d, + unsigned int irq, irq_hw_number_t hw_irq) { - irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip, + irq_set_chip_and_handler(irq, &ks_pcie_intx_irq_chip, handle_level_irq); irq_set_chip_data(irq, d->host_data); return 0; } -static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = { - .map = ks_pcie_init_legacy_irq_map, +static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = { + .map = ks_pcie_init_intx_irq_map, .xlate = irq_domain_xlate_onetwocell, }; @@ -605,22 +603,22 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc) } /** - * ks_pcie_legacy_irq_handler() - Handle legacy interrupt + * ks_pcie_intx_irq_handler() - Handle INTX interrupt * @desc: Pointer to irq descriptor * - * Traverse through pending legacy interrupts and invoke handler for each. Also + * Traverse through pending INTX interrupts and invoke handler for each. Also * takes care of interrupt controller level mask/ack operation. */ -static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) +static void ks_pcie_intx_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); struct dw_pcie *pci = ks_pcie->pci; struct device *dev = pci->dev; - u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0]; + u32 irq_offset = irq - ks_pcie->intx_host_irqs[0]; struct irq_chip *chip = irq_desc_get_chip(desc); - dev_dbg(dev, ": Handling legacy irq %d\n", irq); + dev_dbg(dev, ": Handling INTX irq %d\n", irq); /* * The chained irq handler installation would have replaced normal @@ -628,7 +626,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) * ack operation. 
*/ chained_irq_enter(chip, desc); - ks_pcie_handle_legacy_irq(ks_pcie, irq_offset); + ks_pcie_handle_intx_irq(ks_pcie, irq_offset); chained_irq_exit(chip, desc); } @@ -686,10 +684,10 @@ err: return ret; } -static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie) +static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie) { struct device *dev = ks_pcie->pci->dev; - struct irq_domain *legacy_irq_domain; + struct irq_domain *intx_irq_domain; struct device_node *np = ks_pcie->np; struct device_node *intc_np; int irq_count, irq, ret = 0, i; @@ -697,7 +695,7 @@ static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie) intc_np = of_get_child_by_name(np, "legacy-interrupt-controller"); if (!intc_np) { /* - * Since legacy interrupts are modeled as edge-interrupts in + * Since INTX interrupts are modeled as edge-interrupts in * AM6, keep it disabled for now. */ if (ks_pcie->is_am6) @@ -719,22 +717,21 @@ static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie) ret = -EINVAL; goto err; } - ks_pcie->legacy_host_irqs[i] = irq; + ks_pcie->intx_host_irqs[i] = irq; irq_set_chained_handler_and_data(irq, - ks_pcie_legacy_irq_handler, + ks_pcie_intx_irq_handler, ks_pcie); } - legacy_irq_domain = - irq_domain_add_linear(intc_np, PCI_NUM_INTX, - &ks_pcie_legacy_irq_domain_ops, NULL); - if (!legacy_irq_domain) { - dev_err(dev, "Failed to add irq domain for legacy irqs\n"); + intx_irq_domain = irq_domain_add_linear(intc_np, PCI_NUM_INTX, + &ks_pcie_intx_irq_domain_ops, NULL); + if (!intx_irq_domain) { + dev_err(dev, "Failed to add irq domain for INTX irqs\n"); ret = -EINVAL; goto err; } - ks_pcie->legacy_irq_domain = legacy_irq_domain; + ks_pcie->intx_irq_domain = intx_irq_domain; for (i = 0; i < PCI_NUM_INTX; i++) ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN); @@ -808,7 +805,7 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp) if (!ks_pcie->is_am6) pp->bridge->child_ops = &ks_child_pcie_ops; - ret = ks_pcie_config_legacy_irq(ks_pcie); + ret = ks_pcie_config_intx_irq(ks_pcie); if (ret) return ret; @@ -838,12 +835,12 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops ks_pcie_host_ops = { - .host_init = ks_pcie_host_init, - .msi_host_init = ks_pcie_msi_host_init, + .init = ks_pcie_host_init, + .msi_init = ks_pcie_msi_host_init, }; static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = { - .host_init = ks_pcie_host_init, + .init = ks_pcie_host_init, }; static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv) @@ -881,7 +878,7 @@ static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep) dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags); } -static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie) +static void ks_pcie_am654_raise_intx_irq(struct keystone_pcie *ks_pcie) { struct dw_pcie *pci = ks_pcie->pci; u8 int_pin; @@ -900,20 +897,19 @@ static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie) } static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); switch (type) { - case PCI_EPC_IRQ_LEGACY: - ks_pcie_am654_raise_legacy_irq(ks_pcie); + case PCI_IRQ_INTX: + ks_pcie_am654_raise_intx_irq(ks_pcie); break; - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_MSI: dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); break; - case PCI_EPC_IRQ_MSIX: + case PCI_IRQ_MSIX: 
dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); break; default: @@ -944,7 +940,7 @@ ks_pcie_am654_get_features(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = { - .ep_init = ks_pcie_am654_ep_init, + .init = ks_pcie_am654_ep_init, .raise_irq = ks_pcie_am654_raise_irq, .get_features = &ks_pcie_am654_get_features, }; @@ -1311,7 +1307,7 @@ err_link: return ret; } -static int ks_pcie_remove(struct platform_device *pdev) +static void ks_pcie_remove(struct platform_device *pdev) { struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev); struct device_link **link = ks_pcie->link; @@ -1323,13 +1319,11 @@ static int ks_pcie_remove(struct platform_device *pdev) ks_pcie_disable_phy(ks_pcie); while (num_lanes--) device_link_del(link[num_lanes]); - - return 0; } static struct platform_driver ks_pcie_driver = { .probe = ks_pcie_probe, - .remove = ks_pcie_remove, + .remove_new = ks_pcie_remove, .driver = { .name = "keystone-pcie", .of_match_table = ks_pcie_of_match, diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c index 3d3c50ef4b..2e398494e7 100644 --- a/drivers/pci/controller/dwc/pci-layerscape-ep.c +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c @@ -49,7 +49,7 @@ struct ls_pcie_ep { bool big_endian; }; -static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset) +static u32 ls_pcie_pf_lut_readl(struct ls_pcie_ep *pcie, u32 offset) { struct dw_pcie *pci = pcie->pci; @@ -59,7 +59,7 @@ static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset) return ioread32(pci->dbi_base + offset); } -static void ls_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value) +static void ls_pcie_pf_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value) { struct dw_pcie *pci = pcie->pci; @@ -76,8 +76,8 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id) u32 val, cfg; u8 offset; - val = ls_lut_readl(pcie, PEX_PF0_PME_MES_DR); - ls_lut_writel(pcie, PEX_PF0_PME_MES_DR, val); + val = ls_pcie_pf_lut_readl(pcie, PEX_PF0_PME_MES_DR); + ls_pcie_pf_lut_writel(pcie, PEX_PF0_PME_MES_DR, val); if (!val) return IRQ_NONE; @@ -96,9 +96,9 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id) dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, pcie->lnkcap); dw_pcie_dbi_ro_wr_dis(pci); - cfg = ls_lut_readl(pcie, PEX_PF0_CONFIG); + cfg = ls_pcie_pf_lut_readl(pcie, PEX_PF0_CONFIG); cfg |= PEX_PF0_CFG_READY; - ls_lut_writel(pcie, PEX_PF0_CONFIG, cfg); + ls_pcie_pf_lut_writel(pcie, PEX_PF0_CONFIG, cfg); dw_pcie_ep_linkup(&pci->ep); dev_dbg(pci->dev, "Link up\n"); @@ -130,10 +130,10 @@ static int ls_pcie_ep_interrupt_init(struct ls_pcie_ep *pcie, } /* Enable interrupts */ - val = ls_lut_readl(pcie, PEX_PF0_PME_MES_IER); + val = ls_pcie_pf_lut_readl(pcie, PEX_PF0_PME_MES_IER); val |= PEX_PF0_PME_MES_IER_LDDIE | PEX_PF0_PME_MES_IER_HRDIE | PEX_PF0_PME_MES_IER_LUDIE; - ls_lut_writel(pcie, PEX_PF0_PME_MES_IER, val); + ls_pcie_pf_lut_writel(pcie, PEX_PF0_PME_MES_IER, val); return 0; } @@ -166,16 +166,16 @@ static void ls_pcie_ep_init(struct dw_pcie_ep *ep) } static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return dw_pcie_ep_raise_legacy_irq(ep, func_no); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_INTX: + return dw_pcie_ep_raise_intx_irq(ep, func_no); + case PCI_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, 
func_no, interrupt_num); - case PCI_EPC_IRQ_MSIX: + case PCI_IRQ_MSIX: return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no, interrupt_num); default: @@ -184,8 +184,7 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, } } -static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep, - u8 func_no) +static unsigned int ls_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep, u8 func_no) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci); @@ -195,10 +194,10 @@ static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep, } static const struct dw_pcie_ep_ops ls_pcie_ep_ops = { - .ep_init = ls_pcie_ep_init, + .init = ls_pcie_ep_init, .raise_irq = ls_pcie_ep_raise_irq, .get_features = ls_pcie_ep_get_features, - .func_conf_select = ls_pcie_ep_func_conf_select, + .get_dbi_offset = ls_pcie_ep_get_dbi_offset, }; static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = { diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c index 37956e09c6..ee6f525681 100644 --- a/drivers/pci/controller/dwc/pci-layerscape.c +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -35,21 +35,41 @@ #define PF_MCR_PTOMR BIT(0) #define PF_MCR_EXL2S BIT(1) +/* LS1021A PEXn PM Write Control Register */ +#define SCFG_PEXPMWRCR(idx) (0x5c + (idx) * 0x64) +#define PMXMTTURNOFF BIT(31) +#define SCFG_PEXSFTRSTCR 0x190 +#define PEXSR(idx) BIT(idx) + +/* LS1043A PEX PME control register */ +#define SCFG_PEXPMECR 0x144 +#define PEXPME(idx) BIT(31 - (idx) * 4) + +/* LS1043A PEX LUT debug register */ +#define LS_PCIE_LDBG 0x7fc +#define LDBG_SR BIT(30) +#define LDBG_WE BIT(31) + #define PCIE_IATU_NUM 6 struct ls_pcie_drvdata { - const u32 pf_off; + const u32 pf_lut_off; + const struct dw_pcie_host_ops *ops; + int (*exit_from_l2)(struct dw_pcie_rp *pp); + bool scfg_support; bool pm_support; }; struct ls_pcie { struct dw_pcie *pci; const struct ls_pcie_drvdata *drvdata; - void __iomem *pf_base; + void __iomem *pf_lut_base; + struct regmap *scfg; + int index; bool big_endian; }; -#define ls_pcie_pf_readl_addr(addr) ls_pcie_pf_readl(pcie, addr) +#define ls_pcie_pf_lut_readl_addr(addr) ls_pcie_pf_lut_readl(pcie, addr) #define to_ls_pcie(x) dev_get_drvdata((x)->dev) static bool ls_pcie_is_bridge(struct ls_pcie *pcie) @@ -90,20 +110,20 @@ static void ls_pcie_fix_error_response(struct ls_pcie *pcie) iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR); } -static u32 ls_pcie_pf_readl(struct ls_pcie *pcie, u32 off) +static u32 ls_pcie_pf_lut_readl(struct ls_pcie *pcie, u32 off) { if (pcie->big_endian) - return ioread32be(pcie->pf_base + off); + return ioread32be(pcie->pf_lut_base + off); - return ioread32(pcie->pf_base + off); + return ioread32(pcie->pf_lut_base + off); } -static void ls_pcie_pf_writel(struct ls_pcie *pcie, u32 off, u32 val) +static void ls_pcie_pf_lut_writel(struct ls_pcie *pcie, u32 off, u32 val) { if (pcie->big_endian) - iowrite32be(val, pcie->pf_base + off); + iowrite32be(val, pcie->pf_lut_base + off); else - iowrite32(val, pcie->pf_base + off); + iowrite32(val, pcie->pf_lut_base + off); } static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp) @@ -113,11 +133,11 @@ static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp) u32 val; int ret; - val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR); + val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR); val |= PF_MCR_PTOMR; - ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val); + ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val); - ret = 
readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR, + ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR, val, !(val & PF_MCR_PTOMR), PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US); @@ -125,7 +145,7 @@ static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp) dev_err(pcie->pci->dev, "PME_Turn_off timeout\n"); } -static void ls_pcie_exit_from_l2(struct dw_pcie_rp *pp) +static int ls_pcie_exit_from_l2(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct ls_pcie *pcie = to_ls_pcie(pci); @@ -136,20 +156,22 @@ static void ls_pcie_exit_from_l2(struct dw_pcie_rp *pp) * Set PF_MCR_EXL2S bit in LS_PCIE_PF_MCR register for the link * to exit L2 state. */ - val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR); + val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR); val |= PF_MCR_EXL2S; - ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val); + ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val); /* * L2 exit timeout of 10ms is not defined in the specifications, * it was chosen based on empirical observations. */ - ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR, + ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR, val, !(val & PF_MCR_EXL2S), 1000, 10000); if (ret) dev_err(pcie->pci->dev, "L2 exit timeout\n"); + + return ret; } static int ls_pcie_host_init(struct dw_pcie_rp *pp) @@ -168,25 +190,130 @@ static int ls_pcie_host_init(struct dw_pcie_rp *pp) return 0; } +static void scfg_pcie_send_turnoff_msg(struct regmap *scfg, u32 reg, u32 mask) +{ + /* Send PME_Turn_Off message */ + regmap_write_bits(scfg, reg, mask, mask); + + /* + * There is no specific register to check for PME_To_Ack from endpoint. + * So on the safe side, wait for PCIE_PME_TO_L2_TIMEOUT_US. + */ + mdelay(PCIE_PME_TO_L2_TIMEOUT_US/1000); + + /* + * Layerscape hardware reference manual recommends clearing the PMXMTTURNOFF bit + * to complete the PME_Turn_Off handshake. + */ + regmap_write_bits(scfg, reg, mask, 0); +} + +static void ls1021a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct ls_pcie *pcie = to_ls_pcie(pci); + + scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), PMXMTTURNOFF); +} + +static int scfg_pcie_exit_from_l2(struct regmap *scfg, u32 reg, u32 mask) +{ + /* Reset the PEX wrapper to bring the link out of L2 */ + regmap_write_bits(scfg, reg, mask, mask); + regmap_write_bits(scfg, reg, mask, 0); + + return 0; +} + +static int ls1021a_pcie_exit_from_l2(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct ls_pcie *pcie = to_ls_pcie(pci); + + return scfg_pcie_exit_from_l2(pcie->scfg, SCFG_PEXSFTRSTCR, PEXSR(pcie->index)); +} + +static void ls1043a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct ls_pcie *pcie = to_ls_pcie(pci); + + scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMECR, PEXPME(pcie->index)); +} + +static int ls1043a_pcie_exit_from_l2(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct ls_pcie *pcie = to_ls_pcie(pci); + u32 val; + + /* + * Reset the PEX wrapper to bring the link out of L2. + * LDBG_WE: allows the user to have write access to the PEXDBG[SR] for both setting and + * clearing the soft reset on the PEX module. + * LDBG_SR: When SR is set to 1, the PEX module enters soft reset. 
+ */ + val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG); + val |= LDBG_WE; + ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val); + + val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG); + val |= LDBG_SR; + ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val); + + val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG); + val &= ~LDBG_SR; + ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val); + + val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG); + val &= ~LDBG_WE; + ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val); + + return 0; +} + static const struct dw_pcie_host_ops ls_pcie_host_ops = { - .host_init = ls_pcie_host_init, + .init = ls_pcie_host_init, .pme_turn_off = ls_pcie_send_turnoff_msg, }; +static const struct dw_pcie_host_ops ls1021a_pcie_host_ops = { + .init = ls_pcie_host_init, + .pme_turn_off = ls1021a_pcie_send_turnoff_msg, +}; + static const struct ls_pcie_drvdata ls1021a_drvdata = { - .pm_support = false, + .pm_support = true, + .scfg_support = true, + .ops = &ls1021a_pcie_host_ops, + .exit_from_l2 = ls1021a_pcie_exit_from_l2, +}; + +static const struct dw_pcie_host_ops ls1043a_pcie_host_ops = { + .init = ls_pcie_host_init, + .pme_turn_off = ls1043a_pcie_send_turnoff_msg, +}; + +static const struct ls_pcie_drvdata ls1043a_drvdata = { + .pf_lut_off = 0x10000, + .pm_support = true, + .scfg_support = true, + .ops = &ls1043a_pcie_host_ops, + .exit_from_l2 = ls1043a_pcie_exit_from_l2, }; static const struct ls_pcie_drvdata layerscape_drvdata = { - .pf_off = 0xc0000, + .pf_lut_off = 0xc0000, .pm_support = true, + .ops = &ls_pcie_host_ops, + .exit_from_l2 = ls_pcie_exit_from_l2, }; static const struct of_device_id ls_pcie_of_match[] = { { .compatible = "fsl,ls1012a-pcie", .data = &layerscape_drvdata }, { .compatible = "fsl,ls1021a-pcie", .data = &ls1021a_drvdata }, { .compatible = "fsl,ls1028a-pcie", .data = &layerscape_drvdata }, - { .compatible = "fsl,ls1043a-pcie", .data = &ls1021a_drvdata }, + { .compatible = "fsl,ls1043a-pcie", .data = &ls1043a_drvdata }, { .compatible = "fsl,ls1046a-pcie", .data = &layerscape_drvdata }, { .compatible = "fsl,ls2080a-pcie", .data = &layerscape_drvdata }, { .compatible = "fsl,ls2085a-pcie", .data = &layerscape_drvdata }, @@ -201,6 +328,8 @@ static int ls_pcie_probe(struct platform_device *pdev) struct dw_pcie *pci; struct ls_pcie *pcie; struct resource *dbi_base; + u32 index[2]; + int ret; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) @@ -213,9 +342,8 @@ static int ls_pcie_probe(struct platform_device *pdev) pcie->drvdata = of_device_get_match_data(dev); pci->dev = dev; - pci->pp.ops = &ls_pcie_host_ops; - pcie->pci = pci; + pci->pp.ops = pcie->drvdata->ops; dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); @@ -224,7 +352,21 @@ static int ls_pcie_probe(struct platform_device *pdev) pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian"); - pcie->pf_base = pci->dbi_base + pcie->drvdata->pf_off; + pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off; + + if (pcie->drvdata->scfg_support) { + pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,pcie-scfg"); + if (IS_ERR(pcie->scfg)) { + dev_err(dev, "No syscfg phandle specified\n"); + return PTR_ERR(pcie->scfg); + } + + ret = of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg", index, 2); + if (ret) + return ret; + + pcie->index = index[1]; + } if (!ls_pcie_is_bridge(pcie)) return -ENODEV; @@ -247,11 +389,14 @@ static int ls_pcie_suspend_noirq(struct device *dev) static int ls_pcie_resume_noirq(struct 
device *dev) { struct ls_pcie *pcie = dev_get_drvdata(dev); + int ret; if (!pcie->drvdata->pm_support) return 0; - ls_pcie_exit_from_l2(&pcie->pci->pp); + ret = pcie->drvdata->exit_from_l2(&pcie->pci->pp); + if (ret) + return ret; return dw_pcie_resume_noirq(pcie->pci); } diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index 407558f5d7..6477c83262 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -389,7 +389,7 @@ static int meson_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops meson_pcie_host_ops = { - .host_init = meson_pcie_host_init, + .init = meson_pcie_host_init, }; static const struct dw_pcie_ops dw_pcie_ops = { diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c index b8cb77c9c4..6dfdda59f3 100644 --- a/drivers/pci/controller/dwc/pcie-al.c +++ b/drivers/pci/controller/dwc/pcie-al.c @@ -311,7 +311,7 @@ static int al_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops al_pcie_host_ops = { - .host_init = al_pcie_host_init, + .init = al_pcie_host_init, }; static int al_pcie_probe(struct platform_device *pdev) diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index 5c999e15c3..b5c599ccaa 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -225,7 +225,7 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) } static const struct dw_pcie_host_ops armada8k_pcie_host_ops = { - .host_init = armada8k_pcie_host_init, + .init = armada8k_pcie_host_init, }; static int armada8k_add_pcie_port(struct armada8k_pcie *pcie, diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index 9b572a2b2c..9ed0a9ba76 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -333,7 +333,7 @@ static int artpec6_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops artpec6_pcie_host_ops = { - .host_init = artpec6_pcie_host_init, + .init = artpec6_pcie_host_init, }; static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep) @@ -352,15 +352,15 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep) } static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - dev_err(pci->dev, "EP cannot trigger legacy IRQs\n"); + case PCI_IRQ_INTX: + dev_err(pci->dev, "EP cannot trigger INTx IRQs\n"); return -EINVAL; - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); @@ -370,7 +370,7 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, } static const struct dw_pcie_ep_ops pcie_ep_ops = { - .ep_init = artpec6_pcie_ep_init, + .init = artpec6_pcie_ep_init, .raise_irq = artpec6_pcie_raise_irq, }; diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c index 17e696797f..76d0ddea80 100644 --- a/drivers/pci/controller/dwc/pcie-bt1.c +++ b/drivers/pci/controller/dwc/pcie-bt1.c @@ -559,8 +559,8 @@ static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops bt1_pcie_host_ops = { - .host_init = bt1_pcie_host_init, - .host_deinit = bt1_pcie_host_deinit, + 
.init = bt1_pcie_host_init, + .deinit = bt1_pcie_host_deinit, }; static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev) diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 19b6708f60..746a11dcb6 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -44,46 +44,19 @@ dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no) return NULL; } -static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep *ep, u8 func_no) -{ - unsigned int func_offset = 0; - - if (ep->ops->func_conf_select) - func_offset = ep->ops->func_conf_select(ep, func_no); - - return func_offset; -} - -static unsigned int dw_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep, u8 func_no) -{ - unsigned int dbi2_offset = 0; - - if (ep->ops->get_dbi2_offset) - dbi2_offset = ep->ops->get_dbi2_offset(ep, func_no); - else if (ep->ops->func_conf_select) /* for backward compatibility */ - dbi2_offset = ep->ops->func_conf_select(ep, func_no); - - return dbi2_offset; -} - static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no, enum pci_barno bar, int flags) { - unsigned int func_offset, dbi2_offset; struct dw_pcie_ep *ep = &pci->ep; - u32 reg, reg_dbi2; - - func_offset = dw_pcie_ep_func_select(ep, func_no); - dbi2_offset = dw_pcie_ep_get_dbi2_offset(ep, func_no); + u32 reg; - reg = func_offset + PCI_BASE_ADDRESS_0 + (4 * bar); - reg_dbi2 = dbi2_offset + PCI_BASE_ADDRESS_0 + (4 * bar); + reg = PCI_BASE_ADDRESS_0 + (4 * bar); dw_pcie_dbi_ro_wr_en(pci); - dw_pcie_writel_dbi2(pci, reg_dbi2, 0x0); - dw_pcie_writel_dbi(pci, reg, 0x0); + dw_pcie_ep_writel_dbi2(ep, func_no, reg, 0x0); + dw_pcie_ep_writel_dbi(ep, func_no, reg, 0x0); if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { - dw_pcie_writel_dbi2(pci, reg_dbi2 + 4, 0x0); - dw_pcie_writel_dbi(pci, reg + 4, 0x0); + dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0x0); + dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0x0); } dw_pcie_dbi_ro_wr_dis(pci); } @@ -100,19 +73,15 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar); static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no, - u8 cap_ptr, u8 cap) + u8 cap_ptr, u8 cap) { - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - unsigned int func_offset = 0; u8 cap_id, next_cap_ptr; u16 reg; if (!cap_ptr) return 0; - func_offset = dw_pcie_ep_func_select(ep, func_no); - - reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr); + reg = dw_pcie_ep_readw_dbi(ep, func_no, cap_ptr); cap_id = (reg & 0x00ff); if (cap_id > PCI_CAP_ID_MAX) @@ -127,14 +96,10 @@ static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no, static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap) { - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - unsigned int func_offset = 0; u8 next_cap_ptr; u16 reg; - func_offset = dw_pcie_ep_func_select(ep, func_no); - - reg = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST); + reg = dw_pcie_ep_readw_dbi(ep, func_no, PCI_CAPABILITY_LIST); next_cap_ptr = (reg & 0x00ff); return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap); @@ -145,24 +110,21 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - unsigned int func_offset = 0; - - func_offset = dw_pcie_ep_func_select(ep, func_no); dw_pcie_dbi_ro_wr_en(pci); - dw_pcie_writew_dbi(pci, func_offset + 
PCI_VENDOR_ID, hdr->vendorid); - dw_pcie_writew_dbi(pci, func_offset + PCI_DEVICE_ID, hdr->deviceid); - dw_pcie_writeb_dbi(pci, func_offset + PCI_REVISION_ID, hdr->revid); - dw_pcie_writeb_dbi(pci, func_offset + PCI_CLASS_PROG, hdr->progif_code); - dw_pcie_writew_dbi(pci, func_offset + PCI_CLASS_DEVICE, - hdr->subclass_code | hdr->baseclass_code << 8); - dw_pcie_writeb_dbi(pci, func_offset + PCI_CACHE_LINE_SIZE, - hdr->cache_line_size); - dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_VENDOR_ID, - hdr->subsys_vendor_id); - dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_ID, hdr->subsys_id); - dw_pcie_writeb_dbi(pci, func_offset + PCI_INTERRUPT_PIN, - hdr->interrupt_pin); + dw_pcie_ep_writew_dbi(ep, func_no, PCI_VENDOR_ID, hdr->vendorid); + dw_pcie_ep_writew_dbi(ep, func_no, PCI_DEVICE_ID, hdr->deviceid); + dw_pcie_ep_writeb_dbi(ep, func_no, PCI_REVISION_ID, hdr->revid); + dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CLASS_PROG, hdr->progif_code); + dw_pcie_ep_writew_dbi(ep, func_no, PCI_CLASS_DEVICE, + hdr->subclass_code | hdr->baseclass_code << 8); + dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CACHE_LINE_SIZE, + hdr->cache_line_size); + dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_VENDOR_ID, + hdr->subsys_vendor_id); + dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_ID, hdr->subsys_id); + dw_pcie_ep_writeb_dbi(ep, func_no, PCI_INTERRUPT_PIN, + hdr->interrupt_pin); dw_pcie_dbi_ro_wr_dis(pci); return 0; @@ -244,18 +206,13 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - unsigned int func_offset, dbi2_offset; enum pci_barno bar = epf_bar->barno; size_t size = epf_bar->size; int flags = epf_bar->flags; - u32 reg, reg_dbi2; int ret, type; + u32 reg; - func_offset = dw_pcie_ep_func_select(ep, func_no); - dbi2_offset = dw_pcie_ep_get_dbi2_offset(ep, func_no); - - reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset; - reg_dbi2 = PCI_BASE_ADDRESS_0 + (4 * bar) + dbi2_offset; + reg = PCI_BASE_ADDRESS_0 + (4 * bar); if (!(flags & PCI_BASE_ADDRESS_SPACE)) type = PCIE_ATU_TYPE_MEM; @@ -271,12 +228,12 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, dw_pcie_dbi_ro_wr_en(pci); - dw_pcie_writel_dbi2(pci, reg_dbi2, lower_32_bits(size - 1)); - dw_pcie_writel_dbi(pci, reg, flags); + dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1)); + dw_pcie_ep_writel_dbi(ep, func_no, reg, flags); if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { - dw_pcie_writel_dbi2(pci, reg_dbi2 + 4, upper_32_bits(size - 1)); - dw_pcie_writel_dbi(pci, reg + 4, 0); + dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1)); + dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0); } ep->epf_bar[bar] = epf_bar; @@ -336,19 +293,15 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - u32 val, reg; - unsigned int func_offset = 0; struct dw_pcie_ep_func *ep_func; + u32 val, reg; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msi_cap) return -EINVAL; - func_offset = dw_pcie_ep_func_select(ep, func_no); - - reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS; - val = dw_pcie_readw_dbi(pci, reg); + reg = ep_func->msi_cap + PCI_MSI_FLAGS; + val = dw_pcie_ep_readw_dbi(ep, func_no, reg); if (!(val & PCI_MSI_FLAGS_ENABLE)) return -EINVAL; @@ -362,22 
+315,19 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - u32 val, reg; - unsigned int func_offset = 0; struct dw_pcie_ep_func *ep_func; + u32 val, reg; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msi_cap) return -EINVAL; - func_offset = dw_pcie_ep_func_select(ep, func_no); - - reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS; - val = dw_pcie_readw_dbi(pci, reg); + reg = ep_func->msi_cap + PCI_MSI_FLAGS; + val = dw_pcie_ep_readw_dbi(ep, func_no, reg); val &= ~PCI_MSI_FLAGS_QMASK; val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, interrupts); dw_pcie_dbi_ro_wr_en(pci); - dw_pcie_writew_dbi(pci, reg, val); + dw_pcie_ep_writew_dbi(ep, func_no, reg, val); dw_pcie_dbi_ro_wr_dis(pci); return 0; @@ -386,19 +336,15 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - u32 val, reg; - unsigned int func_offset = 0; struct dw_pcie_ep_func *ep_func; + u32 val, reg; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msix_cap) return -EINVAL; - func_offset = dw_pcie_ep_func_select(ep, func_no); - - reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS; - val = dw_pcie_readw_dbi(pci, reg); + reg = ep_func->msix_cap + PCI_MSIX_FLAGS; + val = dw_pcie_ep_readw_dbi(ep, func_no, reg); if (!(val & PCI_MSIX_FLAGS_ENABLE)) return -EINVAL; @@ -412,9 +358,8 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - u32 val, reg; - unsigned int func_offset = 0; struct dw_pcie_ep_func *ep_func; + u32 val, reg; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msix_cap) @@ -422,21 +367,19 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, dw_pcie_dbi_ro_wr_en(pci); - func_offset = dw_pcie_ep_func_select(ep, func_no); - - reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS; - val = dw_pcie_readw_dbi(pci, reg); + reg = ep_func->msix_cap + PCI_MSIX_FLAGS; + val = dw_pcie_ep_readw_dbi(ep, func_no, reg); val &= ~PCI_MSIX_FLAGS_QSIZE; val |= interrupts; dw_pcie_writew_dbi(pci, reg, val); - reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE; + reg = ep_func->msix_cap + PCI_MSIX_TABLE; val = offset | bir; - dw_pcie_writel_dbi(pci, reg, val); + dw_pcie_ep_writel_dbi(ep, func_no, reg, val); - reg = ep_func->msix_cap + func_offset + PCI_MSIX_PBA; + reg = ep_func->msix_cap + PCI_MSIX_PBA; val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; - dw_pcie_writel_dbi(pci, reg, val); + dw_pcie_ep_writel_dbi(ep, func_no, reg, val); dw_pcie_dbi_ro_wr_dis(pci); @@ -444,7 +387,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, } static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, - enum pci_epc_irq_type type, u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); @@ -497,56 +440,53 @@ static const struct pci_epc_ops epc_ops = { .get_features = dw_pcie_ep_get_features, }; -int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no) +int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct device *dev = 
pci->dev; - dev_err(dev, "EP cannot trigger legacy IRQs\n"); + dev_err(dev, "EP cannot raise INTX IRQs\n"); return -EINVAL; } -EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_legacy_irq); +EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq); int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, u8 interrupt_num) { - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + u32 msg_addr_lower, msg_addr_upper, reg; struct dw_pcie_ep_func *ep_func; struct pci_epc *epc = ep->epc; unsigned int aligned_offset; - unsigned int func_offset = 0; u16 msg_ctrl, msg_data; - u32 msg_addr_lower, msg_addr_upper, reg; - u64 msg_addr; bool has_upper; + u64 msg_addr; int ret; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); if (!ep_func || !ep_func->msi_cap) return -EINVAL; - func_offset = dw_pcie_ep_func_select(ep, func_no); - /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */ - reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS; - msg_ctrl = dw_pcie_readw_dbi(pci, reg); + reg = ep_func->msi_cap + PCI_MSI_FLAGS; + msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, reg); has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT); - reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_LO; - msg_addr_lower = dw_pcie_readl_dbi(pci, reg); + reg = ep_func->msi_cap + PCI_MSI_ADDRESS_LO; + msg_addr_lower = dw_pcie_ep_readl_dbi(ep, func_no, reg); if (has_upper) { - reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_HI; - msg_addr_upper = dw_pcie_readl_dbi(pci, reg); - reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_64; - msg_data = dw_pcie_readw_dbi(pci, reg); + reg = ep_func->msi_cap + PCI_MSI_ADDRESS_HI; + msg_addr_upper = dw_pcie_ep_readl_dbi(ep, func_no, reg); + reg = ep_func->msi_cap + PCI_MSI_DATA_64; + msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg); } else { msg_addr_upper = 0; - reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_32; - msg_data = dw_pcie_readw_dbi(pci, reg); + reg = ep_func->msi_cap + PCI_MSI_DATA_32; + msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg); } - aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1); - msg_addr = ((u64)msg_addr_upper) << 32 | - (msg_addr_lower & ~aligned_offset); + msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower; + + aligned_offset = msg_addr & (epc->mem->window.page_size - 1); + msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, epc->mem->window.page_size); if (ret) @@ -583,10 +523,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - struct dw_pcie_ep_func *ep_func; struct pci_epf_msix_tbl *msix_tbl; + struct dw_pcie_ep_func *ep_func; struct pci_epc *epc = ep->epc; - unsigned int func_offset = 0; u32 reg, msg_data, vec_ctrl; unsigned int aligned_offset; u32 tbl_offset; @@ -598,10 +537,8 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, if (!ep_func || !ep_func->msix_cap) return -EINVAL; - func_offset = dw_pcie_ep_func_select(ep, func_no); - - reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE; - tbl_offset = dw_pcie_readl_dbi(pci, reg); + reg = ep_func->msix_cap + PCI_MSIX_TABLE; + tbl_offset = dw_pcie_ep_readl_dbi(ep, func_no, reg); bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset); tbl_offset &= PCI_MSIX_TABLE_OFFSET; @@ -801,8 +738,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) list_add_tail(&ep_func->list, &ep->func_list); } - if (ep->ops->ep_init) - ep->ops->ep_init(ep); + if (ep->ops->init) + ep->ops->init(ep); ret = pci_epc_mem_init(epc, 
ep->phys_base, ep->addr_size, ep->page_size); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 7991f0e179..d5fc31f834 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -441,14 +441,14 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) bridge->ops = &dw_pcie_ops; bridge->child_ops = &dw_child_pcie_ops; - if (pp->ops->host_init) { - ret = pp->ops->host_init(pp); + if (pp->ops->init) { + ret = pp->ops->init(pp); if (ret) return ret; } if (pci_msi_enabled()) { - pp->has_msi_ctrl = !(pp->ops->msi_host_init || + pp->has_msi_ctrl = !(pp->ops->msi_init || of_property_read_bool(np, "msi-parent") || of_property_read_bool(np, "msi-map")); @@ -464,8 +464,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) goto err_deinit_host; } - if (pp->ops->msi_host_init) { - ret = pp->ops->msi_host_init(pp); + if (pp->ops->msi_init) { + ret = pp->ops->msi_init(pp); if (ret < 0) goto err_deinit_host; } else if (pp->has_msi_ctrl) { @@ -502,8 +502,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) if (ret) goto err_stop_link; - if (pp->ops->host_post_init) - pp->ops->host_post_init(pp); + if (pp->ops->post_init) + pp->ops->post_init(pp); return 0; @@ -518,8 +518,8 @@ err_free_msi: dw_pcie_free_msi(pp); err_deinit_host: - if (pp->ops->host_deinit) - pp->ops->host_deinit(pp); + if (pp->ops->deinit) + pp->ops->deinit(pp); return ret; } @@ -539,8 +539,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp) if (pp->has_msi_ctrl) dw_pcie_free_msi(pp); - if (pp->ops->host_deinit) - pp->ops->host_deinit(pp); + if (pp->ops->deinit) + pp->ops->deinit(pp); } EXPORT_SYMBOL_GPL(dw_pcie_host_deinit); @@ -842,8 +842,8 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci) return ret; } - if (pci->pp.ops->host_deinit) - pci->pp.ops->host_deinit(&pci->pp); + if (pci->pp.ops->deinit) + pci->pp.ops->deinit(&pci->pp); pci->suspended = true; @@ -860,8 +860,8 @@ int dw_pcie_resume_noirq(struct dw_pcie *pci) pci->suspended = false; - if (pci->pp.ops->host_init) { - ret = pci->pp.ops->host_init(&pci->pp); + if (pci->pp.ops->init) { + ret = pci->pp.ops->init(&pci->pp); if (ret) { dev_err(pci->dev, "Host init failed: %d\n", ret); return ret; diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index b625841e98..778588b4be 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -42,17 +42,16 @@ static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep) } static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return dw_pcie_ep_raise_legacy_irq(ep, func_no); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_INTX: + return dw_pcie_ep_raise_intx_irq(ep, func_no); + case PCI_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); - case PCI_EPC_IRQ_MSIX: + case PCI_IRQ_MSIX: return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); @@ -74,7 +73,7 @@ dw_plat_pcie_get_features(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops pcie_ep_ops = { - .ep_init = dw_plat_pcie_ep_init, + .init = dw_plat_pcie_ep_init, .raise_irq = dw_plat_pcie_ep_raise_irq, .get_features = dw_plat_pcie_get_features, }; diff --git 
a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 55ff76e3d3..26dae48374 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -300,10 +300,10 @@ enum dw_pcie_ltssm { }; struct dw_pcie_host_ops { - int (*host_init)(struct dw_pcie_rp *pp); - void (*host_deinit)(struct dw_pcie_rp *pp); - void (*host_post_init)(struct dw_pcie_rp *pp); - int (*msi_host_init)(struct dw_pcie_rp *pp); + int (*init)(struct dw_pcie_rp *pp); + void (*deinit)(struct dw_pcie_rp *pp); + void (*post_init)(struct dw_pcie_rp *pp); + int (*msi_init)(struct dw_pcie_rp *pp); void (*pme_turn_off)(struct dw_pcie_rp *pp); }; @@ -332,10 +332,10 @@ struct dw_pcie_rp { struct dw_pcie_ep_ops { void (*pre_init)(struct dw_pcie_ep *ep); - void (*ep_init)(struct dw_pcie_ep *ep); + void (*init)(struct dw_pcie_ep *ep); void (*deinit)(struct dw_pcie_ep *ep); int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u16 interrupt_num); + unsigned int type, u16 interrupt_num); const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep); /* * Provide a method to implement the different func config space @@ -344,7 +344,7 @@ struct dw_pcie_ep_ops { * return a 0, and implement code in callback function of platform * driver. */ - unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no); + unsigned int (*get_dbi_offset)(struct dw_pcie_ep *ep, u8 func_no); unsigned int (*get_dbi2_offset)(struct dw_pcie_ep *ep, u8 func_no); }; @@ -486,6 +486,99 @@ static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val) dw_pcie_write_dbi2(pci, reg, 0x4, val); } +static inline unsigned int dw_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep, + u8 func_no) +{ + unsigned int dbi_offset = 0; + + if (ep->ops->get_dbi_offset) + dbi_offset = ep->ops->get_dbi_offset(ep, func_no); + + return dbi_offset; +} + +static inline u32 dw_pcie_ep_read_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg, size_t size) +{ + unsigned int offset = dw_pcie_ep_get_dbi_offset(ep, func_no); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + return dw_pcie_read_dbi(pci, offset + reg, size); +} + +static inline void dw_pcie_ep_write_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg, size_t size, u32 val) +{ + unsigned int offset = dw_pcie_ep_get_dbi_offset(ep, func_no); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + dw_pcie_write_dbi(pci, offset + reg, size, val); +} + +static inline void dw_pcie_ep_writel_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg, u32 val) +{ + dw_pcie_ep_write_dbi(ep, func_no, reg, 0x4, val); +} + +static inline u32 dw_pcie_ep_readl_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg) +{ + return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x4); +} + +static inline void dw_pcie_ep_writew_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg, u16 val) +{ + dw_pcie_ep_write_dbi(ep, func_no, reg, 0x2, val); +} + +static inline u16 dw_pcie_ep_readw_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg) +{ + return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x2); +} + +static inline void dw_pcie_ep_writeb_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg, u8 val) +{ + dw_pcie_ep_write_dbi(ep, func_no, reg, 0x1, val); +} + +static inline u8 dw_pcie_ep_readb_dbi(struct dw_pcie_ep *ep, u8 func_no, + u32 reg) +{ + return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x1); +} + +static inline unsigned int dw_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep, + u8 func_no) +{ + unsigned int dbi2_offset = 0; + + if (ep->ops->get_dbi2_offset) + 
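Aside: the new per-function DBI accessors fold ep->ops->get_dbi_offset() into every read and write. As a before/after fragment (not standalone; ep, func_no and ep_func are the variables from the MSI path earlier in this diff):

u16 msg_ctrl;

/* Before: the caller fetched the per-function offset by hand:
 *   func_offset = dw_pcie_ep_func_select(ep, func_no);
 *   msg_ctrl = dw_pcie_readw_dbi(pci, ep_func->msi_cap + func_offset +
 *                                PCI_MSI_FLAGS);
 */

/* After: the helper applies the per-function offset itself. */
msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, ep_func->msi_cap + PCI_MSI_FLAGS);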
dbi2_offset = ep->ops->get_dbi2_offset(ep, func_no); + else if (ep->ops->get_dbi_offset) /* for backward compatibility */ + dbi2_offset = ep->ops->get_dbi_offset(ep, func_no); + + return dbi2_offset; +} + +static inline void dw_pcie_ep_write_dbi2(struct dw_pcie_ep *ep, u8 func_no, + u32 reg, size_t size, u32 val) +{ + unsigned int offset = dw_pcie_ep_get_dbi2_offset(ep, func_no); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + dw_pcie_write_dbi2(pci, offset + reg, size, val); +} + +static inline void dw_pcie_ep_writel_dbi2(struct dw_pcie_ep *ep, u8 func_no, + u32 reg, u32 val) +{ + dw_pcie_ep_write_dbi2(ep, func_no, reg, 0x4, val); +} + static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci) { u32 reg; @@ -580,7 +673,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep); int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep); void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep); void dw_pcie_ep_exit(struct dw_pcie_ep *ep); -int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no); +int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no); int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, u8 interrupt_num); int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, @@ -613,7 +706,7 @@ static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep) { } -static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no) +static inline int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no) { return 0; } diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c index 2fe42c7009..d6842141d3 100644 --- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c +++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c @@ -72,7 +72,7 @@ static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, writel_relaxed(val, rockchip->apb_base + reg); } -static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc) +static void rockchip_pcie_intx_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc); @@ -202,7 +202,7 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp) if (ret < 0) dev_err(dev, "failed to init irq domain\n"); - irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler, + irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler, rockchip); /* LTSSM enable control mode */ @@ -215,7 +215,7 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops rockchip_pcie_host_ops = { - .host_init = rockchip_pcie_host_init, + .init = rockchip_pcie_host_init, }; static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip) diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c index 1e9b44b8bb..6636725203 100644 --- a/drivers/pci/controller/dwc/pcie-fu740.c +++ b/drivers/pci/controller/dwc/pcie-fu740.c @@ -279,7 +279,7 @@ static int fu740_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops fu740_pcie_host_ops = { - .host_init = fu740_pcie_host_init, + .init = fu740_pcie_host_init, }; static const struct dw_pcie_ops dw_pcie_ops = { diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index fd484cc7c4..7a11c618b9 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -198,7 +198,7 @@ static int histb_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops 
histb_pcie_host_ops = { - .host_init = histb_pcie_host_init, + .init = histb_pcie_host_init, }; static void histb_pcie_host_disable(struct histb_pcie *hipcie) diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index c9c93524e0..acbe4f6d32 100644 --- a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -391,7 +391,7 @@ static const struct dw_pcie_ops intel_pcie_ops = { }; static const struct dw_pcie_host_ops intel_pcie_dw_ops = { - .host_init = intel_pcie_rc_init, + .init = intel_pcie_rc_init, }; static int intel_pcie_probe(struct platform_device *pdev) diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c index 289bff99d7..208d3b0ba1 100644 --- a/drivers/pci/controller/dwc/pcie-keembay.c +++ b/drivers/pci/controller/dwc/pcie-keembay.c @@ -289,19 +289,18 @@ static void keembay_pcie_ep_init(struct dw_pcie_ep *ep) } static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - /* Legacy interrupts are not supported in Keem Bay */ - dev_err(pci->dev, "Legacy IRQ is not supported\n"); + case PCI_IRQ_INTX: + /* INTx interrupts are not supported in Keem Bay */ + dev_err(pci->dev, "INTx IRQ is not supported\n"); return -EINVAL; - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); - case PCI_EPC_IRQ_MSIX: + case PCI_IRQ_MSIX: return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "Unknown IRQ type %d\n", type); @@ -325,7 +324,7 @@ keembay_pcie_get_features(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops keembay_pcie_ep_ops = { - .ep_init = keembay_pcie_ep_init, + .init = keembay_pcie_ep_init, .raise_irq = keembay_pcie_ep_raise_irq, .get_features = keembay_pcie_get_features, }; diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index 2ee1467679..d5523f3021 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -366,7 +366,6 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie, struct platform_device *pdev) { struct device *dev = &pdev->dev; - char name[32]; int ret, i; /* This is an optional property */ @@ -387,9 +386,8 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie, if (pcie->gpio_id_clkreq[i] < 0) return pcie->gpio_id_clkreq[i]; - sprintf(name, "pcie_clkreq_%d", i); - pcie->clkreq_names[i] = devm_kstrdup_const(dev, name, - GFP_KERNEL); + pcie->clkreq_names[i] = devm_kasprintf(dev, GFP_KERNEL, + "pcie_clkreq_%d", i); if (!pcie->clkreq_names[i]) return -ENOMEM; } @@ -404,7 +402,6 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie, struct device *dev = &pdev->dev; struct device_node *parent, *child; int ret, slot, i; - char name[32]; for_each_available_child_of_node(node, parent) { for_each_available_child_of_node(parent, child) { @@ -430,9 +427,9 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie, slot = PCI_SLOT(ret); - sprintf(name, "pcie_perst_%d", slot); - pcie->reset_names[i] = devm_kstrdup_const(dev, name, - GFP_KERNEL); + pcie->reset_names[i] = devm_kasprintf(dev, GFP_KERNEL, + "pcie_perst_%d", + slot); if (!pcie->reset_names[i]) { ret = -ENOMEM; goto put_node; @@ -672,7 +669,7 @@ static const struct dw_pcie_ops kirin_dw_pcie_ops = { 
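Aside: the string handling change in pcie-kirin.c above follows a common cleanup pattern. Rather than sprintf() into a fixed stack buffer and then duplicating it with devm_kstrdup_const(), devm_kasprintf() formats and allocates the device-managed string in one step, sized exactly for the result. Illustrative fragment (the 'name' variable is only for the sketch):

const char *name;

name = devm_kasprintf(dev, GFP_KERNEL, "pcie_perst_%d", slot);
if (!name)
	return -ENOMEM;	/* freed automatically when 'dev' is released */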
}; static const struct dw_pcie_host_ops kirin_pcie_host_ops = { - .host_init = kirin_pcie_host_init, + .init = kirin_pcie_host_init, }; static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie) @@ -741,15 +738,13 @@ err: return ret; } -static int kirin_pcie_remove(struct platform_device *pdev) +static void kirin_pcie_remove(struct platform_device *pdev) { struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev); dw_pcie_host_deinit(&kirin_pcie->pci->pp); kirin_pcie_power_off(kirin_pcie); - - return 0; } struct kirin_pcie_data { @@ -818,7 +813,7 @@ static int kirin_pcie_probe(struct platform_device *pdev) static struct platform_driver kirin_pcie_driver = { .probe = kirin_pcie_probe, - .remove = kirin_pcie_remove, + .remove_new = kirin_pcie_remove, .driver = { .name = "kirin-pcie", .of_match_table = kirin_pcie_match, diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index 9e58f05519..36e5e80cd2 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -726,14 +726,14 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev, } static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return dw_pcie_ep_raise_legacy_irq(ep, func_no); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_INTX: + return dw_pcie_ep_raise_intx_irq(ep, func_no); + case PCI_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "Unknown IRQ type\n"); @@ -796,7 +796,7 @@ static void qcom_pcie_ep_init(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops pci_ep_ops = { - .ep_init = qcom_pcie_ep_init, + .init = qcom_pcie_ep_init, .raise_irq = qcom_pcie_ep_raise_irq, .get_features = qcom_pcie_epc_get_features, }; diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index e3dc27f2ac..23bb0eee5c 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -1279,9 +1279,9 @@ static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { - .host_init = qcom_pcie_host_init, - .host_deinit = qcom_pcie_host_deinit, - .host_post_init = qcom_pcie_host_post_init, + .init = qcom_pcie_host_init, + .deinit = qcom_pcie_host_deinit, + .post_init = qcom_pcie_host_post_init, }; /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c index 3bc45e513b..e9166619b1 100644 --- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c +++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c @@ -8,7 +8,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -307,8 +307,8 @@ static void rcar_gen4_pcie_host_deinit(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops rcar_gen4_pcie_host_ops = { - .host_init = rcar_gen4_pcie_host_init, - .host_deinit = rcar_gen4_pcie_host_deinit, + .init = rcar_gen4_pcie_host_init, + .deinit = rcar_gen4_pcie_host_deinit, }; static int rcar_gen4_add_dw_pcie_rp(struct rcar_gen4_pcie *rcar) @@ -362,15 +362,14 @@ static void rcar_gen4_pcie_ep_deinit(struct dw_pcie_ep *ep) } static int 
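Aside: pcie-kirin.c above also moves to the void-returning platform remove callback, registered through .remove_new during the transition period. Schematically, with hypothetical foo_* names:

static void foo_pcie_remove(struct platform_device *pdev)
{
	/* teardown only; a non-zero return could not be acted upon anyway */
}

static struct platform_driver foo_pcie_driver = {
	.probe      = foo_pcie_probe,
	.remove_new = foo_pcie_remove,	/* void remove, replaces .remove */
	.driver = {
		.name = "foo-pcie",
	},
};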
rcar_gen4_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *dw = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return dw_pcie_ep_raise_legacy_irq(ep, func_no); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_INTX: + return dw_pcie_ep_raise_intx_irq(ep, func_no); + case PCI_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); default: dev_err(dw->dev, "Unknown IRQ type\n"); @@ -394,7 +393,7 @@ rcar_gen4_pcie_ep_get_features(struct dw_pcie_ep *ep) return &rcar_gen4_pcie_epc_features; } -static unsigned int rcar_gen4_pcie_ep_func_conf_select(struct dw_pcie_ep *ep, +static unsigned int rcar_gen4_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep, u8 func_no) { return func_no * RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET; @@ -408,11 +407,11 @@ static unsigned int rcar_gen4_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep, static const struct dw_pcie_ep_ops pcie_ep_ops = { .pre_init = rcar_gen4_pcie_ep_pre_init, - .ep_init = rcar_gen4_pcie_ep_init, + .init = rcar_gen4_pcie_ep_init, .deinit = rcar_gen4_pcie_ep_deinit, .raise_irq = rcar_gen4_pcie_ep_raise_irq, .get_features = rcar_gen4_pcie_ep_get_features, - .func_conf_select = rcar_gen4_pcie_ep_func_conf_select, + .get_dbi_offset = rcar_gen4_pcie_ep_get_dbi_offset, .get_dbi2_offset = rcar_gen4_pcie_ep_get_dbi2_offset, }; @@ -436,7 +435,7 @@ static void rcar_gen4_remove_dw_pcie_ep(struct rcar_gen4_pcie *rcar) /* Common */ static int rcar_gen4_add_dw_pcie(struct rcar_gen4_pcie *rcar) { - rcar->mode = (enum dw_pcie_device_mode)of_device_get_match_data(&rcar->pdev->dev); + rcar->mode = (uintptr_t)of_device_get_match_data(&rcar->pdev->dev); switch (rcar->mode) { case DW_PCIE_RC_TYPE: diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c index 99d47ae803..201dced209 100644 --- a/drivers/pci/controller/dwc/pcie-spear13xx.c +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c @@ -148,7 +148,7 @@ static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = { - .host_init = spear13xx_pcie_host_init, + .init = spear13xx_pcie_host_init, }; static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 0fe113598e..7afa9e9aab 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -773,13 +773,13 @@ static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp) val_w); } -static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp) +static void tegra_pcie_enable_intx_interrupts(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 val; - /* Enable legacy interrupt generation */ + /* Enable INTX interrupt generation */ val = appl_readl(pcie, APPL_INTR_EN_L0_0); val |= APPL_INTR_EN_L0_0_SYS_INTR_EN; val |= APPL_INTR_EN_L0_0_INT_INT_EN; @@ -830,7 +830,7 @@ static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp) appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); tegra_pcie_enable_system_interrupts(pp); - tegra_pcie_enable_legacy_interrupts(pp); + tegra_pcie_enable_intx_interrupts(pp); if (IS_ENABLED(CONFIG_PCI_MSI)) tegra_pcie_enable_msi_interrupts(pp); } @@ -1060,7 +1060,7 @@ static const struct dw_pcie_ops tegra_dw_pcie_ops = { }; static const struct dw_pcie_host_ops 
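Aside: the cast changes in pcie-rcar-gen4.c here and in pcie-iproc-platform.c further down address the same issue: of_device_get_match_data() returns a void *, and converting it straight to an enum trips newer compiler warnings (clang's -Wvoid-pointer-to-enum-cast). Going through uintptr_t keeps the integer round trip explicit. Sketch with a hypothetical match table:

static const struct of_device_id foo_pcie_of_match[] = {
	{ .compatible = "vendor,foo-pcie-rc", .data = (void *)DW_PCIE_RC_TYPE },
	{ .compatible = "vendor,foo-pcie-ep", .data = (void *)DW_PCIE_EP_TYPE },
	{ /* sentinel */ }
};

/* ... in probe(): recover the integer by way of uintptr_t. */
mode = (uintptr_t)of_device_get_match_data(&pdev->dev);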
tegra_pcie_dw_host_ops = { - .host_init = tegra_pcie_dw_host_init, + .init = tegra_pcie_dw_host_init, }; static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie) @@ -1947,7 +1947,7 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) return IRQ_HANDLED; } -static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq) +static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq) { /* Tegra194 supports only INTA */ if (irq > 1) @@ -1979,20 +1979,19 @@ static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq) } static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num); + case PCI_IRQ_INTX: + return tegra_pcie_ep_raise_intx_irq(pcie, interrupt_num); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_MSI: return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num); - case PCI_EPC_IRQ_MSIX: + case PCI_IRQ_MSIX: return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num); default: diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c index cba3c88fcf..3fced0d3e8 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c +++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c @@ -212,7 +212,7 @@ static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep) dw_pcie_ep_reset_bar(pci, bar); } -static int uniphier_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep) +static int uniphier_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci); @@ -256,15 +256,14 @@ static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, } static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return uniphier_pcie_ep_raise_legacy_irq(ep); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_INTX: + return uniphier_pcie_ep_raise_intx_irq(ep); + case PCI_IRQ_MSI: return uniphier_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); default: @@ -284,7 +283,7 @@ uniphier_pcie_get_features(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = { - .ep_init = uniphier_pcie_ep_init, + .init = uniphier_pcie_ep_init, .raise_irq = uniphier_pcie_ep_raise_irq, .get_features = uniphier_pcie_get_features, }; diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c index 48c3eba817..5757ca3803 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier.c +++ b/drivers/pci/controller/dwc/pcie-uniphier.c @@ -67,7 +67,7 @@ struct uniphier_pcie { struct clk *clk; struct reset_control *rst; struct phy *phy; - struct irq_domain *legacy_irq_domain; + struct irq_domain *intx_irq_domain; }; #define to_uniphier_pcie(x) dev_get_drvdata((x)->dev) @@ -253,12 +253,12 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc) reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val); for_each_set_bit(bit, ®, PCI_NUM_INTX) - generic_handle_domain_irq(pcie->legacy_irq_domain, bit); + generic_handle_domain_irq(pcie->intx_irq_domain, bit); chained_irq_exit(chip, desc); } -static int uniphier_pcie_config_legacy_irq(struct 
dw_pcie_rp *pp) +static int uniphier_pcie_config_intx_irq(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct uniphier_pcie *pcie = to_uniphier_pcie(pci); @@ -279,9 +279,9 @@ static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp) goto out_put_node; } - pcie->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX, + pcie->intx_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX, &uniphier_intx_domain_ops, pp); - if (!pcie->legacy_irq_domain) { + if (!pcie->intx_irq_domain) { dev_err(pci->dev, "Failed to get INTx domain\n"); ret = -ENODEV; goto out_put_node; @@ -301,7 +301,7 @@ static int uniphier_pcie_host_init(struct dw_pcie_rp *pp) struct uniphier_pcie *pcie = to_uniphier_pcie(pci); int ret; - ret = uniphier_pcie_config_legacy_irq(pp); + ret = uniphier_pcie_config_intx_irq(pp); if (ret) return ret; @@ -311,7 +311,7 @@ static int uniphier_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops uniphier_pcie_host_ops = { - .host_init = uniphier_pcie_host_init, + .init = uniphier_pcie_host_init, }; static int uniphier_pcie_host_enable(struct uniphier_pcie *pcie) diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c index 71026fefa3..318c278e65 100644 --- a/drivers/pci/controller/dwc/pcie-visconti.c +++ b/drivers/pci/controller/dwc/pcie-visconti.c @@ -236,7 +236,7 @@ static int visconti_pcie_host_init(struct dw_pcie_rp *pp) } static const struct dw_pcie_host_ops visconti_pcie_host_ops = { - .host_init = visconti_pcie_host_init, + .init = visconti_pcie_host_init, }; static int visconti_get_resources(struct platform_device *pdev, diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index 6be3266cd7..45b7180618 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -85,7 +85,7 @@ int pci_host_common_probe(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(pci_host_common_probe); -int pci_host_common_remove(struct platform_device *pdev) +void pci_host_common_remove(struct platform_device *pdev) { struct pci_host_bridge *bridge = platform_get_drvdata(pdev); @@ -93,8 +93,6 @@ int pci_host_common_remove(struct platform_device *pdev) pci_stop_root_bus(bridge->bus); pci_remove_root_bus(bridge->bus); pci_unlock_rescan_remove(); - - return 0; } EXPORT_SYMBOL_GPL(pci_host_common_remove); diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c index 63865aeb63..41cb6a057f 100644 --- a/drivers/pci/controller/pci-host-generic.c +++ b/drivers/pci/controller/pci-host-generic.c @@ -82,7 +82,7 @@ static struct platform_driver gen_pci_driver = { .of_match_table = gen_pci_of_match, }, .probe = pci_host_common_probe, - .remove = pci_host_common_remove, + .remove_new = pci_host_common_remove, }; module_platform_driver(gen_pci_driver); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 11cc354a30..5992280e81 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -651,13 +651,6 @@ static void hv_arch_irq_unmask(struct irq_data *data) PCI_FUNC(pdev->devfn); params->int_target.vector = hv_msi_get_int_vector(data); - /* - * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by - * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a - * spurious interrupt storm. Not doing so does not seem to have a - * negative effect (yet?). 
- */ - if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) { /* * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index e47a77f943..c08683febd 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -48,6 +48,9 @@ #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00 +#define PCIE_RC_CFG_PRIV1_ROOT_CAP 0x4f8 +#define PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK 0xf8 + #define PCIE_RC_DL_MDIO_ADDR 0x1100 #define PCIE_RC_DL_MDIO_WR_DATA 0x1104 #define PCIE_RC_DL_MDIO_RD_DATA 0x1108 @@ -121,9 +124,12 @@ #define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204 #define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2 +#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK 0x200000 #define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000 #define PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x00800000 - +#define PCIE_CLKREQ_MASK \ + (PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK | \ + PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK) #define PCIE_INTR2_CPU_BASE 0x4300 #define PCIE_MSI_INTR2_BASE 0x4500 @@ -1028,13 +1034,89 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) return 0; } +/* + * This extends the timeout period for an access to an internal bus. This + * access timeout may occur during L1SS sleep periods, even without the + * presence of a PCIe access. + */ +static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie) +{ + /* TIMEOUT register is two registers before RGR1_SW_INIT_1 */ + const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8; + u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */ + + /* Each unit in timeout register is 1/216,000,000 seconds */ + writel(216 * timeout_us, pcie->base + REG_OFFSET); +} + +static void brcm_config_clkreq(struct brcm_pcie *pcie) +{ + static const char err_msg[] = "invalid 'brcm,clkreq-mode' DT string\n"; + const char *mode = "default"; + u32 clkreq_cntl; + int ret, tmp; + + ret = of_property_read_string(pcie->np, "brcm,clkreq-mode", &mode); + if (ret && ret != -EINVAL) { + dev_err(pcie->dev, err_msg); + mode = "safe"; + } + + /* Start out assuming safe mode (both mode bits cleared) */ + clkreq_cntl = readl(pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); + clkreq_cntl &= ~PCIE_CLKREQ_MASK; + + if (strcmp(mode, "no-l1ss") == 0) { + /* + * "no-l1ss" -- Provides Clock Power Management, L0s, and + * L1, but cannot provide L1 substate (L1SS) power + * savings. If the downstream device connected to the RC is + * L1SS capable AND the OS enables L1SS, all PCIe traffic + * may abruptly halt, potentially hanging the system. + */ + clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK; + /* + * We want to un-advertise L1 substates because if the OS + * tries to configure the controller into using L1 substate + * power savings it may fail or hang when the RC HW is in + * "no-l1ss" mode. + */ + tmp = readl(pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP); + u32p_replace_bits(&tmp, 2, PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK); + writel(tmp, pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP); + + } else if (strcmp(mode, "default") == 0) { + /* + * "default" -- Provides L0s, L1, and L1SS, but not + * compliant to provide Clock Power Management; + * specifically, may not be able to meet the Tclron max + * timing of 400ns as specified in "Dynamic Clock Control", + * section 3.2.5.2.2 of the PCIe spec. 
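Aside: the unit conversion in brcm_extend_rbus_timeout() above is easy to check in isolation. The timeout register counts ticks of 1/216,000,000 s, i.e. 216 ticks per microsecond, so the driver's 4 s L1SS setting becomes 864,000,000 ticks. A standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t timeout_us = 4000000;        /* 4 s, the driver's L1SS value */
	uint32_t ticks = 216u * timeout_us;   /* 216 ticks per microsecond */

	printf("%u ticks = %u s\n", ticks, ticks / 216000000u);
	return 0;
}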
This situation is + * atypical and should happen only with older devices. + */ + clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK; + brcm_extend_rbus_timeout(pcie); + + } else { + /* + * "safe" -- No power savings; refclk is driven by RC + * unconditionally. + */ + if (strcmp(mode, "safe") != 0) + dev_err(pcie->dev, err_msg); + mode = "safe"; + } + writel(clkreq_cntl, pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); + + dev_info(pcie->dev, "clkreq-mode set to %s\n", mode); +} + static int brcm_pcie_start_link(struct brcm_pcie *pcie) { struct device *dev = pcie->dev; void __iomem *base = pcie->base; u16 nlw, cls, lnksta; bool ssc_good = false; - u32 tmp; int ret, i; /* Unassert the fundamental reset */ @@ -1059,6 +1141,8 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie) return -ENODEV; } + brcm_config_clkreq(pcie); + if (pcie->gen) brcm_pcie_set_gen(pcie, pcie->gen); @@ -1077,14 +1161,6 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie) pci_speed_string(pcie_link_speed[cls]), nlw, ssc_good ? "(SSC)" : "(!SSC)"); - /* - * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1 - * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1. - */ - tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); - tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK; - writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); - return 0; } diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c index acdc583d29..4e6aa882a5 100644 --- a/drivers/pci/controller/pcie-iproc-platform.c +++ b/drivers/pci/controller/pcie-iproc-platform.c @@ -52,7 +52,7 @@ static int iproc_pltfm_pcie_probe(struct platform_device *pdev) pcie = pci_host_bridge_priv(bridge); pcie->dev = dev; - pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev); + pcie->type = (uintptr_t)of_device_get_match_data(dev); ret = of_address_to_resource(np, 0, ®); if (ret < 0) { diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c index 7034c0ff23..e6909271de 100644 --- a/drivers/pci/controller/pcie-rcar-ep.c +++ b/drivers/pci/controller/pcie-rcar-ep.c @@ -402,16 +402,15 @@ static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie, } static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); switch (type) { - case PCI_EPC_IRQ_LEGACY: + case PCI_IRQ_INTX: return rcar_pcie_ep_assert_intx(ep, fn, 0); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_MSI: return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num); default: diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index bf7cc0b6a6..996077ab7c 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -29,6 +29,7 @@ #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> #include "pcie-rcar.h" @@ -953,14 +954,22 @@ static const struct of_device_id rcar_pcie_of_match[] = { {}, }; +/* Design note 346 from Linear Technology says order is not important. 
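Aside: a compact way to read brcm_config_clkreq() above is as a three-way mapping from the optional brcm,clkreq-mode DT string to the two HARD_PCIE_HARD_DEBUG bits, with anything unrecognized falling back to "safe". A standalone sketch of just that mapping (bit values taken from the #defines in this diff):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CLKREQ_DEBUG_ENABLE	0x2		/* "no-l1ss": CLKREQ#-gated refclk, no L1SS */
#define L1SS_ENABLE		0x200000	/* "default": L1SS allowed, relaxed CPM timing */

static uint32_t clkreq_bits(const char *mode)
{
	if (strcmp(mode, "no-l1ss") == 0)
		return CLKREQ_DEBUG_ENABLE;
	if (strcmp(mode, "default") == 0)
		return L1SS_ENABLE;
	return 0;				/* "safe" (and any bad value): no power savings */
}

int main(void)
{
	const char * const modes[] = { "safe", "no-l1ss", "default", "typo" };

	for (unsigned int i = 0; i < 4; i++)
		printf("%-8s -> %#x\n", modes[i], clkreq_bits(modes[i]));
	return 0;
}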
*/ +static const char * const rcar_pcie_supplies[] = { + "vpcie1v5", + "vpcie3v3", + "vpcie12v", +}; + static int rcar_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct pci_host_bridge *bridge; struct rcar_pcie_host *host; struct rcar_pcie *pcie; + unsigned int i; u32 data; int err; - struct pci_host_bridge *bridge; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host)); if (!bridge) @@ -971,6 +980,13 @@ static int rcar_pcie_probe(struct platform_device *pdev) pcie->dev = dev; platform_set_drvdata(pdev, host); + for (i = 0; i < ARRAY_SIZE(rcar_pcie_supplies); i++) { + err = devm_regulator_get_enable_optional(dev, rcar_pcie_supplies[i]); + if (err < 0 && err != -ENODEV) + return dev_err_probe(dev, err, "failed to enable regulator: %s\n", + rcar_pcie_supplies[i]); + } + pm_runtime_enable(pcie->dev); err = pm_runtime_get_sync(pcie->dev); if (err < 0) { diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 0af0e965fb..c9046e97a1 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -26,16 +26,16 @@ * @max_regions: maximum number of regions supported by hardware * @ob_region_map: bitmask of mapped outbound regions * @ob_addr: base addresses in the AXI bus where the outbound regions start - * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ + * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ * dedicated outbound regions is mapped. * @irq_cpu_addr: base address in the CPU space where a write access triggers - * the sending of a memory write (MSI) / normal message (legacy + * the sending of a memory write (MSI) / normal message (INTX * IRQ) TLP through the PCIe bus. - * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ + * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ * dedicated outbound region. * @irq_pci_fn: the latest PCI function that has updated the mapping of - * the MSI/legacy IRQ dedicated outbound region. - * @irq_pending: bitmask of asserted legacy IRQs. + * the MSI/INTX IRQ dedicated outbound region. + * @irq_pending: bitmask of asserted INTX IRQs. 
*/ struct rockchip_pcie_ep { struct rockchip_pcie rockchip; @@ -325,8 +325,8 @@ static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn, } } -static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn, - u8 intx) +static int rockchip_pcie_ep_send_intx_irq(struct rockchip_pcie_ep *ep, u8 fn, + u8 intx) { u16 cmd; @@ -407,15 +407,14 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn, } static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn, - enum pci_epc_irq_type type, - u16 interrupt_num) + unsigned int type, u16 interrupt_num) { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); switch (type) { - case PCI_EPC_IRQ_LEGACY: - return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0); - case PCI_EPC_IRQ_MSI: + case PCI_IRQ_INTX: + return rockchip_pcie_ep_send_intx_irq(ep, fn, 0); + case PCI_IRQ_MSI: return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num); default: return -EINVAL; diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index afbbdccd19..300b9dc85e 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -505,7 +505,7 @@ static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc) +static void rockchip_pcie_intx_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc); @@ -553,7 +553,7 @@ static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip) return irq; irq_set_chained_handler_and_data(irq, - rockchip_pcie_legacy_int_handler, + rockchip_pcie_intx_handler, rockchip); irq = platform_get_irq_byname(pdev, "client"); diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c index 3d7f280eda..5be5dfd839 100644 --- a/drivers/pci/controller/pcie-xilinx-dma-pl.c +++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c @@ -684,10 +684,8 @@ static int xilinx_request_msi_irq(struct pl_dma_pcie *port) int ret; port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0"); - if (port->msi.irq_msi0 <= 0) { - dev_err(dev, "Unable to find msi0 IRQ line\n"); + if (port->msi.irq_msi0 <= 0) return port->msi.irq_msi0; - } ret = devm_request_irq(dev, port->msi.irq_msi0, xilinx_pl_dma_pcie_msi_handler_low, IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl", @@ -698,10 +696,8 @@ static int xilinx_request_msi_irq(struct pl_dma_pcie *port) } port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1"); - if (port->msi.irq_msi1 <= 0) { - dev_err(dev, "Unable to find msi1 IRQ line\n"); + if (port->msi.irq_msi1 <= 0) return port->msi.irq_msi1; - } ret = devm_request_irq(dev, port->msi.irq_msi1, xilinx_pl_dma_pcie_msi_handler_high, IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl", diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index e307aceba5..0408f4d612 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -166,7 +166,7 @@ struct nwl_pcie { int irq_intx; int irq_misc; struct nwl_msi msi; - struct irq_domain *legacy_irq_domain; + struct irq_domain *intx_irq_domain; struct clk *clk; raw_spinlock_t leg_mask_lock; }; @@ -324,7 +324,7 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc) while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) & MSGF_LEG_SR_MASKALL) != 0) { for_each_set_bit(bit, 
&status, PCI_NUM_INTX) - generic_handle_domain_irq(pcie->legacy_irq_domain, bit); + generic_handle_domain_irq(pcie->intx_irq_domain, bit); } chained_irq_exit(chip, desc); @@ -364,7 +364,7 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static void nwl_mask_leg_irq(struct irq_data *data) +static void nwl_mask_intx_irq(struct irq_data *data) { struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); unsigned long flags; @@ -378,7 +378,7 @@ static void nwl_mask_leg_irq(struct irq_data *data) raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); } -static void nwl_unmask_leg_irq(struct irq_data *data) +static void nwl_unmask_intx_irq(struct irq_data *data) { struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); unsigned long flags; @@ -392,26 +392,26 @@ static void nwl_unmask_leg_irq(struct irq_data *data) raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); } -static struct irq_chip nwl_leg_irq_chip = { +static struct irq_chip nwl_intx_irq_chip = { .name = "nwl_pcie:legacy", - .irq_enable = nwl_unmask_leg_irq, - .irq_disable = nwl_mask_leg_irq, - .irq_mask = nwl_mask_leg_irq, - .irq_unmask = nwl_unmask_leg_irq, + .irq_enable = nwl_unmask_intx_irq, + .irq_disable = nwl_mask_intx_irq, + .irq_mask = nwl_mask_intx_irq, + .irq_unmask = nwl_unmask_intx_irq, }; -static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) +static int nwl_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) { - irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq); + irq_set_chip_and_handler(irq, &nwl_intx_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); irq_set_status_flags(irq, IRQ_LEVEL); return 0; } -static const struct irq_domain_ops legacy_domain_ops = { - .map = nwl_legacy_map, +static const struct irq_domain_ops intx_domain_ops = { + .map = nwl_intx_map, .xlate = pci_irqd_intx_xlate, }; @@ -525,20 +525,20 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) { struct device *dev = pcie->dev; struct device_node *node = dev->of_node; - struct device_node *legacy_intc_node; + struct device_node *intc_node; - legacy_intc_node = of_get_next_child(node, NULL); - if (!legacy_intc_node) { + intc_node = of_get_next_child(node, NULL); + if (!intc_node) { dev_err(dev, "No legacy intc node found\n"); return -EINVAL; } - pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node, - PCI_NUM_INTX, - &legacy_domain_ops, - pcie); - of_node_put(legacy_intc_node); - if (!pcie->legacy_irq_domain) { + pcie->intx_irq_domain = irq_domain_add_linear(intc_node, + PCI_NUM_INTX, + &intx_domain_ops, + pcie); + of_node_put(intc_node); + if (!pcie->intx_irq_domain) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; } @@ -710,14 +710,14 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) /* Enable all misc interrupts */ nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK); - /* Disable all legacy interrupts */ + /* Disable all INTX interrupts */ nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK); - /* Clear pending legacy interrupts */ + /* Clear pending INTX interrupts */ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) & MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS); - /* Enable all legacy interrupts */ + /* Enable all INTX interrupts */ nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK); /* Enable the bridge config interrupt */ diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 
0452cbc362..87b7856f37 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -984,7 +984,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) return -ENOMEM; vmd->dev = dev; - vmd->instance = ida_simple_get(&vmd_instance_ida, 0, 0, GFP_KERNEL); + vmd->instance = ida_alloc(&vmd_instance_ida, GFP_KERNEL); if (vmd->instance < 0) return vmd->instance; @@ -1026,7 +1026,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) return 0; out_release_instance: - ida_simple_remove(&vmd_instance_ida, vmd->instance); + ida_free(&vmd_instance_ida, vmd->instance); return err; } @@ -1048,7 +1048,7 @@ static void vmd_remove(struct pci_dev *dev) vmd_cleanup_srcu(vmd); vmd_detach_resources(vmd); vmd_remove_irq_domain(vmd); - ida_simple_remove(&vmd_instance_ida, vmd->instance); + ida_free(&vmd_instance_ida, vmd->instance); } static void vmd_shutdown(struct pci_dev *dev) diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c index 6dc918a8a0..1c3e4ea76b 100644 --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c @@ -21,6 +21,15 @@ /* Platform specific flags */ #define MHI_EPF_USE_DMA BIT(0) +struct pci_epf_mhi_dma_transfer { + struct pci_epf_mhi *epf_mhi; + struct mhi_ep_buf_info buf_info; + struct list_head node; + dma_addr_t paddr; + enum dma_data_direction dir; + size_t size; +}; + struct pci_epf_mhi_ep_info { const struct mhi_ep_cntrl_config *config; struct pci_epf_header *epf_header; @@ -124,6 +133,10 @@ struct pci_epf_mhi { resource_size_t mmio_phys; struct dma_chan *dma_chan_tx; struct dma_chan *dma_chan_rx; + struct workqueue_struct *dma_wq; + struct work_struct dma_work; + struct list_head dma_list; + spinlock_t list_lock; u32 mmio_size; int irq; }; @@ -205,7 +218,7 @@ static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector) * MHI supplies 0 based MSI vectors but the API expects the vector * number to start from 1, so we need to increment the vector by 1. 
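Aside on the vmd.c hunk above: the deprecated ida_simple_*() wrappers map directly onto the plain IDA API when, as here, no range limit is needed. Illustrative fragment of the equivalence:

/* Allocation: any free ID, smallest first. */
vmd->instance = ida_alloc(&vmd_instance_ida, GFP_KERNEL);
	/* was: ida_simple_get(&vmd_instance_ida, 0, 0, GFP_KERNEL) */
if (vmd->instance < 0)
	return vmd->instance;

/* Release, on the error path and in vmd_remove(). */
ida_free(&vmd_instance_ida, vmd->instance);
	/* was: ida_simple_remove(&vmd_instance_ida, vmd->instance) */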
*/ - pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI, + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_IRQ_MSI, vector + 1); } @@ -234,6 +247,9 @@ static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, mutex_unlock(&epf_mhi->lock); + if (buf_info->cb) + buf_info->cb(buf_info); + return 0; } @@ -262,6 +278,9 @@ static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl, mutex_unlock(&epf_mhi->lock); + if (buf_info->cb) + buf_info->cb(buf_info); + return 0; } @@ -412,6 +431,198 @@ err_unlock: return ret; } +static void pci_epf_mhi_dma_worker(struct work_struct *work) +{ + struct pci_epf_mhi *epf_mhi = container_of(work, struct pci_epf_mhi, dma_work); + struct device *dma_dev = epf_mhi->epf->epc->dev.parent; + struct pci_epf_mhi_dma_transfer *itr, *tmp; + struct mhi_ep_buf_info *buf_info; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&epf_mhi->list_lock, flags); + list_splice_tail_init(&epf_mhi->dma_list, &head); + spin_unlock_irqrestore(&epf_mhi->list_lock, flags); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + dma_unmap_single(dma_dev, itr->paddr, itr->size, itr->dir); + buf_info = &itr->buf_info; + buf_info->cb(buf_info); + kfree(itr); + } +} + +static void pci_epf_mhi_dma_async_callback(void *param) +{ + struct pci_epf_mhi_dma_transfer *transfer = param; + struct pci_epf_mhi *epf_mhi = transfer->epf_mhi; + + spin_lock(&epf_mhi->list_lock); + list_add_tail(&transfer->node, &epf_mhi->dma_list); + spin_unlock(&epf_mhi->list_lock); + + queue_work(epf_mhi->dma_wq, &epf_mhi->dma_work); +} + +static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_buf_info *buf_info) +{ + struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl); + struct device *dma_dev = epf_mhi->epf->epc->dev.parent; + struct pci_epf_mhi_dma_transfer *transfer = NULL; + struct dma_chan *chan = epf_mhi->dma_chan_rx; + struct device *dev = &epf_mhi->epf->dev; + DECLARE_COMPLETION_ONSTACK(complete); + struct dma_async_tx_descriptor *desc; + struct dma_slave_config config = {}; + dma_cookie_t cookie; + dma_addr_t dst_addr; + int ret; + + mutex_lock(&epf_mhi->lock); + + config.direction = DMA_DEV_TO_MEM; + config.src_addr = buf_info->host_addr; + + ret = dmaengine_slave_config(chan, &config); + if (ret) { + dev_err(dev, "Failed to configure DMA channel\n"); + goto err_unlock; + } + + dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size, + DMA_FROM_DEVICE); + ret = dma_mapping_error(dma_dev, dst_addr); + if (ret) { + dev_err(dev, "Failed to map remote memory\n"); + goto err_unlock; + } + + desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size, + DMA_DEV_TO_MEM, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(dev, "Failed to prepare DMA\n"); + ret = -EIO; + goto err_unmap; + } + + transfer = kzalloc(sizeof(*transfer), GFP_KERNEL); + if (!transfer) { + ret = -ENOMEM; + goto err_unmap; + } + + transfer->epf_mhi = epf_mhi; + transfer->paddr = dst_addr; + transfer->size = buf_info->size; + transfer->dir = DMA_FROM_DEVICE; + memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info)); + + desc->callback = pci_epf_mhi_dma_async_callback; + desc->callback_param = transfer; + + cookie = dmaengine_submit(desc); + ret = dma_submit_error(cookie); + if (ret) { + dev_err(dev, "Failed to do DMA submit\n"); + goto err_free_transfer; + } + + dma_async_issue_pending(chan); + + goto err_unlock; + +err_free_transfer: + kfree(transfer); +err_unmap: + dma_unmap_single(dma_dev, dst_addr, 
buf_info->size, DMA_FROM_DEVICE); +err_unlock: + mutex_unlock(&epf_mhi->lock); + + return ret; +} + +static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_buf_info *buf_info) +{ + struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl); + struct device *dma_dev = epf_mhi->epf->epc->dev.parent; + struct pci_epf_mhi_dma_transfer *transfer = NULL; + struct dma_chan *chan = epf_mhi->dma_chan_tx; + struct device *dev = &epf_mhi->epf->dev; + DECLARE_COMPLETION_ONSTACK(complete); + struct dma_async_tx_descriptor *desc; + struct dma_slave_config config = {}; + dma_cookie_t cookie; + dma_addr_t src_addr; + int ret; + + mutex_lock(&epf_mhi->lock); + + config.direction = DMA_MEM_TO_DEV; + config.dst_addr = buf_info->host_addr; + + ret = dmaengine_slave_config(chan, &config); + if (ret) { + dev_err(dev, "Failed to configure DMA channel\n"); + goto err_unlock; + } + + src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size, + DMA_TO_DEVICE); + ret = dma_mapping_error(dma_dev, src_addr); + if (ret) { + dev_err(dev, "Failed to map remote memory\n"); + goto err_unlock; + } + + desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size, + DMA_MEM_TO_DEV, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(dev, "Failed to prepare DMA\n"); + ret = -EIO; + goto err_unmap; + } + + transfer = kzalloc(sizeof(*transfer), GFP_KERNEL); + if (!transfer) { + ret = -ENOMEM; + goto err_unmap; + } + + transfer->epf_mhi = epf_mhi; + transfer->paddr = src_addr; + transfer->size = buf_info->size; + transfer->dir = DMA_TO_DEVICE; + memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info)); + + desc->callback = pci_epf_mhi_dma_async_callback; + desc->callback_param = transfer; + + cookie = dmaengine_submit(desc); + ret = dma_submit_error(cookie); + if (ret) { + dev_err(dev, "Failed to do DMA submit\n"); + goto err_free_transfer; + } + + dma_async_issue_pending(chan); + + goto err_unlock; + +err_free_transfer: + kfree(transfer); +err_unmap: + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE); +err_unlock: + mutex_unlock(&epf_mhi->lock); + + return ret; +} + struct epf_dma_filter { struct device *dev; u32 dma_mask; @@ -435,6 +646,7 @@ static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi) struct device *dev = &epf_mhi->epf->dev; struct epf_dma_filter filter; dma_cap_mask_t mask; + int ret; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); @@ -453,16 +665,35 @@ static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi) &filter); if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) { dev_err(dev, "Failed to request rx channel\n"); - dma_release_channel(epf_mhi->dma_chan_tx); - epf_mhi->dma_chan_tx = NULL; - return -ENODEV; + ret = -ENODEV; + goto err_release_tx; + } + + epf_mhi->dma_wq = alloc_workqueue("pci_epf_mhi_dma_wq", 0, 0); + if (!epf_mhi->dma_wq) { + ret = -ENOMEM; + goto err_release_rx; } + INIT_LIST_HEAD(&epf_mhi->dma_list); + INIT_WORK(&epf_mhi->dma_work, pci_epf_mhi_dma_worker); + spin_lock_init(&epf_mhi->list_lock); + return 0; + +err_release_rx: + dma_release_channel(epf_mhi->dma_chan_rx); + epf_mhi->dma_chan_rx = NULL; +err_release_tx: + dma_release_channel(epf_mhi->dma_chan_tx); + epf_mhi->dma_chan_tx = NULL; + + return ret; } static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi) { + destroy_workqueue(epf_mhi->dma_wq); dma_release_channel(epf_mhi->dma_chan_tx); dma_release_channel(epf_mhi->dma_chan_rx); epf_mhi->dma_chan_tx = NULL; @@ -535,12 +766,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf) mhi_cntrl->raise_irq = 
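Aside: both new *_async helpers above follow the standard dmaengine slave recipe. Stripped of error handling, and shown only for the read (DMA_DEV_TO_MEM) direction, the sequence is:

/* 1. Describe the peripheral side of the transfer. */
config.direction = DMA_DEV_TO_MEM;
config.src_addr  = buf_info->host_addr;
dmaengine_slave_config(chan, &config);

/* 2. Map the local buffer and build the descriptor. */
dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
			  DMA_FROM_DEVICE);
desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
				   DMA_DEV_TO_MEM,
				   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);

/* 3. Completion only queues the 'transfer' node from the dmaengine
 *    callback; unmapping and the client callback run later in
 *    pci_epf_mhi_dma_worker(). */
desc->callback       = pci_epf_mhi_dma_async_callback;
desc->callback_param = transfer;

/* 4. Hand the descriptor to the engine and kick it. */
dmaengine_submit(desc);
dma_async_issue_pending(chan);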
pci_epf_mhi_raise_irq; mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map; mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free; + mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read; + mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write; if (info->flags & MHI_EPF_USE_DMA) { - mhi_cntrl->read_from_host = pci_epf_mhi_edma_read; - mhi_cntrl->write_to_host = pci_epf_mhi_edma_write; - } else { - mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read; - mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write; + mhi_cntrl->read_sync = pci_epf_mhi_edma_read; + mhi_cntrl->write_sync = pci_epf_mhi_edma_write; + mhi_cntrl->read_async = pci_epf_mhi_edma_read_async; + mhi_cntrl->write_async = pci_epf_mhi_edma_write_async; } /* Register the MHI EP controller */ @@ -648,7 +880,7 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf) pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar); } -static struct pci_epc_event_ops pci_epf_mhi_event_ops = { +static const struct pci_epc_event_ops pci_epf_mhi_event_ops = { .core_init = pci_epf_mhi_core_init, .link_up = pci_epf_mhi_link_up, .link_down = pci_epf_mhi_link_down, @@ -686,7 +918,7 @@ static const struct pci_epf_device_id pci_epf_mhi_ids[] = { {}, }; -static struct pci_epf_ops pci_epf_mhi_ops = { +static const struct pci_epf_ops pci_epf_mhi_ops = { .unbind = pci_epf_mhi_unbind, .bind = pci_epf_mhi_bind, }; diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c index 9aac2c6f3b..0553946005 100644 --- a/drivers/pci/endpoint/functions/pci-epf-ntb.c +++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c @@ -140,9 +140,9 @@ static struct pci_epf_header epf_ntb_header = { static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up) { enum pci_epc_interface_type type; - enum pci_epc_irq_type irq_type; struct epf_ntb_epc *ntb_epc; struct epf_ntb_ctrl *ctrl; + unsigned int irq_type; struct pci_epc *epc; u8 func_no, vfunc_no; bool is_msix; @@ -159,7 +159,7 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up) ctrl->link_status |= LINK_STATUS_UP; else ctrl->link_status &= ~LINK_STATUS_UP; - irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI; + irq_type = is_msix ? 
PCI_IRQ_MSIX : PCI_IRQ_MSI; ret = pci_epc_raise_irq(epc, func_no, vfunc_no, irq_type, 1); if (ret) { dev_err(&epc->dev, @@ -2099,7 +2099,7 @@ static int epf_ntb_probe(struct pci_epf *epf, return 0; } -static struct pci_epf_ops epf_ntb_ops = { +static const struct pci_epf_ops epf_ntb_ops = { .bind = epf_ntb_bind, .unbind = epf_ntb_unbind, .add_cfs = epf_ntb_add_cfs, diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 1f0d2b8429..18c80002d3 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -19,11 +19,11 @@ #include <linux/pci-epf.h> #include <linux/pci_regs.h> -#define IRQ_TYPE_LEGACY 0 +#define IRQ_TYPE_INTX 0 #define IRQ_TYPE_MSI 1 #define IRQ_TYPE_MSIX 2 -#define COMMAND_RAISE_LEGACY_IRQ BIT(0) +#define COMMAND_RAISE_INTX_IRQ BIT(0) #define COMMAND_RAISE_MSI_IRQ BIT(1) #define COMMAND_RAISE_MSIX_IRQ BIT(2) #define COMMAND_READ BIT(3) @@ -600,9 +600,9 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, WRITE_ONCE(reg->status, status); switch (reg->irq_type) { - case IRQ_TYPE_LEGACY: + case IRQ_TYPE_INTX: pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, - PCI_EPC_IRQ_LEGACY, 0); + PCI_IRQ_INTX, 0); break; case IRQ_TYPE_MSI: count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no); @@ -612,7 +612,7 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, return; } pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, - PCI_EPC_IRQ_MSI, reg->irq_number); + PCI_IRQ_MSI, reg->irq_number); break; case IRQ_TYPE_MSIX: count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no); @@ -622,7 +622,7 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, return; } pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, - PCI_EPC_IRQ_MSIX, reg->irq_number); + PCI_IRQ_MSIX, reg->irq_number); break; default: dev_err(dev, "Failed to raise IRQ, unknown type\n"); @@ -659,7 +659,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) } switch (command) { - case COMMAND_RAISE_LEGACY_IRQ: + case COMMAND_RAISE_INTX_IRQ: case COMMAND_RAISE_MSI_IRQ: case COMMAND_RAISE_MSIX_IRQ: pci_epf_test_raise_irq(epf_test, reg); @@ -973,7 +973,7 @@ static int pci_epf_test_probe(struct pci_epf *epf, return 0; } -static struct pci_epf_ops ops = { +static const struct pci_epf_ops ops = { .unbind = pci_epf_test_unbind, .bind = pci_epf_test_bind, }; diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c index 2b7bc5a731..5b84821c0d 100644 --- a/drivers/pci/endpoint/functions/pci-epf-vntb.c +++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c @@ -1172,11 +1172,8 @@ static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits) func_no = ntb->epf->func_no; vfunc_no = ntb->epf->vfunc_no; - ret = pci_epc_raise_irq(ntb->epf->epc, - func_no, - vfunc_no, - PCI_EPC_IRQ_MSI, - interrupt_num + 1); + ret = pci_epc_raise_irq(ntb->epf->epc, func_no, vfunc_no, + PCI_IRQ_MSI, interrupt_num + 1); if (ret) dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n"); @@ -1383,7 +1380,7 @@ static void epf_ntb_unbind(struct pci_epf *epf) } // EPF driver probe -static struct pci_epf_ops epf_ntb_ops = { +static const struct pci_epf_ops epf_ntb_ops = { .bind = epf_ntb_bind, .unbind = epf_ntb_unbind, .add_cfs = epf_ntb_add_cfs, diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index 56e1184bc6..dcd4e66430 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ 
-211,13 +211,13 @@ EXPORT_SYMBOL_GPL(pci_epc_start); * @epc: the EPC device which has to interrupt the host * @func_no: the physical endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function - * @type: specify the type of interrupt; legacy, MSI or MSI-X + * @type: specify the type of interrupt; INTX, MSI or MSI-X * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N) * - * Invoke to raise an legacy, MSI or MSI-X interrupt + * Invoke to raise an INTX, MSI or MSI-X interrupt */ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, - enum pci_epc_irq_type type, u16 interrupt_num) + unsigned int type, u16 interrupt_num) { int ret; diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 25dbe85c42..aaa33e8dc4 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -745,6 +745,7 @@ static int sriov_init(struct pci_dev *dev, int pos) u16 ctrl, total; struct pci_sriov *iov; struct resource *res; + const char *res_name; struct pci_dev *pdev; pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl); @@ -785,6 +786,8 @@ found: nres = 0; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { res = &dev->resource[i + PCI_IOV_RESOURCES]; + res_name = pci_resource_name(dev, i + PCI_IOV_RESOURCES); + /* * If it is already FIXED, don't change it, something * (perhaps EA or header fixups) wants it this way. @@ -802,8 +805,8 @@ found: } iov->barsz[i] = resource_size(res); res->end = res->start + resource_size(res) * total - 1; - pci_info(dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n", - i, res, i, total); + pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n", + res_name, res, i, total); i += bar64; nres++; } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e62de97304..c3585229c1 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -851,6 +851,66 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res) EXPORT_SYMBOL(pci_find_resource); /** + * pci_resource_name - Return the name of the PCI resource + * @dev: PCI device to query + * @i: index of the resource + * + * Return the standard PCI resource (BAR) name according to their index. 
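Aside: the pci-epc-core.c documentation change above completes the conversion running through this whole diff: pci_epc_raise_irq() now takes the generic PCI_IRQ_* constants instead of enum pci_epc_irq_type. An illustrative call site:

/* Endpoint function raising the first MSI vector. */
ret = pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_IRQ_MSI, 1);
if (ret)
	dev_err(&epf->dev, "failed to raise MSI\n");

/* INTX and MSI-X use PCI_IRQ_INTX and PCI_IRQ_MSIX in the same slot. */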
+ */ +const char *pci_resource_name(struct pci_dev *dev, unsigned int i) +{ + static const char * const bar_name[] = { + "BAR 0", + "BAR 1", + "BAR 2", + "BAR 3", + "BAR 4", + "BAR 5", + "ROM", +#ifdef CONFIG_PCI_IOV + "VF BAR 0", + "VF BAR 1", + "VF BAR 2", + "VF BAR 3", + "VF BAR 4", + "VF BAR 5", +#endif + "bridge window", /* "io" included in %pR */ + "bridge window", /* "mem" included in %pR */ + "bridge window", /* "mem pref" included in %pR */ + }; + static const char * const cardbus_name[] = { + "BAR 1", + "unknown", + "unknown", + "unknown", + "unknown", + "unknown", +#ifdef CONFIG_PCI_IOV + "unknown", + "unknown", + "unknown", + "unknown", + "unknown", + "unknown", +#endif + "CardBus bridge window 0", /* I/O */ + "CardBus bridge window 1", /* I/O */ + "CardBus bridge window 0", /* mem */ + "CardBus bridge window 1", /* mem */ + }; + + if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS && + i < ARRAY_SIZE(cardbus_name)) + return cardbus_name[i]; + + if (i < ARRAY_SIZE(bar_name)) + return bar_name[i]; + + return "unknown"; +} + +/** * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos * @dev: the PCI device to operate on * @pos: config space offset of status word @@ -1219,6 +1279,9 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) if (delay > PCI_RESET_WAIT) pci_info(dev, "ready %dms after %s\n", delay - 1, reset_type); + else + pci_dbg(dev, "ready %dms after %s\n", delay - 1, + reset_type); return 0; } @@ -3329,6 +3392,7 @@ static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei, static int pci_ea_read(struct pci_dev *dev, int offset) { struct resource *res; + const char *res_name; int ent_size, ent_offset = offset; resource_size_t start, end; unsigned long flags; @@ -3358,6 +3422,7 @@ static int pci_ea_read(struct pci_dev *dev, int offset) goto out; res = pci_ea_get_resource(dev, bei, prop); + res_name = pci_resource_name(dev, bei); if (!res) { pci_err(dev, "Unsupported EA entry BEI: %u\n", bei); goto out; @@ -3431,16 +3496,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset) res->flags = flags; if (bei <= PCI_EA_BEI_BAR5) - pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", - bei, res, prop); + pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n", + res_name, res, prop); else if (bei == PCI_EA_BEI_ROM) - pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", - res, prop); + pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n", + res_name, res, prop); else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5) - pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", - bei - PCI_EA_BEI_VF_BAR0, res, prop); + pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n", + res_name, res, prop); else - pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n", + pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n", bei, res, prop); out: @@ -6263,6 +6328,41 @@ int pcie_set_mps(struct pci_dev *dev, int mps) } EXPORT_SYMBOL(pcie_set_mps); +static enum pci_bus_speed to_pcie_link_speed(u16 lnksta) +{ + return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)]; +} + +int pcie_link_speed_mbps(struct pci_dev *pdev) +{ + u16 lnksta; + int err; + + err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta); + if (err) + return err; + + switch (to_pcie_link_speed(lnksta)) { + case PCIE_SPEED_2_5GT: + return 2500; + case PCIE_SPEED_5_0GT: + return 5000; + case 
PCIE_SPEED_8_0GT: + return 8000; + case PCIE_SPEED_16_0GT: + return 16000; + case PCIE_SPEED_32_0GT: + return 32000; + case PCIE_SPEED_64_0GT: + return 64000; + default: + break; + } + + return -EINVAL; +} +EXPORT_SYMBOL(pcie_link_speed_mbps); + /** * pcie_bandwidth_available - determine minimum link settings of a PCIe * device and its bandwidth limitation @@ -6296,8 +6396,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, while (dev) { pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); - next_speed = pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, - lnksta)]; + next_speed = to_pcie_link_speed(lnksta); next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta); next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); @@ -6728,14 +6827,15 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar, resource_size_t align, bool resize) { struct resource *r = &dev->resource[bar]; + const char *r_name = pci_resource_name(dev, bar); resource_size_t size; if (!(r->flags & IORESOURCE_MEM)) return; if (r->flags & IORESOURCE_PCI_FIXED) { - pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n", - bar, r, (unsigned long long)align); + pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n", + r_name, r, (unsigned long long)align); return; } @@ -6771,8 +6871,8 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar, * devices and we use the second. */ - pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n", - bar, r, (unsigned long long)align); + pci_info(dev, "%s %pR: requesting alignment to %#llx\n", + r_name, r, (unsigned long long)align); if (resize) { r->start = 0; diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 1697cb8289..bfc56f7bee 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -255,6 +255,8 @@ void __pci_bus_assign_resources(const struct pci_bus *bus, struct list_head *fail_head); bool pci_bus_clip_resource(struct pci_dev *dev, int idx); +const char *pci_resource_name(struct pci_dev *dev, unsigned int i); + void pci_reassigndev_resource_alignment(struct pci_dev *dev); void pci_disable_bridge_window(struct pci_dev *dev); struct pci_bus *pci_bus_get(struct pci_bus *bus); diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 38e3346772..05fc30bb51 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -41,8 +41,8 @@ #define AER_MAX_TYPEOF_UNCOR_ERRS 27 /* as per PCI_ERR_UNCOR_STATUS*/ struct aer_err_source { - unsigned int status; - unsigned int id; + u32 status; /* PCI_ERR_ROOT_STATUS */ + u32 id; /* PCI_ERR_ROOT_ERR_SRC */ }; struct aer_rpc { @@ -435,10 +435,10 @@ void pci_aer_exit(struct pci_dev *dev) /* * AER error strings */ -static const char *aer_error_severity_string[] = { - "Uncorrected (Non-Fatal)", - "Uncorrected (Fatal)", - "Corrected" +static const char * const aer_error_severity_string[] = { + "Uncorrectable (Non-Fatal)", + "Uncorrectable (Fatal)", + "Correctable" }; static const char *aer_error_layer[] = { diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index eb2c77d522..bc0bd86695 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -426,17 +426,6 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint) } } -static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos, - u32 clear, u32 set) -{ - u32 val; - - pci_read_config_dword(pdev, pos, &val); - val &= ~clear; - val |= set; - pci_write_config_dword(pdev, pos, val); -} - /* Calculate L1.2 PM substate timing parameters */ static void 
aspm_calc_l12_info(struct pcie_link_state *link, u32 parent_l1ss_cap, u32 child_l1ss_cap) @@ -501,10 +490,12 @@ static void aspm_calc_l12_info(struct pcie_link_state *link, cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK; if (pl1_2_enables || cl1_2_enables) { - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1_2_MASK, 0); - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(child, + child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(parent, + parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); } /* Program T_POWER_ON times in both ports */ @@ -512,22 +503,26 @@ static void aspm_calc_l12_info(struct pcie_link_state *link, pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2); /* Program Common_Mode_Restore_Time in upstream device */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); /* Program LTR_L1.2_THRESHOLD time in both ports */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, + ctl1); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, + ctl1); if (pl1_2_enables || cl1_2_enables) { - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0, - pl1_2_enables); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0, - cl1_2_enables); + pci_clear_and_set_config_dword(parent, + parent->l1ss + PCI_L1SS_CTL1, 0, + pl1_2_enables); + pci_clear_and_set_config_dword(child, + child->l1ss + PCI_L1SS_CTL1, 0, + cl1_2_enables); } } @@ -687,10 +682,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) */ /* Disable all L1 substates */ - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, 0); - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, 0); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, 0); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, 0); /* * If needed, disable L1, and it gets enabled later * in pcie_config_aspm_link(). 
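The new pcie_link_speed_mbps() in the hunks above maps the negotiated link speed to a raw megabit-per-second figure with one switch case per PCIe generation. A sketch of the same mapping, using assumed speed codes rather than the kernel's enum pci_bus_speed values:

```c
#include <stdio.h>

/* Assumed speed codes for illustration; the kernel uses enum pci_bus_speed. */
enum pcie_speed {
	SPEED_2_5GT, SPEED_5_0GT, SPEED_8_0GT,
	SPEED_16_0GT, SPEED_32_0GT, SPEED_64_0GT,
};

/* Same shape as pcie_link_speed_mbps(): one case per generation. */
static int speed_to_mbps(enum pcie_speed s)
{
	switch (s) {
	case SPEED_2_5GT:  return 2500;
	case SPEED_5_0GT:  return 5000;
	case SPEED_8_0GT:  return 8000;
	case SPEED_16_0GT: return 16000;
	case SPEED_32_0GT: return 32000;
	case SPEED_64_0GT: return 64000;
	}
	return -1;	/* stands in for the kernel's -EINVAL */
}

int main(void)
{
	printf("Gen4 (16 GT/s) raw rate: %d Mb/s\n", speed_to_mbps(SPEED_16_0GT));
	return 0;
}
```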
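The ASPM hunks above replace the driver-local pci_clear_and_set_dword() with the shared pci_clear_and_set_config_dword() helper. Its read-modify-write pattern is shown below on a plain variable standing in for a config-space register; the helper name and register values here are illustrative only.

```c
#include <stdio.h>
#include <stdint.h>

/* Pretend config-space register; in the kernel this is read and written
 * with pci_read_config_dword()/pci_write_config_dword(). */
static uint32_t fake_reg = 0xdeadbeef;

static void clear_and_set_dword(uint32_t clear, uint32_t set)
{
	uint32_t val = fake_reg;	/* read  */

	val &= ~clear;			/* clear the field being reprogrammed */
	val |= set;			/* install the new field value */
	fake_reg = val;			/* write */
}

int main(void)
{
	clear_and_set_dword(0x0000ff00, 0x00004200);
	printf("register is now %#010x\n", fake_reg);
	return 0;
}
```

Centralizing the helper keeps the L1 substate programming sequences shorter and avoids a second private copy of the same pattern.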
@@ -713,10 +708,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* Enable what we need to enable */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, val); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, val); } static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ed6b7f4873..b7335be560 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -180,6 +180,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, u64 l64, sz64, mask64; u16 orig_cmd; struct pci_bus_region region, inverted_region; + const char *res_name = pci_resource_name(dev, res - dev->resource); mask = type ? PCI_ROM_ADDRESS_MASK : ~0; @@ -254,8 +255,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, sz64 = pci_size(l64, sz64, mask64); if (!sz64) { - pci_info(dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n", - pos); + pci_info(dev, FW_BUG "%s: invalid; can't size\n", res_name); goto fail; } @@ -265,8 +265,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED; res->start = 0; res->end = 0; - pci_err(dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n", - pos, (unsigned long long)sz64); + pci_err(dev, "%s: can't handle BAR larger than 4GB (size %#010llx)\n", + res_name, (unsigned long long)sz64); goto out; } @@ -275,8 +275,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, res->flags |= IORESOURCE_UNSET; res->start = 0; res->end = sz64 - 1; - pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n", - pos, (unsigned long long)l64); + pci_info(dev, "%s: can't handle BAR above 4GB (bus address %#010llx)\n", + res_name, (unsigned long long)l64); goto out; } } @@ -302,8 +302,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, res->flags |= IORESOURCE_UNSET; res->start = 0; res->end = region.end - region.start; - pci_info(dev, "reg 0x%x: initial BAR value %#010llx invalid\n", - pos, (unsigned long long)region.start); + pci_info(dev, "%s: initial BAR value %#010llx invalid\n", + res_name, (unsigned long long)region.start); } goto out; @@ -313,7 +313,7 @@ fail: res->flags = 0; out: if (res->flags) - pci_info(dev, "reg 0x%x: %pR\n", pos, res); + pci_info(dev, "%s %pR\n", res_name, res); return (res->flags & IORESOURCE_MEM_64) ? 1 : 0; } @@ -344,64 +344,12 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) } } -static void pci_read_bridge_windows(struct pci_dev *bridge) +static void pci_read_bridge_io(struct pci_dev *dev, struct resource *res, + bool log) { - u16 io; - u32 pmem, tmp; - - pci_read_config_word(bridge, PCI_IO_BASE, &io); - if (!io) { - pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); - pci_read_config_word(bridge, PCI_IO_BASE, &io); - pci_write_config_word(bridge, PCI_IO_BASE, 0x0); - } - if (io) - bridge->io_window = 1; - - /* - * DECchip 21050 pass 2 errata: the bridge may miss an address - * disconnect boundary by one PCI data phase. Workaround: do not - * use prefetching on this device. 
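The __pci_read_base() hunks above now print pci_resource_name() when a BAR cannot be sized. The sizing itself follows the standard PCI scheme (write all-ones, read the register back, derive the size from the address bits the device implements); the sketch below shows that arithmetic with an invented readback value, since pci_size() itself is not part of this diff.

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t readback = 0xfff00000;	/* read back after writing 0xffffffff */
	uint32_t mask     = 0xfffffff0;	/* address bits of a 32-bit memory BAR */
	uint32_t sz       = readback & mask;

	if (!sz) {
		printf("invalid; can't size\n");	/* the FW_BUG case above */
		return 1;
	}
	sz = ~sz + 1;	/* two's complement of the masked value is the size */
	printf("BAR size: %#x bytes\n", sz);
	return 0;
}
```

With the values above the sketch reports a 1 MiB BAR.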
- */ - if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) - return; - - pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); - if (!pmem) { - pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, - 0xffe0fff0); - pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); - pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); - } - if (!pmem) - return; - - bridge->pref_window = 1; - - if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { - - /* - * Bridge claims to have a 64-bit prefetchable memory - * window; verify that the upper bits are actually - * writable. - */ - pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem); - pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, - 0xffffffff); - pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); - pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem); - if (tmp) - bridge->pref_64_window = 1; - } -} - -static void pci_read_bridge_io(struct pci_bus *child) -{ - struct pci_dev *dev = child->self; u8 io_base_lo, io_limit_lo; unsigned long io_mask, io_granularity, base, limit; struct pci_bus_region region; - struct resource *res; io_mask = PCI_IO_RANGE_MASK; io_granularity = 0x1000; @@ -411,7 +359,6 @@ static void pci_read_bridge_io(struct pci_bus *child) io_granularity = 0x400; } - res = child->resource[0]; pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo); base = (io_base_lo & io_mask) << 8; @@ -431,19 +378,18 @@ static void pci_read_bridge_io(struct pci_bus *child) region.start = base; region.end = limit + io_granularity - 1; pcibios_bus_to_resource(dev->bus, res, ®ion); - pci_info(dev, " bridge window %pR\n", res); + if (log) + pci_info(dev, " bridge window %pR\n", res); } } -static void pci_read_bridge_mmio(struct pci_bus *child) +static void pci_read_bridge_mmio(struct pci_dev *dev, struct resource *res, + bool log) { - struct pci_dev *dev = child->self; u16 mem_base_lo, mem_limit_lo; unsigned long base, limit; struct pci_bus_region region; - struct resource *res; - res = child->resource[1]; pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo); pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo); base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16; @@ -453,20 +399,19 @@ static void pci_read_bridge_mmio(struct pci_bus *child) region.start = base; region.end = limit + 0xfffff; pcibios_bus_to_resource(dev->bus, res, ®ion); - pci_info(dev, " bridge window %pR\n", res); + if (log) + pci_info(dev, " bridge window %pR\n", res); } } -static void pci_read_bridge_mmio_pref(struct pci_bus *child) +static void pci_read_bridge_mmio_pref(struct pci_dev *dev, struct resource *res, + bool log) { - struct pci_dev *dev = child->self; u16 mem_base_lo, mem_limit_lo; u64 base64, limit64; pci_bus_addr_t base, limit; struct pci_bus_region region; - struct resource *res; - res = child->resource[2]; pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo); base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16; @@ -506,10 +451,77 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) region.start = base; region.end = limit + 0xfffff; pcibios_bus_to_resource(dev->bus, res, ®ion); - pci_info(dev, " bridge window %pR\n", res); + if (log) + pci_info(dev, " bridge window %pR\n", res); } } +static void pci_read_bridge_windows(struct pci_dev *bridge) +{ + u32 buses; + u16 io; + u32 pmem, tmp; + struct resource res; + + pci_read_config_dword(bridge, 
PCI_PRIMARY_BUS, &buses); + res.flags = IORESOURCE_BUS; + res.start = (buses >> 8) & 0xff; + res.end = (buses >> 16) & 0xff; + pci_info(bridge, "PCI bridge to %pR%s\n", &res, + bridge->transparent ? " (subtractive decode)" : ""); + + pci_read_config_word(bridge, PCI_IO_BASE, &io); + if (!io) { + pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); + pci_read_config_word(bridge, PCI_IO_BASE, &io); + pci_write_config_word(bridge, PCI_IO_BASE, 0x0); + } + if (io) { + bridge->io_window = 1; + pci_read_bridge_io(bridge, &res, true); + } + + pci_read_bridge_mmio(bridge, &res, true); + + /* + * DECchip 21050 pass 2 errata: the bridge may miss an address + * disconnect boundary by one PCI data phase. Workaround: do not + * use prefetching on this device. + */ + if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) + return; + + pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); + if (!pmem) { + pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, + 0xffe0fff0); + pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); + pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); + } + if (!pmem) + return; + + bridge->pref_window = 1; + + if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { + + /* + * Bridge claims to have a 64-bit prefetchable memory + * window; verify that the upper bits are actually + * writable. + */ + pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem); + pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, + 0xffffffff); + pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); + pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem); + if (tmp) + bridge->pref_64_window = 1; + } + + pci_read_bridge_mmio_pref(bridge, &res, true); +} + void pci_read_bridge_bases(struct pci_bus *child) { struct pci_dev *dev = child->self; @@ -527,9 +539,9 @@ void pci_read_bridge_bases(struct pci_bus *child) for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i]; - pci_read_bridge_io(child); - pci_read_bridge_mmio(child); - pci_read_bridge_mmio_pref(child); + pci_read_bridge_io(child->self, child->resource[0], false); + pci_read_bridge_mmio(child->self, child->resource[1], false); + pci_read_bridge_mmio_pref(child->self, child->resource[2], false); if (dev->transparent) { pci_bus_for_each_resource(child->parent, res) { @@ -1817,6 +1829,43 @@ static void early_dump_pci_device(struct pci_dev *pdev) value, 256, false); } +static const char *pci_type_str(struct pci_dev *dev) +{ + static const char * const str[] = { + "PCIe Endpoint", + "PCIe Legacy Endpoint", + "PCIe unknown", + "PCIe unknown", + "PCIe Root Port", + "PCIe Switch Upstream Port", + "PCIe Switch Downstream Port", + "PCIe to PCI/PCI-X bridge", + "PCI/PCI-X to PCIe bridge", + "PCIe Root Complex Integrated Endpoint", + "PCIe Root Complex Event Collector", + }; + int type; + + if (pci_is_pcie(dev)) { + type = pci_pcie_type(dev); + if (type < ARRAY_SIZE(str)) + return str[type]; + + return "PCIe unknown"; + } + + switch (dev->hdr_type) { + case PCI_HEADER_TYPE_NORMAL: + return "conventional PCI endpoint"; + case PCI_HEADER_TYPE_BRIDGE: + return "conventional PCI bridge"; + case PCI_HEADER_TYPE_CARDBUS: + return "CardBus bridge"; + default: + return "conventional PCI"; + } +} + /** * pci_setup_device - Fill in class and map information of a device * @dev: the device structure to fill @@ -1887,8 +1936,9 @@ int pci_setup_device(struct pci_dev *dev) pci_set_removable(dev); - pci_info(dev, "[%04x:%04x] type %02x class %#08x\n", - 
dev->vendor, dev->device, dev->hdr_type, dev->class); + pci_info(dev, "[%04x:%04x] type %02x class %#08x %s\n", + dev->vendor, dev->device, dev->hdr_type, dev->class, + pci_type_str(dev)); /* Device class may be changed after fixup */ class = dev->class >> 8; @@ -1929,14 +1979,14 @@ int pci_setup_device(struct pci_dev *dev) res = &dev->resource[0]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev->bus, res, ®ion); - pci_info(dev, "legacy IDE quirk: reg 0x10: %pR\n", + pci_info(dev, "BAR 0 %pR: legacy IDE quirk\n", res); region.start = 0x3F6; region.end = 0x3F6; res = &dev->resource[1]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev->bus, res, ®ion); - pci_info(dev, "legacy IDE quirk: reg 0x14: %pR\n", + pci_info(dev, "BAR 1 %pR: legacy IDE quirk\n", res); } if ((progif & 4) == 0) { @@ -1945,14 +1995,14 @@ int pci_setup_device(struct pci_dev *dev) res = &dev->resource[2]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev->bus, res, ®ion); - pci_info(dev, "legacy IDE quirk: reg 0x18: %pR\n", + pci_info(dev, "BAR 2 %pR: legacy IDE quirk\n", res); region.start = 0x376; region.end = 0x376; res = &dev->resource[3]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev->bus, res, ®ion); - pci_info(dev, "legacy IDE quirk: reg 0x1c: %pR\n", + pci_info(dev, "BAR 3 %pR: legacy IDE quirk\n", res); } } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index fa770601c6..eff7f5df08 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -570,13 +570,14 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev) for (i = 0; i < PCI_STD_NUM_BARS; i++) { struct resource *r = &dev->resource[i]; + const char *r_name = pci_resource_name(dev, i); if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) { r->end = PAGE_SIZE - 1; r->start = 0; r->flags |= IORESOURCE_UNSET; - pci_info(dev, "expanded BAR %d to page size: %pR\n", - i, r); + pci_info(dev, "%s %pR: expanded to page size\n", + r_name, r); } } } @@ -605,6 +606,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size, u32 region; struct pci_bus_region bus_region; struct resource *res = dev->resource + pos; + const char *res_name = pci_resource_name(dev, pos); pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), ®ion); @@ -622,8 +624,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size, bus_region.end = region + size - 1; pcibios_bus_to_resource(dev->bus, res, &bus_region); - pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n", - name, PCI_BASE_ADDRESS_0 + (pos << 2), res); + pci_info(dev, FW_BUG "%s %pR: %s quirk\n", res_name, res, name); } /* @@ -670,6 +671,12 @@ static void quirk_io_region(struct pci_dev *dev, int port, bus_region.end = region + size - 1; pcibios_bus_to_resource(dev->bus, res, &bus_region); + /* + * "res" is typically a bridge window resource that's not being + * used for a bridge window, so it's just a place to stash this + * non-standard resource. Printing "nr" or pci_resource_name() of + * it doesn't really make sense. 
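The reworked pci_read_bridge_windows() above also logs the bridge's bus range, decoded from the PCI_PRIMARY_BUS dword. A tiny sketch of that decode, with an example register value:

```c
#include <stdio.h>

int main(void)
{
	/* Example PCI_PRIMARY_BUS dword: primary 0x00, secondary 0x04,
	 * subordinate 0x05 (the top byte is the secondary latency timer). */
	unsigned int buses = 0x00050400;
	unsigned int secondary   = (buses >> 8)  & 0xff;
	unsigned int subordinate = (buses >> 16) & 0xff;

	printf("PCI bridge to [bus %02x-%02x]\n", secondary, subordinate);
	return 0;
}
```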
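quirk_extend_bar_to_page() in the hunk above grows sub-page memory BARs to a full page and marks them for reassignment. A userspace sketch of that decision, with an assumed 4 KiB page size and invented addresses:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u	/* assumed 4 KiB pages */

struct resource {
	uint64_t start, end;
	unsigned int unset;	/* stands in for IORESOURCE_UNSET */
};

static uint64_t resource_size(const struct resource *r)
{
	return r->end - r->start + 1;
}

int main(void)
{
	/* A 256-byte memory BAR at an invented address. */
	struct resource bar = { .start = 0xfe000000, .end = 0xfe0000ff };

	if (resource_size(&bar) < PAGE_SIZE) {
		bar.start = 0;			/* drop the firmware-assigned address */
		bar.end   = PAGE_SIZE - 1;	/* ask for a full page instead */
		bar.unset = 1;			/* let the core reassign it later */
	}
	printf("expanded to page size: %#llx bytes (unset=%u)\n",
	       (unsigned long long)resource_size(&bar), bar.unset);
	return 0;
}
```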
+ */ if (!pci_claim_resource(dev, nr)) pci_info(dev, "quirk: %pR claimed by %s\n", res, name); } diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index fd74f1c99d..909e6a7c3c 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -213,6 +213,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head, struct list_head *head) { struct resource *res; + const char *res_name; struct pci_dev_resource *add_res, *tmp; struct pci_dev_resource *dev_res; resource_size_t add_size, align; @@ -222,6 +223,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head, bool found_match = false; res = add_res->res; + /* Skip resource that has been reset */ if (!res->flags) goto out; @@ -237,6 +239,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head, continue; idx = res - &add_res->dev->resource[0]; + res_name = pci_resource_name(add_res->dev, idx); add_size = add_res->add_size; align = add_res->min_align; if (!resource_size(res)) { @@ -249,9 +252,9 @@ static void reassign_resources_sorted(struct list_head *realloc_head, (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN); if (pci_reassign_resource(add_res->dev, idx, add_size, align)) - pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n", - (unsigned long long) add_size, idx, - res); + pci_info(add_res->dev, "%s %pR: failed to add %llx\n", + res_name, res, + (unsigned long long) add_size); } out: list_del(&add_res->list); @@ -571,6 +574,7 @@ EXPORT_SYMBOL(pci_setup_cardbus); static void pci_setup_bridge_io(struct pci_dev *bridge) { struct resource *res; + const char *res_name; struct pci_bus_region region; unsigned long io_mask; u8 io_base_lo, io_limit_lo; @@ -583,6 +587,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge) /* Set up the top and bottom of the PCI I/O segment for this bus */ res = &bridge->resource[PCI_BRIDGE_IO_WINDOW]; + res_name = pci_resource_name(bridge, PCI_BRIDGE_IO_WINDOW); pcibios_resource_to_bus(bridge->bus, ®ion, res); if (res->flags & IORESOURCE_IO) { pci_read_config_word(bridge, PCI_IO_BASE, &l); @@ -591,7 +596,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge) l = ((u16) io_limit_lo << 8) | io_base_lo; /* Set up upper 16 bits of I/O base/limit */ io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); - pci_info(bridge, " bridge window %pR\n", res); + pci_info(bridge, " %s %pR\n", res_name, res); } else { /* Clear upper 16 bits of I/O base/limit */ io_upper16 = 0; @@ -608,16 +613,18 @@ static void pci_setup_bridge_io(struct pci_dev *bridge) static void pci_setup_bridge_mmio(struct pci_dev *bridge) { struct resource *res; + const char *res_name; struct pci_bus_region region; u32 l; /* Set up the top and bottom of the PCI Memory segment for this bus */ res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW]; + res_name = pci_resource_name(bridge, PCI_BRIDGE_MEM_WINDOW); pcibios_resource_to_bus(bridge->bus, ®ion, res); if (res->flags & IORESOURCE_MEM) { l = (region.start >> 16) & 0xfff0; l |= region.end & 0xfff00000; - pci_info(bridge, " bridge window %pR\n", res); + pci_info(bridge, " %s %pR\n", res_name, res); } else { l = 0x0000fff0; } @@ -627,6 +634,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge) static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge) { struct resource *res; + const char *res_name; struct pci_bus_region region; u32 l, bu, lu; @@ -640,6 +648,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge) /* Set up PREF base/limit */ bu = lu = 0; res = 
&bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; + res_name = pci_resource_name(bridge, PCI_BRIDGE_PREF_MEM_WINDOW); pcibios_resource_to_bus(bridge->bus, ®ion, res); if (res->flags & IORESOURCE_PREFETCH) { l = (region.start >> 16) & 0xfff0; @@ -648,7 +657,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge) bu = upper_32_bits(region.start); lu = upper_32_bits(region.end); } - pci_info(bridge, " bridge window %pR\n", res); + pci_info(bridge, " %s %pR\n", res_name, res); } else { l = 0x0000fff0; } @@ -1013,6 +1022,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, int i; pci_dev_for_each_resource(dev, r, i) { + const char *r_name = pci_resource_name(dev, i); resource_size_t r_size; if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) || @@ -1043,8 +1053,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, if (order < 0) order = 0; if (order >= ARRAY_SIZE(aligns)) { - pci_warn(dev, "disabling BAR %d: %pR (bad alignment %#llx)\n", - i, r, (unsigned long long) align); + pci_warn(dev, "%s %pR: disabling; bad alignment %#llx\n", + r_name, r, (unsigned long long) align); r->flags = 0; continue; } @@ -2235,6 +2245,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type) for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END; i++) { struct resource *res = &bridge->resource[i]; + const char *res_name = pci_resource_name(bridge, i); if ((res->flags ^ type) & PCI_RES_TYPE_MASK) continue; @@ -2247,8 +2258,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type) if (ret) goto cleanup; - pci_info(bridge, "BAR %d: releasing %pR\n", - i, res); + pci_info(bridge, "%s %pR: releasing\n", res_name, res); if (res->parent) release_resource(res); diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index ceaa69491f..c6d933ddfd 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -30,6 +30,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno) u32 new, check, mask; int reg; struct resource *res = dev->resource + resno; + const char *res_name = pci_resource_name(dev, resno); /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */ if (dev->is_virtfn) @@ -104,8 +105,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno) pci_read_config_dword(dev, reg, &check); if ((new ^ check) & mask) { - pci_err(dev, "BAR %d: error updating (%#010x != %#010x)\n", - resno, new, check); + pci_err(dev, "%s: error updating (%#010x != %#010x)\n", + res_name, new, check); } if (res->flags & IORESOURCE_MEM_64) { @@ -113,8 +114,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno) pci_write_config_dword(dev, reg + 4, new); pci_read_config_dword(dev, reg + 4, &check); if (check != new) { - pci_err(dev, "BAR %d: error updating (high %#010x != %#010x)\n", - resno, new, check); + pci_err(dev, "%s: error updating (high %#010x != %#010x)\n", + res_name, new, check); } } @@ -135,11 +136,12 @@ void pci_update_resource(struct pci_dev *dev, int resno) int pci_claim_resource(struct pci_dev *dev, int resource) { struct resource *res = &dev->resource[resource]; + const char *res_name = pci_resource_name(dev, resource); struct resource *root, *conflict; if (res->flags & IORESOURCE_UNSET) { - pci_info(dev, "can't claim BAR %d %pR: no address assigned\n", - resource, res); + pci_info(dev, "%s %pR: can't claim; no address assigned\n", + res_name, res); return -EINVAL; } @@ -153,16 +155,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource) root = 
pci_find_parent_resource(dev, res); if (!root) { - pci_info(dev, "can't claim BAR %d %pR: no compatible bridge window\n", - resource, res); + pci_info(dev, "%s %pR: can't claim; no compatible bridge window\n", + res_name, res); res->flags |= IORESOURCE_UNSET; return -EINVAL; } conflict = request_resource_conflict(root, res); if (conflict) { - pci_info(dev, "can't claim BAR %d %pR: address conflict with %s %pR\n", - resource, res, conflict->name, conflict); + pci_info(dev, "%s %pR: can't claim; address conflict with %s %pR\n", + res_name, res, conflict->name, conflict); res->flags |= IORESOURCE_UNSET; return -EBUSY; } @@ -201,6 +203,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, { struct resource *root, *conflict; resource_size_t fw_addr, start, end; + const char *res_name = pci_resource_name(dev, resno); fw_addr = pcibios_retrieve_fw_addr(dev, resno); if (!fw_addr) @@ -231,12 +234,11 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, root = &iomem_resource; } - pci_info(dev, "BAR %d: trying firmware assignment %pR\n", - resno, res); + pci_info(dev, "%s: trying firmware assignment %pR\n", res_name, res); conflict = request_resource_conflict(root, res); if (conflict) { - pci_info(dev, "BAR %d: %pR conflicts with %s %pR\n", - resno, res, conflict->name, conflict); + pci_info(dev, "%s %pR: conflicts with %s %pR\n", res_name, res, + conflict->name, conflict); res->start = start; res->end = end; res->flags |= IORESOURCE_UNSET; @@ -325,6 +327,7 @@ static int _pci_assign_resource(struct pci_dev *dev, int resno, int pci_assign_resource(struct pci_dev *dev, int resno) { struct resource *res = dev->resource + resno; + const char *res_name = pci_resource_name(dev, resno); resource_size_t align, size; int ret; @@ -334,8 +337,8 @@ int pci_assign_resource(struct pci_dev *dev, int resno) res->flags |= IORESOURCE_UNSET; align = pci_resource_alignment(dev, res); if (!align) { - pci_info(dev, "BAR %d: can't assign %pR (bogus alignment)\n", - resno, res); + pci_info(dev, "%s %pR: can't assign; bogus alignment\n", + res_name, res); return -EINVAL; } @@ -348,18 +351,18 @@ int pci_assign_resource(struct pci_dev *dev, int resno) * working, which is better than just leaving it disabled. 
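pci_std_update_resource() above verifies a BAR write by reading the register back and comparing only the writable bits. The check itself, with invented register values:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t new   = 0xe0000000;	/* value written to the BAR */
	uint32_t check = 0xe0000008;	/* value read back; low bits are RO flags */
	uint32_t mask  = 0xfffffff0;	/* writable address bits of a 32-bit mem BAR */

	if ((new ^ check) & mask)
		printf("error updating (%#010x != %#010x)\n", new, check);
	else
		printf("BAR accepted the new address\n");
	return 0;
}
```

Masking before the comparison is what lets the read-only flag bits in the low nibble differ without being reported as an error.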
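pci_claim_resource() above fails either when no compatible bridge window contains the BAR or when the range conflicts with an already-claimed resource. A simplified sketch of that decision flow, with made-up windows and ranges standing in for the resource tree:

```c
#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };	/* inclusive, like struct resource */

static int contains(const struct range *outer, const struct range *r)
{
	return outer->start <= r->start && r->end <= outer->end;
}

static int overlaps(const struct range *a, const struct range *b)
{
	return a->start <= b->end && b->start <= a->end;
}

int main(void)
{
	struct range window  = { 0xe0000000, 0xefffffff };	/* bridge window */
	struct range claimed = { 0xe0000000, 0xe000ffff };	/* already claimed */
	struct range bar     = { 0xe0008000, 0xe0008fff };	/* BAR to claim */

	if (!contains(&window, &bar))
		printf("can't claim; no compatible bridge window\n");
	else if (overlaps(&claimed, &bar))
		printf("can't claim; address conflict\n");
	else
		printf("claimed\n");
	return 0;
}
```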
*/ if (ret < 0) { - pci_info(dev, "BAR %d: no space for %pR\n", resno, res); + pci_info(dev, "%s %pR: can't assign; no space\n", res_name, res); ret = pci_revert_fw_address(res, dev, resno, size); } if (ret < 0) { - pci_info(dev, "BAR %d: failed to assign %pR\n", resno, res); + pci_info(dev, "%s %pR: failed to assign\n", res_name, res); return ret; } res->flags &= ~IORESOURCE_UNSET; res->flags &= ~IORESOURCE_STARTALIGN; - pci_info(dev, "BAR %d: assigned %pR\n", resno, res); + pci_info(dev, "%s %pR: assigned\n", res_name, res); if (resno < PCI_BRIDGE_RESOURCES) pci_update_resource(dev, resno); @@ -367,10 +370,11 @@ int pci_assign_resource(struct pci_dev *dev, int resno) } EXPORT_SYMBOL(pci_assign_resource); -int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize, - resource_size_t min_align) +int pci_reassign_resource(struct pci_dev *dev, int resno, + resource_size_t addsize, resource_size_t min_align) { struct resource *res = dev->resource + resno; + const char *res_name = pci_resource_name(dev, resno); unsigned long flags; resource_size_t new_size; int ret; @@ -381,8 +385,8 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz flags = res->flags; res->flags |= IORESOURCE_UNSET; if (!res->parent) { - pci_info(dev, "BAR %d: can't reassign an unassigned resource %pR\n", - resno, res); + pci_info(dev, "%s %pR: can't reassign; unassigned resource\n", + res_name, res); return -EINVAL; } @@ -391,15 +395,15 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz ret = _pci_assign_resource(dev, resno, new_size, min_align); if (ret) { res->flags = flags; - pci_info(dev, "BAR %d: %pR (failed to expand by %#llx)\n", - resno, res, (unsigned long long) addsize); + pci_info(dev, "%s %pR: failed to expand by %#llx\n", + res_name, res, (unsigned long long) addsize); return ret; } res->flags &= ~IORESOURCE_UNSET; res->flags &= ~IORESOURCE_STARTALIGN; - pci_info(dev, "BAR %d: reassigned %pR (expanded by %#llx)\n", - resno, res, (unsigned long long) addsize); + pci_info(dev, "%s %pR: reassigned; expanded by %#llx\n", + res_name, res, (unsigned long long) addsize); if (resno < PCI_BRIDGE_RESOURCES) pci_update_resource(dev, resno); @@ -409,8 +413,9 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz void pci_release_resource(struct pci_dev *dev, int resno) { struct resource *res = dev->resource + resno; + const char *res_name = pci_resource_name(dev, resno); - pci_info(dev, "BAR %d: releasing %pR\n", resno, res); + pci_info(dev, "%s %pR: releasing\n", res_name, res); if (!res->parent) return; @@ -480,6 +485,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask) u16 cmd, old_cmd; int i; struct resource *r; + const char *r_name; pci_read_config_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; @@ -488,6 +494,8 @@ int pci_enable_resources(struct pci_dev *dev, int mask) if (!(mask & (1 << i))) continue; + r_name = pci_resource_name(dev, i); + if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM))) continue; if ((i == PCI_ROM_RESOURCE) && @@ -495,14 +503,14 @@ int pci_enable_resources(struct pci_dev *dev, int mask) continue; if (r->flags & IORESOURCE_UNSET) { - pci_err(dev, "can't enable device: BAR %d %pR not assigned\n", - i, r); + pci_err(dev, "%s %pR: not assigned; can't enable device\n", + r_name, r); return -EINVAL; } if (!r->parent) { - pci_err(dev, "can't enable device: BAR %d %pR not claimed\n", - i, r); + pci_err(dev, "%s %pR: not claimed; can't enable device\n", + r_name, r); return -EINVAL; 
}
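pci_enable_resources() above walks the resources selected by the caller's mask and, in code outside the shown hunk, ORs the I/O and memory enable bits into PCI_COMMAND, writing the register only if it changed. A userspace sketch of that accumulation; the IORESOURCE_* flag values and the three-entry resource table are assumptions for illustration.

```c
#include <stdio.h>
#include <stdint.h>

#define PCI_COMMAND_IO		0x1	/* standard PCI command bits */
#define PCI_COMMAND_MEMORY	0x2
#define IORESOURCE_IO		0x100	/* assumed flag values for this sketch */
#define IORESOURCE_MEM		0x200

int main(void)
{
	/* Three example resources: one memory BAR, one I/O BAR, one empty. */
	unsigned long res_flags[] = { IORESOURCE_MEM, IORESOURCE_IO, 0 };
	unsigned int mask = 0x7;	/* caller wants all three enabled */
	uint16_t cmd = 0, old_cmd;

	old_cmd = cmd;
	for (unsigned int i = 0; i < 3; i++) {
		if (!(mask & (1u << i)))
			continue;
		if (res_flags[i] & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (res_flags[i] & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (cmd != old_cmd)
		printf("enabling device (%04x -> %04x)\n", old_cmd, cmd);
	return 0;
}
```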