author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-06 01:02:30 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-06 01:02:30 +0000
commit	76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree	f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /drivers/pci/controller/dwc
parent	Initial commit. (diff)
download	linux-76cb841cb886eef6b3bee341a2266c76578724ad.tar.xz
	linux-76cb841cb886eef6b3bee341a2266c76578724ad.zip
Adding upstream version 4.19.249.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/pci/controller/dwc')
21 files changed, 10204 insertions, 0 deletions
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
new file mode 100644
index 000000000..91b019424
--- /dev/null
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -0,0 +1,196 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "DesignWare PCI Core Support"
+	depends on PCI
+
+config PCIE_DW
+	bool
+
+config PCIE_DW_HOST
+	bool
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW
+
+config PCIE_DW_EP
+	bool
+	depends on PCI_ENDPOINT
+	select PCIE_DW
+
+config PCI_DRA7XX
+	bool
+
+config PCI_DRA7XX_HOST
+	bool "TI DRA7xx PCIe controller Host Mode"
+	depends on SOC_DRA7XX || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	depends on OF && HAS_IOMEM && TI_PIPE3
+	select PCIE_DW_HOST
+	select PCI_DRA7XX
+	default y
+	help
+	  Enables support for the PCIe controller in the DRA7xx SoC to work in
+	  host mode. There are two instances of the PCIe controller in DRA7xx.
+	  The controller can work either as EP or RC. To enable host-specific
+	  features, PCI_DRA7XX_HOST must be selected; to enable device-specific
+	  features, PCI_DRA7XX_EP must be selected. This uses the DesignWare
+	  core.
+
+config PCI_DRA7XX_EP
+	bool "TI DRA7xx PCIe controller Endpoint Mode"
+	depends on SOC_DRA7XX || COMPILE_TEST
+	depends on PCI_ENDPOINT
+	depends on OF && HAS_IOMEM && TI_PIPE3
+	select PCIE_DW_EP
+	select PCI_DRA7XX
+	help
+	  Enables support for the PCIe controller in the DRA7xx SoC to work in
+	  endpoint mode. There are two instances of the PCIe controller in
+	  DRA7xx. The controller can work either as EP or RC. To enable
+	  host-specific features, PCI_DRA7XX_HOST must be selected; to enable
+	  device-specific features, PCI_DRA7XX_EP must be selected. This uses
+	  the DesignWare core.
+
+config PCIE_DW_PLAT
+	bool
+
+config PCIE_DW_PLAT_HOST
+	bool "Platform bus based DesignWare PCIe Controller - Host mode"
+	depends on PCI && PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	select PCIE_DW_PLAT
+	help
+	  Enables support for the PCIe controller in the DesignWare IP to
+	  work in host mode. The controller can work either as EP or RC. To
+	  enable host-specific features, PCIE_DW_PLAT_HOST must be selected;
+	  to enable device-specific features, PCIE_DW_PLAT_EP must be
+	  selected.
+
+config PCIE_DW_PLAT_EP
+	bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
+	depends on PCI && PCI_MSI_IRQ_DOMAIN
+	depends on PCI_ENDPOINT
+	select PCIE_DW_EP
+	select PCIE_DW_PLAT
+	help
+	  Enables support for the PCIe controller in the DesignWare IP to
+	  work in endpoint mode. The controller can work either as EP or RC.
+	  To enable host-specific features, PCIE_DW_PLAT_HOST must be
+	  selected; to enable device-specific features, PCIE_DW_PLAT_EP must
+	  be selected.
+
+config PCI_EXYNOS
+	bool "Samsung Exynos PCIe controller"
+	depends on SOC_EXYNOS5440 || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+
+config PCI_IMX6
+	bool "Freescale i.MX6 PCIe controller"
+	depends on SOC_IMX6Q || (ARM && COMPILE_TEST)
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+
+config PCIE_SPEAR13XX
+	bool "STMicroelectronics SPEAr PCIe controller"
+	depends on ARCH_SPEAR13XX || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want PCIe support on SPEAr13XX SoCs.
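The PCIE_DW_HOST/PCIE_DW_EP options above gate the two halves of the shared DesignWare core (pcie-designware-host.c and pcie-designware-ep.c in the Makefile further down). As a rough sketch of the host-mode pattern these options enable (not part of this patch; the "my_*" names are invented, while struct dw_pcie, dw_pcie_host_ops, dw_pcie_setup_rc() and dw_pcie_host_init() are the core interfaces the drivers in this patch actually use), a glue driver has this shape:

/* Minimal sketch, not part of this patch: a DWC host-mode glue driver. */
#include <linux/kernel.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

static int my_pcie_host_init(struct pcie_port *pp)
{
	/* program the Root Complex defaults in the DWC core registers */
	dw_pcie_setup_rc(pp);
	return 0;
}

static const struct dw_pcie_host_ops my_pcie_host_ops = {
	.host_init = my_pcie_host_init,
};

static int my_pcie_probe(struct platform_device *pdev)
{
	struct dw_pcie *pci;

	pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = &pdev->dev;
	pci->pp.ops = &my_pcie_host_ops;

	/*
	 * A real driver also maps pci->dbi_base and fills pci->ops,
	 * as pci-dra7xx.c and pci-exynos.c below do.
	 */
	return dw_pcie_host_init(&pci->pp);
}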
+
+config PCI_KEYSTONE
+	bool "TI Keystone PCIe controller"
+	depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST)
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want to enable PCI controller support on Keystone
+	  SoCs. The PCI controller on Keystone is based on DesignWare hardware
+	  and therefore the driver re-uses the DesignWare core functions to
+	  implement the driver.
+
+config PCI_LAYERSCAPE
+	bool "Freescale Layerscape PCIe controller"
+	depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
+	depends on PCI_MSI_IRQ_DOMAIN
+	select MFD_SYSCON
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want PCIe controller support on Layerscape SoCs.
+
+config PCI_HISI
+	depends on OF && (ARM64 || COMPILE_TEST)
+	bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	select PCI_HOST_COMMON
+	help
+	  Say Y here if you want PCIe controller support on HiSilicon
+	  Hip05 and Hip06 SoCs.
+
+config PCIE_QCOM
+	bool "Qualcomm PCIe controller"
+	depends on OF && (ARCH_QCOM || COMPILE_TEST)
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here to enable PCIe controller support on Qualcomm SoCs. The
+	  PCIe controller uses the DesignWare core plus Qualcomm-specific
+	  hardware wrappers.
+
+config PCIE_ARMADA_8K
+	bool "Marvell Armada-8K PCIe controller"
+	depends on ARCH_MVEBU || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want to enable PCIe controller support on
+	  Armada-8K SoCs. The PCIe controller on Armada-8K is based on
+	  DesignWare hardware and therefore the driver re-uses the
+	  DesignWare core functions to implement the driver.
+
+config PCIE_ARTPEC6
+	bool
+
+config PCIE_ARTPEC6_HOST
+	bool "Axis ARTPEC-6 PCIe controller Host Mode"
+	depends on MACH_ARTPEC6 || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	select PCIE_ARTPEC6
+	help
+	  Enables support for the PCIe controller in the ARTPEC-6 SoC to work
+	  in host mode. This uses the DesignWare core.
+
+config PCIE_ARTPEC6_EP
+	bool "Axis ARTPEC-6 PCIe controller Endpoint Mode"
+	depends on MACH_ARTPEC6 || COMPILE_TEST
+	depends on PCI_ENDPOINT
+	select PCIE_DW_EP
+	select PCIE_ARTPEC6
+	help
+	  Enables support for the PCIe controller in the ARTPEC-6 SoC to work
+	  in endpoint mode. This uses the DesignWare core.
+
+config PCIE_KIRIN
+	depends on OF && (ARM64 || COMPILE_TEST)
+	bool "HiSilicon Kirin series SoCs PCIe controllers"
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want PCIe controller support
+	  on HiSilicon Kirin series SoCs.
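Several of the controllers above (DRA7xx, ARTPEC-6, the platform driver) are dual-mode: one compatible string per mode, with the operating mode carried in the OF match data. pci-dra7xx.c below implements exactly this pattern; a condensed sketch (hypothetical "my_*" and "vendor,*" names; the enum and the IS_ENABLED() gating mirror the real code in this patch) looks like:

/* Sketch, not part of this patch: RC/EP mode selection via match data. */
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

struct my_pcie_of_data {
	enum dw_pcie_device_mode mode;
};

static const struct my_pcie_of_data my_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct my_pcie_of_data my_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id my_pcie_of_match[] = {
	{ .compatible = "vendor,my-pcie",    .data = &my_pcie_rc_of_data },
	{ .compatible = "vendor,my-pcie-ep", .data = &my_pcie_ep_of_data },
	{},
};

static int my_pcie_probe(struct platform_device *pdev)
{
	const struct my_pcie_of_data *data =
		of_device_get_match_data(&pdev->dev);

	switch (data->mode) {
	case DW_PCIE_RC_TYPE:
		/* only usable if the host half was built (cf. Kconfig above) */
		if (!IS_ENABLED(CONFIG_PCIE_DW_HOST))
			return -ENODEV;
		/* ... map "rc" resources, then dw_pcie_host_init() ... */
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCIE_DW_EP))
			return -ENODEV;
		/* ... map "ep" resources, then dw_pcie_ep_init() ... */
		break;
	default:
		return -EINVAL;
	}
	return 0;
}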
+
+config PCIE_HISI_STB
+	bool "HiSilicon STB SoCs PCIe controllers"
+	depends on ARCH_HISI || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want PCIe controller support on HiSilicon STB
+	  SoCs.
+
+endmenu
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
new file mode 100644
index 000000000..5d2ce72c7
--- /dev/null
+++ b/drivers/pci/controller/dwc/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
+obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
+obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
+obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
+obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
+obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
+obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
+obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
+obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
+
+# The following drivers are for devices that use the generic ACPI
+# pci_root.c driver but don't support standard ECAM config access.
+# They contain MCFG quirks to replace the generic ECAM accessors with
+# device-specific ones that are shared with the DT driver.
+
+# The ACPI driver is generic and should not require driver-specific
+# config options to be enabled, so we always build these drivers on
+# ARM64 and use internal ifdefs to only build the pieces we need
+# depending on whether ACPI, the DT driver, or both are enabled.
+
+ifdef CONFIG_PCI
+obj-$(CONFIG_ARM64) += pcie-hisi.o
+endif
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
new file mode 100644
index 000000000..412524aa1
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -0,0 +1,852 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+/* PCIe controller wrapper DRA7XX configuration registers */
+
+#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
+#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN		0x0028
+#define	ERR_SYS						BIT(0)
+#define	ERR_FATAL					BIT(1)
+#define	ERR_NONFATAL					BIT(2)
+#define	ERR_COR						BIT(3)
+#define	ERR_AXI						BIT(4)
+#define	ERR_ECRC					BIT(5)
+#define	PME_TURN_OFF					BIT(8)
+#define	PME_TO_ACK					BIT(9)
+#define	PM_PME						BIT(10)
+#define	LINK_REQ_RST					BIT(11)
+#define	LINK_UP_EVT					BIT(12)
+#define	CFG_BME_EVT					BIT(13)
+#define	CFG_MSE_EVT					BIT(14)
+#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
+			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT) + +#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034 +#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038 +#define INTA BIT(0) +#define INTB BIT(1) +#define INTC BIT(2) +#define INTD BIT(3) +#define MSI BIT(4) +#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD) + +#define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100 +#define DEVICE_TYPE_EP 0x0 +#define DEVICE_TYPE_LEG_EP 0x1 +#define DEVICE_TYPE_RC 0x4 + +#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104 +#define LTSSM_EN 0x1 + +#define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C +#define LINK_UP BIT(16) +#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF + +#define EXP_CAP_ID_OFFSET 0x70 + +#define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124 +#define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128 + +#define PCIECTRL_TI_CONF_MSI_XMT 0x012c +#define MSI_REQ_GRANT BIT(0) +#define MSI_VECTOR_SHIFT 7 + +struct dra7xx_pcie { + struct dw_pcie *pci; + void __iomem *base; /* DT ti_conf */ + int phy_count; /* DT phy-names count */ + struct phy **phy; + int link_gen; + struct irq_domain *irq_domain; + enum dw_pcie_device_mode mode; +}; + +struct dra7xx_pcie_of_data { + enum dw_pcie_device_mode mode; +}; + +#define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev) + +static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset) +{ + return readl(pcie->base + offset); +} + +static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset, + u32 value) +{ + writel(value, pcie->base + offset); +} + +static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) +{ + return pci_addr & DRA7XX_CPU_TO_BUS_ADDR; +} + +static int dra7xx_pcie_link_up(struct dw_pcie *pci) +{ + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); + + return !!(reg & LINK_UP); +} + +static void dra7xx_pcie_stop_link(struct dw_pcie *pci) +{ + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + u32 reg; + + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); + reg &= ~LTSSM_EN; + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); +} + +static int dra7xx_pcie_establish_link(struct dw_pcie *pci) +{ + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + struct device *dev = pci->dev; + u32 reg; + u32 exp_cap_off = EXP_CAP_ID_OFFSET; + + if (dw_pcie_link_up(pci)) { + dev_err(dev, "link is already up\n"); + return 0; + } + + if (dra7xx->link_gen == 1) { + dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP, + 4, ®); + if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { + reg &= ~((u32)PCI_EXP_LNKCAP_SLS); + reg |= PCI_EXP_LNKCAP_SLS_2_5GB; + dw_pcie_write(pci->dbi_base + exp_cap_off + + PCI_EXP_LNKCAP, 4, reg); + } + + dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2, + 2, ®); + if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { + reg &= ~((u32)PCI_EXP_LNKCAP_SLS); + reg |= PCI_EXP_LNKCAP_SLS_2_5GB; + dw_pcie_write(pci->dbi_base + exp_cap_off + + PCI_EXP_LNKCTL2, 2, reg); + } + } + + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); + reg |= LTSSM_EN; + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); + + return 0; +} + +static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) +{ + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, + LEG_EP_INTERRUPTS | MSI); + + dra7xx_pcie_writel(dra7xx, + PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, + MSI | LEG_EP_INTERRUPTS); +} + +static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) +{ + 
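+	/* clear any latched main-event status, then unmask the same events */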
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, + INTERRUPTS); + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, + INTERRUPTS); +} + +static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) +{ + dra7xx_pcie_enable_wrapper_interrupts(dra7xx); + dra7xx_pcie_enable_msi_interrupts(dra7xx); +} + +static int dra7xx_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + + dw_pcie_setup_rc(pp); + + dra7xx_pcie_establish_link(pci); + dw_pcie_wait_for_link(pci); + dw_pcie_msi_init(pp); + dra7xx_pcie_enable_interrupts(dra7xx); + + return 0; +} + +static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { + .host_init = dra7xx_pcie_host_init, +}; + +static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops intx_domain_ops = { + .map = dra7xx_pcie_intx_map, + .xlate = pci_irqd_intx_xlate, +}; + +static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct device *dev = pci->dev; + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node = of_get_next_child(node, NULL); + + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found\n"); + return -ENODEV; + } + + dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &intx_domain_ops, pp); + if (!dra7xx->irq_domain) { + dev_err(dev, "Failed to get a INTx IRQ domain\n"); + return -ENODEV; + } + + return 0; +} + +static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg) +{ + struct dra7xx_pcie *dra7xx = arg; + struct dw_pcie *pci = dra7xx->pci; + struct pcie_port *pp = &pci->pp; + unsigned long reg; + u32 virq, bit; + + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI); + + switch (reg) { + case MSI: + dw_handle_msi_irq(pp); + break; + case INTA: + case INTB: + case INTC: + case INTD: + for_each_set_bit(bit, ®, PCI_NUM_INTX) { + virq = irq_find_mapping(dra7xx->irq_domain, bit); + if (virq) + generic_handle_irq(virq); + } + break; + } + + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg); + + return IRQ_HANDLED; +} + +static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) +{ + struct dra7xx_pcie *dra7xx = arg; + struct dw_pcie *pci = dra7xx->pci; + struct device *dev = pci->dev; + struct dw_pcie_ep *ep = &pci->ep; + u32 reg; + + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN); + + if (reg & ERR_SYS) + dev_dbg(dev, "System Error\n"); + + if (reg & ERR_FATAL) + dev_dbg(dev, "Fatal Error\n"); + + if (reg & ERR_NONFATAL) + dev_dbg(dev, "Non Fatal Error\n"); + + if (reg & ERR_COR) + dev_dbg(dev, "Correctable Error\n"); + + if (reg & ERR_AXI) + dev_dbg(dev, "AXI tag lookup fatal Error\n"); + + if (reg & ERR_ECRC) + dev_dbg(dev, "ECRC Error\n"); + + if (reg & PME_TURN_OFF) + dev_dbg(dev, + "Power Management Event Turn-Off message received\n"); + + if (reg & PME_TO_ACK) + dev_dbg(dev, + "Power Management Turn-Off Ack message received\n"); + + if (reg & PM_PME) + dev_dbg(dev, "PM Power Management Event message received\n"); + + if (reg & LINK_REQ_RST) + dev_dbg(dev, "Link Request Reset\n"); + + if (reg & LINK_UP_EVT) { + if (dra7xx->mode == DW_PCIE_EP_TYPE) + dw_pcie_ep_linkup(ep); + 
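+		/* (dw_pcie_ep_linkup() above notifies the DWC endpoint core) */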
dev_dbg(dev, "Link-up state change\n"); + } + + if (reg & CFG_BME_EVT) + dev_dbg(dev, "CFG 'Bus Master Enable' change\n"); + + if (reg & CFG_MSE_EVT) + dev_dbg(dev, "CFG 'Memory Space Enable' change\n"); + + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg); + + return IRQ_HANDLED; +} + +static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + enum pci_barno bar; + + for (bar = BAR_0; bar <= BAR_5; bar++) + dw_pcie_ep_reset_bar(pci, bar); + + dra7xx_pcie_enable_wrapper_interrupts(dra7xx); +} + +static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx) +{ + dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1); + mdelay(1); + dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1); +} + +static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx, + u8 interrupt_num) +{ + u32 reg; + + reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT; + reg |= MSI_REQ_GRANT; + dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg); +} + +static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, + enum pci_epc_irq_type type, u16 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + + switch (type) { + case PCI_EPC_IRQ_LEGACY: + dra7xx_pcie_raise_legacy_irq(dra7xx); + break; + case PCI_EPC_IRQ_MSI: + dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num); + break; + default: + dev_err(pci->dev, "UNKNOWN IRQ type\n"); + } + + return 0; +} + +static struct dw_pcie_ep_ops pcie_ep_ops = { + .ep_init = dra7xx_pcie_ep_init, + .raise_irq = dra7xx_pcie_raise_irq, +}; + +static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, + struct platform_device *pdev) +{ + int ret; + struct dw_pcie_ep *ep; + struct resource *res; + struct device *dev = &pdev->dev; + struct dw_pcie *pci = dra7xx->pci; + + ep = &pci->ep; + ep->ops = &pcie_ep_ops; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics"); + pci->dbi_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2"); + pci->dbi_base2 = devm_ioremap_resource(dev, res); + if (IS_ERR(pci->dbi_base2)) + return PTR_ERR(pci->dbi_base2); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); + if (!res) + return -EINVAL; + + ep->phys_base = res->start; + ep->addr_size = resource_size(res); + + ret = dw_pcie_ep_init(ep); + if (ret) { + dev_err(dev, "failed to initialize endpoint\n"); + return ret; + } + + return 0; +} + +static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, + struct platform_device *pdev) +{ + int ret; + struct dw_pcie *pci = dra7xx->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = pci->dev; + struct resource *res; + + pp->irq = platform_get_irq(pdev, 1); + if (pp->irq < 0) { + dev_err(dev, "missing IRQ resource\n"); + return pp->irq; + } + + ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler, + IRQF_SHARED | IRQF_NO_THREAD, + "dra7-pcie-msi", dra7xx); + if (ret) { + dev_err(dev, "failed to request irq\n"); + return ret; + } + + ret = dra7xx_pcie_init_irq_domain(pp); + if (ret < 0) + return ret; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics"); + pci->dbi_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + pp->ops = &dra7xx_pcie_host_ops; + + ret = dw_pcie_host_init(pp); 
+ if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup, + .start_link = dra7xx_pcie_establish_link, + .stop_link = dra7xx_pcie_stop_link, + .link_up = dra7xx_pcie_link_up, +}; + +static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx) +{ + int phy_count = dra7xx->phy_count; + + while (phy_count--) { + phy_power_off(dra7xx->phy[phy_count]); + phy_exit(dra7xx->phy[phy_count]); + } +} + +static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx) +{ + int phy_count = dra7xx->phy_count; + int ret; + int i; + + for (i = 0; i < phy_count; i++) { + ret = phy_init(dra7xx->phy[i]); + if (ret < 0) + goto err_phy; + + ret = phy_power_on(dra7xx->phy[i]); + if (ret < 0) { + phy_exit(dra7xx->phy[i]); + goto err_phy; + } + } + + return 0; + +err_phy: + while (--i >= 0) { + phy_power_off(dra7xx->phy[i]); + phy_exit(dra7xx->phy[i]); + } + + return ret; +} + +static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = { + .mode = DW_PCIE_RC_TYPE, +}; + +static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = { + .mode = DW_PCIE_EP_TYPE, +}; + +static const struct of_device_id of_dra7xx_pcie_match[] = { + { + .compatible = "ti,dra7-pcie", + .data = &dra7xx_pcie_rc_of_data, + }, + { + .compatible = "ti,dra7-pcie-ep", + .data = &dra7xx_pcie_ep_of_data, + }, + {}, +}; + +/* + * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 + * @dra7xx: the dra7xx device where the workaround should be applied + * + * Access to the PCIe slave port that are not 32-bit aligned will result + * in incorrect mapping to TLP Address and Byte enable fields. Therefore, + * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or + * 0x3. + * + * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1. 
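+ * The syscon register offset and bit mask to set come from the
+ * ti,syscon-unaligned-access DT property parsed by this function.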
+ */ +static int dra7xx_pcie_unaligned_memaccess(struct device *dev) +{ + int ret; + struct device_node *np = dev->of_node; + struct of_phandle_args args; + struct regmap *regmap; + + regmap = syscon_regmap_lookup_by_phandle(np, + "ti,syscon-unaligned-access"); + if (IS_ERR(regmap)) { + dev_dbg(dev, "can't get ti,syscon-unaligned-access\n"); + return -EINVAL; + } + + ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access", + 2, 0, &args); + if (ret) { + dev_err(dev, "failed to parse ti,syscon-unaligned-access\n"); + return ret; + } + + ret = regmap_update_bits(regmap, args.args[0], args.args[1], + args.args[1]); + if (ret) + dev_err(dev, "failed to enable unaligned access\n"); + + of_node_put(args.np); + + return ret; +} + +static int __init dra7xx_pcie_probe(struct platform_device *pdev) +{ + u32 reg; + int ret; + int irq; + int i; + int phy_count; + struct phy **phy; + struct device_link **link; + void __iomem *base; + struct resource *res; + struct dw_pcie *pci; + struct dra7xx_pcie *dra7xx; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + char name[10]; + struct gpio_desc *reset; + const struct of_device_id *match; + const struct dra7xx_pcie_of_data *data; + enum dw_pcie_device_mode mode; + + match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); + if (!match) + return -EINVAL; + + data = (struct dra7xx_pcie_of_data *)match->data; + mode = (enum dw_pcie_device_mode)data->mode; + + dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); + if (!dra7xx) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "missing IRQ resource: %d\n", irq); + return irq; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); + base = devm_ioremap_nocache(dev, res->start, resource_size(res)); + if (!base) + return -ENOMEM; + + phy_count = of_property_count_strings(np, "phy-names"); + if (phy_count < 0) { + dev_err(dev, "unable to find the strings\n"); + return phy_count; + } + + phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + + link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL); + if (!link) + return -ENOMEM; + + for (i = 0; i < phy_count; i++) { + snprintf(name, sizeof(name), "pcie-phy%d", i); + phy[i] = devm_phy_get(dev, name); + if (IS_ERR(phy[i])) + return PTR_ERR(phy[i]); + + link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); + if (!link[i]) { + ret = -EINVAL; + goto err_link; + } + } + + dra7xx->base = base; + dra7xx->phy = phy; + dra7xx->pci = pci; + dra7xx->phy_count = phy_count; + + ret = dra7xx_pcie_enable_phy(dra7xx); + if (ret) { + dev_err(dev, "failed to enable phy\n"); + return ret; + } + + platform_set_drvdata(pdev, dra7xx); + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + goto err_get_sync; + } + + reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH); + if (IS_ERR(reset)) { + ret = PTR_ERR(reset); + dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret); + goto err_gpio; + } + + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); + reg &= ~LTSSM_EN; + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); + + dra7xx->link_gen = of_pci_get_max_link_speed(np); + if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2) + dra7xx->link_gen = 2; + + switch (mode) { + case 
DW_PCIE_RC_TYPE: + if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) { + ret = -ENODEV; + goto err_gpio; + } + + dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, + DEVICE_TYPE_RC); + + ret = dra7xx_pcie_unaligned_memaccess(dev); + if (ret) + dev_err(dev, "WA for Errata i870 not applied\n"); + + ret = dra7xx_add_pcie_port(dra7xx, pdev); + if (ret < 0) + goto err_gpio; + break; + case DW_PCIE_EP_TYPE: + if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) { + ret = -ENODEV; + goto err_gpio; + } + + dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, + DEVICE_TYPE_EP); + + ret = dra7xx_pcie_unaligned_memaccess(dev); + if (ret) + goto err_gpio; + + ret = dra7xx_add_pcie_ep(dra7xx, pdev); + if (ret < 0) + goto err_gpio; + break; + default: + dev_err(dev, "INVALID device type %d\n", mode); + } + dra7xx->mode = mode; + + ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler, + IRQF_SHARED, "dra7xx-pcie-main", dra7xx); + if (ret) { + dev_err(dev, "failed to request irq\n"); + goto err_gpio; + } + + return 0; + +err_gpio: + pm_runtime_put(dev); + +err_get_sync: + pm_runtime_disable(dev); + dra7xx_pcie_disable_phy(dra7xx); + +err_link: + while (--i >= 0) + device_link_del(link[i]); + + return ret; +} + +#ifdef CONFIG_PM_SLEEP +static int dra7xx_pcie_suspend(struct device *dev) +{ + struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); + struct dw_pcie *pci = dra7xx->pci; + u32 val; + + if (dra7xx->mode != DW_PCIE_RC_TYPE) + return 0; + + /* clear MSE */ + val = dw_pcie_readl_dbi(pci, PCI_COMMAND); + val &= ~PCI_COMMAND_MEMORY; + dw_pcie_writel_dbi(pci, PCI_COMMAND, val); + + return 0; +} + +static int dra7xx_pcie_resume(struct device *dev) +{ + struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); + struct dw_pcie *pci = dra7xx->pci; + u32 val; + + if (dra7xx->mode != DW_PCIE_RC_TYPE) + return 0; + + /* set MSE */ + val = dw_pcie_readl_dbi(pci, PCI_COMMAND); + val |= PCI_COMMAND_MEMORY; + dw_pcie_writel_dbi(pci, PCI_COMMAND, val); + + return 0; +} + +static int dra7xx_pcie_suspend_noirq(struct device *dev) +{ + struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); + + dra7xx_pcie_disable_phy(dra7xx); + + return 0; +} + +static int dra7xx_pcie_resume_noirq(struct device *dev) +{ + struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); + int ret; + + ret = dra7xx_pcie_enable_phy(dra7xx); + if (ret) { + dev_err(dev, "failed to enable phy\n"); + return ret; + } + + return 0; +} +#endif + +static void dra7xx_pcie_shutdown(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); + int ret; + + dra7xx_pcie_stop_link(dra7xx->pci); + + ret = pm_runtime_put_sync(dev); + if (ret < 0) + dev_dbg(dev, "pm_runtime_put_sync failed\n"); + + pm_runtime_disable(dev); + dra7xx_pcie_disable_phy(dra7xx); +} + +static const struct dev_pm_ops dra7xx_pcie_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq, + dra7xx_pcie_resume_noirq) +}; + +static struct platform_driver dra7xx_pcie_driver = { + .driver = { + .name = "dra7-pcie", + .of_match_table = of_dra7xx_pcie_match, + .suppress_bind_attrs = true, + .pm = &dra7xx_pcie_pm_ops, + }, + .shutdown = dra7xx_pcie_shutdown, +}; +builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe); diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c new file mode 100644 index 000000000..14a6ba406 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -0,0 +1,538 @@ +// SPDX-License-Identifier: GPL-2.0 
+/* + * PCIe host controller driver for Samsung EXYNOS SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Jingoo Han <jg1.han@samsung.com> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/phy/phy.h> +#include <linux/resource.h> +#include <linux/signal.h> +#include <linux/types.h> + +#include "pcie-designware.h" + +#define to_exynos_pcie(x) dev_get_drvdata((x)->dev) + +/* PCIe ELBI registers */ +#define PCIE_IRQ_PULSE 0x000 +#define IRQ_INTA_ASSERT BIT(0) +#define IRQ_INTB_ASSERT BIT(2) +#define IRQ_INTC_ASSERT BIT(4) +#define IRQ_INTD_ASSERT BIT(6) +#define PCIE_IRQ_LEVEL 0x004 +#define PCIE_IRQ_SPECIAL 0x008 +#define PCIE_IRQ_EN_PULSE 0x00c +#define PCIE_IRQ_EN_LEVEL 0x010 +#define IRQ_MSI_ENABLE BIT(2) +#define PCIE_IRQ_EN_SPECIAL 0x014 +#define PCIE_PWR_RESET 0x018 +#define PCIE_CORE_RESET 0x01c +#define PCIE_CORE_RESET_ENABLE BIT(0) +#define PCIE_STICKY_RESET 0x020 +#define PCIE_NONSTICKY_RESET 0x024 +#define PCIE_APP_INIT_RESET 0x028 +#define PCIE_APP_LTSSM_ENABLE 0x02c +#define PCIE_ELBI_RDLH_LINKUP 0x064 +#define PCIE_ELBI_LTSSM_ENABLE 0x1 +#define PCIE_ELBI_SLV_AWMISC 0x11c +#define PCIE_ELBI_SLV_ARMISC 0x120 +#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21) + +struct exynos_pcie_mem_res { + void __iomem *elbi_base; /* DT 0th resource: PCIe CTRL */ +}; + +struct exynos_pcie_clk_res { + struct clk *clk; + struct clk *bus_clk; +}; + +struct exynos_pcie { + struct dw_pcie *pci; + struct exynos_pcie_mem_res *mem_res; + struct exynos_pcie_clk_res *clk_res; + const struct exynos_pcie_ops *ops; + int reset_gpio; + + struct phy *phy; +}; + +struct exynos_pcie_ops { + int (*get_mem_resources)(struct platform_device *pdev, + struct exynos_pcie *ep); + int (*get_clk_resources)(struct exynos_pcie *ep); + int (*init_clk_resources)(struct exynos_pcie *ep); + void (*deinit_clk_resources)(struct exynos_pcie *ep); +}; + +static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev, + struct exynos_pcie *ep) +{ + struct dw_pcie *pci = ep->pci; + struct device *dev = pci->dev; + struct resource *res; + + ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL); + if (!ep->mem_res) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ep->mem_res->elbi_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ep->mem_res->elbi_base)) + return PTR_ERR(ep->mem_res->elbi_base); + + return 0; +} + +static int exynos5440_pcie_get_clk_resources(struct exynos_pcie *ep) +{ + struct dw_pcie *pci = ep->pci; + struct device *dev = pci->dev; + + ep->clk_res = devm_kzalloc(dev, sizeof(*ep->clk_res), GFP_KERNEL); + if (!ep->clk_res) + return -ENOMEM; + + ep->clk_res->clk = devm_clk_get(dev, "pcie"); + if (IS_ERR(ep->clk_res->clk)) { + dev_err(dev, "Failed to get pcie rc clock\n"); + return PTR_ERR(ep->clk_res->clk); + } + + ep->clk_res->bus_clk = devm_clk_get(dev, "pcie_bus"); + if (IS_ERR(ep->clk_res->bus_clk)) { + dev_err(dev, "Failed to get pcie bus clock\n"); + return PTR_ERR(ep->clk_res->bus_clk); + } + + return 0; +} + +static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep) +{ + struct dw_pcie *pci = ep->pci; + struct device *dev = pci->dev; + int ret; + + ret = clk_prepare_enable(ep->clk_res->clk); + if (ret) { + dev_err(dev, "cannot enable pcie rc clock"); + 
return ret; + } + + ret = clk_prepare_enable(ep->clk_res->bus_clk); + if (ret) { + dev_err(dev, "cannot enable pcie bus clock"); + goto err_bus_clk; + } + + return 0; + +err_bus_clk: + clk_disable_unprepare(ep->clk_res->clk); + + return ret; +} + +static void exynos5440_pcie_deinit_clk_resources(struct exynos_pcie *ep) +{ + clk_disable_unprepare(ep->clk_res->bus_clk); + clk_disable_unprepare(ep->clk_res->clk); +} + +static const struct exynos_pcie_ops exynos5440_pcie_ops = { + .get_mem_resources = exynos5440_pcie_get_mem_resources, + .get_clk_resources = exynos5440_pcie_get_clk_resources, + .init_clk_resources = exynos5440_pcie_init_clk_resources, + .deinit_clk_resources = exynos5440_pcie_deinit_clk_resources, +}; + +static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg) +{ + writel(val, base + reg); +} + +static u32 exynos_pcie_readl(void __iomem *base, u32 reg) +{ + return readl(base + reg); +} + +static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on) +{ + u32 val; + + val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_AWMISC); + if (on) + val |= PCIE_ELBI_SLV_DBI_ENABLE; + else + val &= ~PCIE_ELBI_SLV_DBI_ENABLE; + exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_AWMISC); +} + +static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on) +{ + u32 val; + + val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_ARMISC); + if (on) + val |= PCIE_ELBI_SLV_DBI_ENABLE; + else + val &= ~PCIE_ELBI_SLV_DBI_ENABLE; + exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_ARMISC); +} + +static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep) +{ + u32 val; + + val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET); + val &= ~PCIE_CORE_RESET_ENABLE; + exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET); + exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_PWR_RESET); + exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_STICKY_RESET); + exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_NONSTICKY_RESET); +} + +static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep) +{ + u32 val; + + val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET); + val |= PCIE_CORE_RESET_ENABLE; + + exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET); + exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_STICKY_RESET); + exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET); + exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET); + exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET); +} + +static void exynos_pcie_assert_reset(struct exynos_pcie *ep) +{ + struct dw_pcie *pci = ep->pci; + struct device *dev = pci->dev; + + if (ep->reset_gpio >= 0) + devm_gpio_request_one(dev, ep->reset_gpio, + GPIOF_OUT_INIT_HIGH, "RESET"); +} + +static int exynos_pcie_establish_link(struct exynos_pcie *ep) +{ + struct dw_pcie *pci = ep->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = pci->dev; + + if (dw_pcie_link_up(pci)) { + dev_err(dev, "Link already up\n"); + return 0; + } + + exynos_pcie_assert_core_reset(ep); + + phy_reset(ep->phy); + + exynos_pcie_writel(ep->mem_res->elbi_base, 1, + PCIE_PWR_RESET); + + phy_power_on(ep->phy); + phy_init(ep->phy); + + exynos_pcie_deassert_core_reset(ep); + dw_pcie_setup_rc(pp); + exynos_pcie_assert_reset(ep); + + /* assert LTSSM enable */ + exynos_pcie_writel(ep->mem_res->elbi_base, PCIE_ELBI_LTSSM_ENABLE, + PCIE_APP_LTSSM_ENABLE); + + /* check if the link is up or not */ + if 
(!dw_pcie_wait_for_link(pci)) + return 0; + + phy_power_off(ep->phy); + return -ETIMEDOUT; +} + +static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep) +{ + u32 val; + + val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_PULSE); + exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_PULSE); +} + +static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep) +{ + u32 val; + + /* enable INTX interrupt */ + val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT | + IRQ_INTC_ASSERT | IRQ_INTD_ASSERT; + exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_PULSE); +} + +static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg) +{ + struct exynos_pcie *ep = arg; + + exynos_pcie_clear_irq_pulse(ep); + return IRQ_HANDLED; +} + +static void exynos_pcie_msi_init(struct exynos_pcie *ep) +{ + struct dw_pcie *pci = ep->pci; + struct pcie_port *pp = &pci->pp; + u32 val; + + dw_pcie_msi_init(pp); + + /* enable MSI interrupt */ + val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_EN_LEVEL); + val |= IRQ_MSI_ENABLE; + exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_LEVEL); +} + +static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep) +{ + exynos_pcie_enable_irq_pulse(ep); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + exynos_pcie_msi_init(ep); +} + +static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size) +{ + struct exynos_pcie *ep = to_exynos_pcie(pci); + u32 val; + + exynos_pcie_sideband_dbi_r_mode(ep, true); + dw_pcie_read(base + reg, size, &val); + exynos_pcie_sideband_dbi_r_mode(ep, false); + return val; +} + +static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size, u32 val) +{ + struct exynos_pcie *ep = to_exynos_pcie(pci); + + exynos_pcie_sideband_dbi_w_mode(ep, true); + dw_pcie_write(base + reg, size, val); + exynos_pcie_sideband_dbi_w_mode(ep, false); +} + +static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, + u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct exynos_pcie *ep = to_exynos_pcie(pci); + int ret; + + exynos_pcie_sideband_dbi_r_mode(ep, true); + ret = dw_pcie_read(pci->dbi_base + where, size, val); + exynos_pcie_sideband_dbi_r_mode(ep, false); + return ret; +} + +static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, + u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct exynos_pcie *ep = to_exynos_pcie(pci); + int ret; + + exynos_pcie_sideband_dbi_w_mode(ep, true); + ret = dw_pcie_write(pci->dbi_base + where, size, val); + exynos_pcie_sideband_dbi_w_mode(ep, false); + return ret; +} + +static int exynos_pcie_link_up(struct dw_pcie *pci) +{ + struct exynos_pcie *ep = to_exynos_pcie(pci); + u32 val; + + val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_RDLH_LINKUP); + if (val == PCIE_ELBI_LTSSM_ENABLE) + return 1; + + return 0; +} + +static int exynos_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct exynos_pcie *ep = to_exynos_pcie(pci); + + exynos_pcie_establish_link(ep); + exynos_pcie_enable_interrupts(ep); + + return 0; +} + +static const struct dw_pcie_host_ops exynos_pcie_host_ops = { + .rd_own_conf = exynos_pcie_rd_own_conf, + .wr_own_conf = exynos_pcie_wr_own_conf, + .host_init = exynos_pcie_host_init, +}; + +static int __init exynos_add_pcie_port(struct exynos_pcie *ep, + struct platform_device *pdev) +{ + struct dw_pcie *pci = ep->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + + 
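+	/* IRQ resource 1 is the pulse/INTx IRQ; resource 0 is the MSI IRQ (below) */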
pp->irq = platform_get_irq(pdev, 1); + if (pp->irq < 0) { + dev_err(dev, "failed to get irq\n"); + return pp->irq; + } + ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler, + IRQF_SHARED, "exynos-pcie", ep); + if (ret) { + dev_err(dev, "failed to request irq\n"); + return ret; + } + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq(pdev, 0); + if (pp->msi_irq < 0) { + dev_err(dev, "failed to get msi irq\n"); + return pp->msi_irq; + } + } + + pp->ops = &exynos_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .read_dbi = exynos_pcie_read_dbi, + .write_dbi = exynos_pcie_write_dbi, + .link_up = exynos_pcie_link_up, +}; + +static int __init exynos_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct exynos_pcie *ep; + struct device_node *np = dev->of_node; + int ret; + + ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); + if (!ep) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + ep->pci = pci; + ep->ops = (const struct exynos_pcie_ops *) + of_device_get_match_data(dev); + + ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); + + ep->phy = devm_of_phy_get(dev, np, NULL); + if (IS_ERR(ep->phy)) { + if (PTR_ERR(ep->phy) != -ENODEV) + return PTR_ERR(ep->phy); + + ep->phy = NULL; + } + + if (ep->ops && ep->ops->get_mem_resources) { + ret = ep->ops->get_mem_resources(pdev, ep); + if (ret) + return ret; + } + + if (ep->ops && ep->ops->get_clk_resources && + ep->ops->init_clk_resources) { + ret = ep->ops->get_clk_resources(ep); + if (ret) + return ret; + ret = ep->ops->init_clk_resources(ep); + if (ret) + return ret; + } + + platform_set_drvdata(pdev, ep); + + ret = exynos_add_pcie_port(ep, pdev); + if (ret < 0) + goto fail_probe; + + return 0; + +fail_probe: + phy_exit(ep->phy); + + if (ep->ops && ep->ops->deinit_clk_resources) + ep->ops->deinit_clk_resources(ep); + return ret; +} + +static int __exit exynos_pcie_remove(struct platform_device *pdev) +{ + struct exynos_pcie *ep = platform_get_drvdata(pdev); + + if (ep->ops && ep->ops->deinit_clk_resources) + ep->ops->deinit_clk_resources(ep); + + return 0; +} + +static const struct of_device_id exynos_pcie_of_match[] = { + { + .compatible = "samsung,exynos5440-pcie", + .data = &exynos5440_pcie_ops + }, + {}, +}; + +static struct platform_driver exynos_pcie_driver = { + .remove = __exit_p(exynos_pcie_remove), + .driver = { + .name = "exynos-pcie", + .of_match_table = exynos_pcie_of_match, + }, +}; + +/* Exynos PCIe driver does not allow module unload */ + +static int __init exynos_pcie_init(void) +{ + return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe); +} +subsys_initcall(exynos_pcie_init); diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c new file mode 100644 index 000000000..3b2ceb566 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -0,0 +1,872 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Freescale i.MX6 SoCs + * + * Copyright (C) 2013 Kosagi + * http://www.kosagi.com + * + * Author: Sean Cross <xobs@kosagi.com> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> +#include 
<linux/mfd/syscon/imx7-iomuxc-gpr.h> +#include <linux/module.h> +#include <linux/of_gpio.h> +#include <linux/of_device.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/resource.h> +#include <linux/signal.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/reset.h> + +#include "pcie-designware.h" + +#define to_imx6_pcie(x) dev_get_drvdata((x)->dev) + +enum imx6_pcie_variants { + IMX6Q, + IMX6SX, + IMX6QP, + IMX7D, +}; + +struct imx6_pcie { + struct dw_pcie *pci; + int reset_gpio; + bool gpio_active_high; + struct clk *pcie_bus; + struct clk *pcie_phy; + struct clk *pcie_inbound_axi; + struct clk *pcie; + struct regmap *iomuxc_gpr; + struct reset_control *pciephy_reset; + struct reset_control *apps_reset; + enum imx6_pcie_variants variant; + u32 tx_deemph_gen1; + u32 tx_deemph_gen2_3p5db; + u32 tx_deemph_gen2_6db; + u32 tx_swing_full; + u32 tx_swing_low; + int link_gen; + struct regulator *vpcie; +}; + +/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ +#define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000 +#define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50 +#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200 + +/* PCIe Root Complex registers (memory-mapped) */ +#define PCIE_RC_IMX6_MSI_CAP 0x50 +#define PCIE_RC_LCR 0x7c +#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1 +#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2 +#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf + +#define PCIE_RC_LCSR 0x80 + +/* PCIe Port Logic registers (memory-mapped) */ +#define PL_OFFSET 0x700 +#define PCIE_PL_PFLR (PL_OFFSET + 0x08) +#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16) +#define PCIE_PL_PFLR_FORCE_LINK (1 << 15) +#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) +#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) + +#define PCIE_PHY_CTRL (PL_OFFSET + 0x114) +#define PCIE_PHY_CTRL_DATA_LOC 0 +#define PCIE_PHY_CTRL_CAP_ADR_LOC 16 +#define PCIE_PHY_CTRL_CAP_DAT_LOC 17 +#define PCIE_PHY_CTRL_WR_LOC 18 +#define PCIE_PHY_CTRL_RD_LOC 19 + +#define PCIE_PHY_STAT (PL_OFFSET + 0x110) +#define PCIE_PHY_STAT_ACK_LOC 16 + +#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C +#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) + +/* PHY registers (not memory-mapped) */ +#define PCIE_PHY_RX_ASIC_OUT 0x100D +#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) + +#define PHY_RX_OVRD_IN_LO 0x1005 +#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) +#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) + +static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val) +{ + struct dw_pcie *pci = imx6_pcie->pci; + u32 val; + u32 max_iterations = 10; + u32 wait_counter = 0; + + do { + val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); + val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; + wait_counter++; + + if (val == exp_val) + return 0; + + udelay(1); + } while (wait_counter < max_iterations); + + return -ETIMEDOUT; +} + +static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr) +{ + struct dw_pcie *pci = imx6_pcie->pci; + u32 val; + int ret; + + val = addr << PCIE_PHY_CTRL_DATA_LOC; + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); + + val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC); + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); + + ret = pcie_phy_poll_ack(imx6_pcie, 1); + if (ret) + return ret; + + val = addr << PCIE_PHY_CTRL_DATA_LOC; + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); + + return pcie_phy_poll_ack(imx6_pcie, 0); +} + +/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ +static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data) 
+{ + struct dw_pcie *pci = imx6_pcie->pci; + u32 val, phy_ctl; + int ret; + + ret = pcie_phy_wait_ack(imx6_pcie, addr); + if (ret) + return ret; + + /* assert Read signal */ + phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC; + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl); + + ret = pcie_phy_poll_ack(imx6_pcie, 1); + if (ret) + return ret; + + val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); + *data = val & 0xffff; + + /* deassert Read signal */ + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00); + + return pcie_phy_poll_ack(imx6_pcie, 0); +} + +static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data) +{ + struct dw_pcie *pci = imx6_pcie->pci; + u32 var; + int ret; + + /* write addr */ + /* cap addr */ + ret = pcie_phy_wait_ack(imx6_pcie, addr); + if (ret) + return ret; + + var = data << PCIE_PHY_CTRL_DATA_LOC; + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); + + /* capture data */ + var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC); + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); + + ret = pcie_phy_poll_ack(imx6_pcie, 1); + if (ret) + return ret; + + /* deassert cap data */ + var = data << PCIE_PHY_CTRL_DATA_LOC; + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); + + /* wait for ack de-assertion */ + ret = pcie_phy_poll_ack(imx6_pcie, 0); + if (ret) + return ret; + + /* assert wr signal */ + var = 0x1 << PCIE_PHY_CTRL_WR_LOC; + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); + + /* wait for ack */ + ret = pcie_phy_poll_ack(imx6_pcie, 1); + if (ret) + return ret; + + /* deassert wr signal */ + var = data << PCIE_PHY_CTRL_DATA_LOC; + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); + + /* wait for ack de-assertion */ + ret = pcie_phy_poll_ack(imx6_pcie, 0); + if (ret) + return ret; + + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0); + + return 0; +} + +static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) +{ + u32 tmp; + + pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); + tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | + PHY_RX_OVRD_IN_LO_RX_PLL_EN); + pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); + + usleep_range(2000, 3000); + + pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); + tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | + PHY_RX_OVRD_IN_LO_RX_PLL_EN); + pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); +} + +/* Added for PCI abort handling */ +static int imx6q_pcie_abort_handler(unsigned long addr, + unsigned int fsr, struct pt_regs *regs) +{ + unsigned long pc = instruction_pointer(regs); + unsigned long instr = *(unsigned long *)pc; + int reg = (instr >> 12) & 15; + + /* + * If the instruction being executed was a read, + * make it look like it read all-ones. 
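+	 * ((instr & 0x0c100000) == 0x04100000 below matches the ARM single
+	 * data transfer load encodings, LDR/LDRB; bit 22 is the byte flag,
+	 * hence 255 for LDRB and -1 for LDR. The second test matches the
+	 * halfword/signed-byte load encodings, LDRH/LDRSB/LDRSH.)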
+ */ + if ((instr & 0x0c100000) == 0x04100000) { + unsigned long val; + + if (instr & 0x00400000) + val = 255; + else + val = -1; + + regs->uregs[reg] = val; + regs->ARM_pc += 4; + return 0; + } + + if ((instr & 0x0e100090) == 0x00100090) { + regs->uregs[reg] = -1; + regs->ARM_pc += 4; + return 0; + } + + return 1; +} + +static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) +{ + struct device *dev = imx6_pcie->pci->dev; + + switch (imx6_pcie->variant) { + case IMX7D: + reset_control_assert(imx6_pcie->pciephy_reset); + reset_control_assert(imx6_pcie->apps_reset); + break; + case IMX6SX: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_TEST_POWERDOWN, + IMX6SX_GPR12_PCIE_TEST_POWERDOWN); + /* Force PCIe PHY reset */ + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, + IMX6SX_GPR5_PCIE_BTNRST_RESET, + IMX6SX_GPR5_PCIE_BTNRST_RESET); + break; + case IMX6QP: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_SW_RST, + IMX6Q_GPR1_PCIE_SW_RST); + break; + case IMX6Q: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); + break; + } + + if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { + int ret = regulator_disable(imx6_pcie->vpcie); + + if (ret) + dev_err(dev, "failed to disable vpcie regulator: %d\n", + ret); + } +} + +static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) +{ + struct dw_pcie *pci = imx6_pcie->pci; + struct device *dev = pci->dev; + int ret = 0; + + switch (imx6_pcie->variant) { + case IMX6SX: + ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi); + if (ret) { + dev_err(dev, "unable to enable pcie_axi clock\n"); + break; + } + + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0); + break; + case IMX6QP: /* FALLTHROUGH */ + case IMX6Q: + /* power up core phy and enable ref clock */ + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); + /* + * the async reset input need ref clock to sync internally, + * when the ref clock comes after reset, internal synced + * reset time is too short, cannot meet the requirement. + * add one ~10us delay here. 
+ */ + udelay(10); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); + break; + case IMX7D: + break; + } + + return ret; +} + +static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) +{ + u32 val; + unsigned int retries; + struct device *dev = imx6_pcie->pci->dev; + + for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) { + regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val); + + if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED) + return; + + usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN, + PHY_PLL_LOCK_WAIT_USLEEP_MAX); + } + + dev_err(dev, "PCIe PLL lock timeout\n"); +} + +static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) +{ + struct dw_pcie *pci = imx6_pcie->pci; + struct device *dev = pci->dev; + int ret; + + if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) { + ret = regulator_enable(imx6_pcie->vpcie); + if (ret) { + dev_err(dev, "failed to enable vpcie regulator: %d\n", + ret); + return; + } + } + + ret = clk_prepare_enable(imx6_pcie->pcie_phy); + if (ret) { + dev_err(dev, "unable to enable pcie_phy clock\n"); + goto err_pcie_phy; + } + + ret = clk_prepare_enable(imx6_pcie->pcie_bus); + if (ret) { + dev_err(dev, "unable to enable pcie_bus clock\n"); + goto err_pcie_bus; + } + + ret = clk_prepare_enable(imx6_pcie->pcie); + if (ret) { + dev_err(dev, "unable to enable pcie clock\n"); + goto err_pcie; + } + + ret = imx6_pcie_enable_ref_clk(imx6_pcie); + if (ret) { + dev_err(dev, "unable to enable pcie ref clock\n"); + goto err_ref_clk; + } + + /* allow the clocks to stabilize */ + usleep_range(200, 500); + + /* Some boards don't have PCIe reset GPIO. */ + if (gpio_is_valid(imx6_pcie->reset_gpio)) { + gpio_set_value_cansleep(imx6_pcie->reset_gpio, + imx6_pcie->gpio_active_high); + msleep(100); + gpio_set_value_cansleep(imx6_pcie->reset_gpio, + !imx6_pcie->gpio_active_high); + } + + switch (imx6_pcie->variant) { + case IMX7D: + reset_control_deassert(imx6_pcie->pciephy_reset); + imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie); + break; + case IMX6SX: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, + IMX6SX_GPR5_PCIE_BTNRST_RESET, 0); + break; + case IMX6QP: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_SW_RST, 0); + + usleep_range(200, 500); + break; + case IMX6Q: /* Nothing to do */ + break; + } + + return; + +err_ref_clk: + clk_disable_unprepare(imx6_pcie->pcie); +err_pcie: + clk_disable_unprepare(imx6_pcie->pcie_bus); +err_pcie_bus: + clk_disable_unprepare(imx6_pcie->pcie_phy); +err_pcie_phy: + if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { + ret = regulator_disable(imx6_pcie->vpcie); + if (ret) + dev_err(dev, "failed to disable vpcie regulator: %d\n", + ret); + } +} + +static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) +{ + switch (imx6_pcie->variant) { + case IMX7D: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); + break; + case IMX6SX: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_RX_EQ_MASK, + IMX6SX_GPR12_PCIE_RX_EQ_2); + /* FALLTHROUGH */ + default: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); + + /* configure constant input signal to the pcie ctrl and phy */ + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6Q_GPR12_LOS_LEVEL, 9 << 4); + + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_DEEMPH_GEN1, + imx6_pcie->tx_deemph_gen1 << 0); + 
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, + imx6_pcie->tx_deemph_gen2_3p5db << 6); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, + imx6_pcie->tx_deemph_gen2_6db << 12); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_SWING_FULL, + imx6_pcie->tx_swing_full << 18); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_SWING_LOW, + imx6_pcie->tx_swing_low << 25); + break; + } + + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12); +} + +static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie) +{ + struct dw_pcie *pci = imx6_pcie->pci; + struct device *dev = pci->dev; + + /* check if the link is up or not */ + if (!dw_pcie_wait_for_link(pci)) + return 0; + + dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", + dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0), + dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1)); + return -ETIMEDOUT; +} + +static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) +{ + struct dw_pcie *pci = imx6_pcie->pci; + struct device *dev = pci->dev; + u32 tmp; + unsigned int retries; + + for (retries = 0; retries < 200; retries++) { + tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); + /* Test if the speed change finished. */ + if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) + return 0; + usleep_range(100, 1000); + } + + dev_err(dev, "Speed change timeout\n"); + return -EINVAL; +} + +static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) +{ + struct dw_pcie *pci = imx6_pcie->pci; + struct device *dev = pci->dev; + u32 tmp; + int ret; + + /* + * Force Gen1 operation when starting the link. In case the link is + * started in Gen2 mode, there is a possibility the devices on the + * bus will not be detected at all. This happens with PCIe switches. + */ + tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR); + tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; + tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1; + dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); + + /* Start LTSSM. */ + if (imx6_pcie->variant == IMX7D) + reset_control_deassert(imx6_pcie->apps_reset); + else + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); + + ret = imx6_pcie_wait_for_link(imx6_pcie); + if (ret) + goto err_reset_phy; + + if (imx6_pcie->link_gen == 2) { + /* Allow Gen2 mode after the link is up. */ + tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR); + tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; + tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2; + dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); + + /* + * Start Directed Speed Change so the best possible + * speed both link partners support can be negotiated. + */ + tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); + tmp |= PORT_LOGIC_SPEED_CHANGE; + dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); + + if (imx6_pcie->variant != IMX7D) { + /* + * On i.MX7, DIRECT_SPEED_CHANGE behaves differently + * from i.MX6 family when no link speed transition + * occurs and we go Gen1 -> yep, Gen1. The difference + * is that, in such case, it will not be cleared by HW + * which will cause the following code to report false + * failure. + */ + + ret = imx6_pcie_wait_for_speed_change(imx6_pcie); + if (ret) { + dev_err(dev, "Failed to bring link up!\n"); + goto err_reset_phy; + } + } + + /* Make sure link training is finished as well! 
*/ + ret = imx6_pcie_wait_for_link(imx6_pcie); + if (ret) { + dev_err(dev, "Failed to bring link up!\n"); + goto err_reset_phy; + } + } else { + dev_info(dev, "Link: Gen2 disabled\n"); + } + + tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR); + dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf); + return 0; + +err_reset_phy: + dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", + dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0), + dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1)); + imx6_pcie_reset_phy(imx6_pcie); + return ret; +} + +static int imx6_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); + + imx6_pcie_assert_core_reset(imx6_pcie); + imx6_pcie_init_phy(imx6_pcie); + imx6_pcie_deassert_core_reset(imx6_pcie); + dw_pcie_setup_rc(pp); + imx6_pcie_establish_link(imx6_pcie); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); + + return 0; +} + +static const struct dw_pcie_host_ops imx6_pcie_host_ops = { + .host_init = imx6_pcie_host_init, +}; + +static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = imx6_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq_byname(pdev, "msi"); + if (pp->msi_irq <= 0) { + dev_err(dev, "failed to get MSI irq\n"); + return -ENODEV; + } + } + + pp->ops = &imx6_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + /* No special ops needed, but pcie-designware still expects this struct */ +}; + +static int imx6_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct imx6_pcie *imx6_pcie; + struct resource *dbi_base; + struct device_node *node = dev->of_node; + int ret; + u16 val; + + imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL); + if (!imx6_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + imx6_pcie->pci = pci; + imx6_pcie->variant = + (enum imx6_pcie_variants)of_device_get_match_data(dev); + + dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pci->dbi_base = devm_ioremap_resource(dev, dbi_base); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + /* Fetch GPIOs */ + imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0); + imx6_pcie->gpio_active_high = of_property_read_bool(node, + "reset-gpio-active-high"); + if (gpio_is_valid(imx6_pcie->reset_gpio)) { + ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio, + imx6_pcie->gpio_active_high ? 
+ GPIOF_OUT_INIT_HIGH : + GPIOF_OUT_INIT_LOW, + "PCIe reset"); + if (ret) { + dev_err(dev, "unable to get reset gpio\n"); + return ret; + } + } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) { + return imx6_pcie->reset_gpio; + } + + /* Fetch clocks */ + imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy"); + if (IS_ERR(imx6_pcie->pcie_phy)) { + dev_err(dev, "pcie_phy clock source missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie_phy); + } + + imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus"); + if (IS_ERR(imx6_pcie->pcie_bus)) { + dev_err(dev, "pcie_bus clock source missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie_bus); + } + + imx6_pcie->pcie = devm_clk_get(dev, "pcie"); + if (IS_ERR(imx6_pcie->pcie)) { + dev_err(dev, "pcie clock source missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie); + } + + switch (imx6_pcie->variant) { + case IMX6SX: + imx6_pcie->pcie_inbound_axi = devm_clk_get(dev, + "pcie_inbound_axi"); + if (IS_ERR(imx6_pcie->pcie_inbound_axi)) { + dev_err(dev, "pcie_inbound_axi clock missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie_inbound_axi); + } + break; + case IMX7D: + imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, + "pciephy"); + if (IS_ERR(imx6_pcie->pciephy_reset)) { + dev_err(dev, "Failed to get PCIEPHY reset control\n"); + return PTR_ERR(imx6_pcie->pciephy_reset); + } + + imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, + "apps"); + if (IS_ERR(imx6_pcie->apps_reset)) { + dev_err(dev, "Failed to get PCIE APPS reset control\n"); + return PTR_ERR(imx6_pcie->apps_reset); + } + break; + default: + break; + } + + /* Grab GPR config register range */ + imx6_pcie->iomuxc_gpr = + syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); + if (IS_ERR(imx6_pcie->iomuxc_gpr)) { + dev_err(dev, "unable to find iomuxc registers\n"); + return PTR_ERR(imx6_pcie->iomuxc_gpr); + } + + /* Grab PCIe PHY Tx Settings */ + if (of_property_read_u32(node, "fsl,tx-deemph-gen1", + &imx6_pcie->tx_deemph_gen1)) + imx6_pcie->tx_deemph_gen1 = 0; + + if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db", + &imx6_pcie->tx_deemph_gen2_3p5db)) + imx6_pcie->tx_deemph_gen2_3p5db = 0; + + if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db", + &imx6_pcie->tx_deemph_gen2_6db)) + imx6_pcie->tx_deemph_gen2_6db = 20; + + if (of_property_read_u32(node, "fsl,tx-swing-full", + &imx6_pcie->tx_swing_full)) + imx6_pcie->tx_swing_full = 127; + + if (of_property_read_u32(node, "fsl,tx-swing-low", + &imx6_pcie->tx_swing_low)) + imx6_pcie->tx_swing_low = 127; + + /* Limit link speed */ + ret = of_property_read_u32(node, "fsl,max-link-speed", + &imx6_pcie->link_gen); + if (ret) + imx6_pcie->link_gen = 1; + + imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); + if (IS_ERR(imx6_pcie->vpcie)) { + if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV) + return PTR_ERR(imx6_pcie->vpcie); + imx6_pcie->vpcie = NULL; + } + + platform_set_drvdata(pdev, imx6_pcie); + + ret = imx6_add_pcie_port(imx6_pcie, pdev); + if (ret < 0) + return ret; + + if (pci_msi_enabled()) { + val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP + + PCI_MSI_FLAGS); + val |= PCI_MSI_FLAGS_ENABLE; + dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS, + val); + } + + return 0; +} + +static void imx6_pcie_shutdown(struct platform_device *pdev) +{ + struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev); + + /* bring down link, so bootloader gets clean state in case of reboot */ + imx6_pcie_assert_core_reset(imx6_pcie); +} + +static const struct of_device_id 
imx6_pcie_of_match[] = { + { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, }, + { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, }, + { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, }, + { .compatible = "fsl,imx7d-pcie", .data = (void *)IMX7D, }, + {}, +}; + +static struct platform_driver imx6_pcie_driver = { + .driver = { + .name = "imx6q-pcie", + .of_match_table = imx6_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = imx6_pcie_probe, + .shutdown = imx6_pcie_shutdown, +}; + +static int __init imx6_pcie_init(void) +{ + /* + * Since probe() can be deferred we need to make sure that + * hook_fault_code is not called after __init memory is freed + * by kernel and since imx6q_pcie_abort_handler() is a no-op, + * we can install the handler here without risking it + * accessing some uninitialized driver state. + */ + hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0, + "external abort on non-linefetch"); + + return platform_driver_register(&imx6_pcie_driver); +} +device_initcall(imx6_pcie_init); diff --git a/drivers/pci/controller/dwc/pci-keystone-dw.c b/drivers/pci/controller/dwc/pci-keystone-dw.c new file mode 100644 index 000000000..15c612e85 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-keystone-dw.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DesignWare application register space functions for Keystone PCI controller + * + * Copyright (C) 2013-2014 Texas Instruments., Ltd. + * http://www.ti.com + * + * Author: Murali Karicheri <m-karicheri2@ti.com> + */ + +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/irqreturn.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_pci.h> +#include <linux/pci.h> +#include <linux/platform_device.h> + +#include "pcie-designware.h" +#include "pci-keystone.h" + +/* Application register defines */ +#define LTSSM_EN_VAL 1 +#define LTSSM_STATE_MASK 0x1f +#define LTSSM_STATE_L0 0x11 +#define DBI_CS2_EN_VAL 0x20 +#define OB_XLAT_EN_VAL 2 + +/* Application registers */ +#define CMD_STATUS 0x004 +#define CFG_SETUP 0x008 +#define OB_SIZE 0x030 +#define CFG_PCIM_WIN_SZ_IDX 3 +#define CFG_PCIM_WIN_CNT 32 +#define SPACE0_REMOTE_CFG_OFFSET 0x1000 +#define OB_OFFSET_INDEX(n) (0x200 + (8 * n)) +#define OB_OFFSET_HI(n) (0x204 + (8 * n)) + +/* IRQ register defines */ +#define IRQ_EOI 0x050 +#define IRQ_STATUS 0x184 +#define IRQ_ENABLE_SET 0x188 +#define IRQ_ENABLE_CLR 0x18c + +#define MSI_IRQ 0x054 +#define MSI0_IRQ_STATUS 0x104 +#define MSI0_IRQ_ENABLE_SET 0x108 +#define MSI0_IRQ_ENABLE_CLR 0x10c +#define IRQ_STATUS 0x184 +#define MSI_IRQ_OFFSET 4 + +/* Error IRQ bits */ +#define ERR_AER BIT(5) /* ECRC error */ +#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */ +#define ERR_CORR BIT(3) /* Correctable error */ +#define ERR_NONFATAL BIT(2) /* Non-fatal error */ +#define ERR_FATAL BIT(1) /* Fatal error */ +#define ERR_SYS BIT(0) /* System (fatal, non-fatal, or correctable) */ +#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \ + ERR_NONFATAL | ERR_FATAL | ERR_SYS) +#define ERR_FATAL_IRQ (ERR_FATAL | ERR_AXI) +#define ERR_IRQ_STATUS_RAW 0x1c0 +#define ERR_IRQ_STATUS 0x1c4 +#define ERR_IRQ_ENABLE_SET 0x1c8 +#define ERR_IRQ_ENABLE_CLR 0x1cc + +/* Config space registers */ +#define DEBUG0 0x728 + +#define to_keystone_pcie(x) dev_get_drvdata((x)->dev) + +static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, + u32 *bit_pos) +{ + *reg_offset = offset % 8; + *bit_pos = offset >> 3; +} + +phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp) 
+{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + + return ks_pcie->app.start + MSI_IRQ; +} + +static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset) +{ + return readl(ks_pcie->va_app_base + offset); +} + +static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val) +{ + writel(val, ks_pcie->va_app_base + offset); +} + +void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset) +{ + struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = pci->dev; + u32 pending, vector; + int src, virq; + + pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4)); + + /* + * MSI0 status bits 0-3 show vectors 0, 8, 16, 24; the MSI1 status + * bits show vectors 1, 9, 17, 25; and so forth + */ + for (src = 0; src < 4; src++) { + if (BIT(src) & pending) { + vector = offset + (src << 3); + virq = irq_linear_revmap(pp->irq_domain, vector); + dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", + src, vector, virq); + generic_handle_irq(virq); + } + } +} + +void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp) +{ + u32 reg_offset, bit_pos; + struct keystone_pcie *ks_pcie; + struct dw_pcie *pci; + + pci = to_dw_pcie_from_pp(pp); + ks_pcie = to_keystone_pcie(pci); + update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos); + + ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4), + BIT(bit_pos)); + ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET); +} + +void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) +{ + u32 reg_offset, bit_pos; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + + update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos); + ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4), + BIT(bit_pos)); +} + +void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) +{ + u32 reg_offset, bit_pos; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + + update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos); + ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4), + BIT(bit_pos)); +} + +int ks_dw_pcie_msi_host_init(struct pcie_port *pp) +{ + return dw_pcie_allocate_domains(pp); +} + +void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie) +{ + int i; + + for (i = 0; i < PCI_NUM_INTX; i++) + ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1); +} + +void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset) +{ + struct dw_pcie *pci = ks_pcie->pci; + struct device *dev = pci->dev; + u32 pending; + int virq; + + pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4)); + + if (BIT(0) & pending) { + virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset); + dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq); + generic_handle_irq(virq); + } + + /* EOI the INTx interrupt */ + ks_dw_app_writel(ks_pcie, IRQ_EOI, offset); +} + +void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie) +{ + ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL); +} + +irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie) +{ + u32 status; + + status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL; + if (!status) + return IRQ_NONE; + + if (status & ERR_FATAL_IRQ) + dev_err(ks_pcie->pci->dev, "fatal error (status %#010x)\n", + status); + + /* Ack the IRQ; status bits are RW1C */ + ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status); + return 
IRQ_HANDLED; +} + +static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d) +{ +} + +static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d) +{ +} + +static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d) +{ +} + +static struct irq_chip ks_dw_pcie_legacy_irq_chip = { + .name = "Keystone-PCI-Legacy-IRQ", + .irq_ack = ks_dw_pcie_ack_legacy_irq, + .irq_mask = ks_dw_pcie_mask_legacy_irq, + .irq_unmask = ks_dw_pcie_unmask_legacy_irq, +}; + +static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d, + unsigned int irq, irq_hw_number_t hw_irq) +{ + irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, d->host_data); + + return 0; +} + +static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = { + .map = ks_dw_pcie_init_legacy_irq_map, + .xlate = irq_domain_xlate_onetwocell, +}; + +/** + * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask + * registers + * + * Since modification of dbi_cs2 involves different clock domain, read the + * status back to ensure the transition is complete. + */ +static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie) +{ + u32 val; + + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val); + + do { + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + } while (!(val & DBI_CS2_EN_VAL)); +} + +/** + * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode + * + * Since modification of dbi_cs2 involves different clock domain, read the + * status back to ensure the transition is complete. + */ +static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie) +{ + u32 val; + + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val); + + do { + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + } while (val & DBI_CS2_EN_VAL); +} + +void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) +{ + struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + u32 start = pp->mem->start, end = pp->mem->end; + int i, tr_size; + u32 val; + + /* Disable BARs for inbound access */ + ks_dw_pcie_set_dbi_mode(ks_pcie); + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0); + ks_dw_pcie_clear_dbi_mode(ks_pcie); + + /* Set outbound translation size per window division */ + ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7); + + tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M; + + /* Using Direct 1:1 mapping of RC <-> PCI memory space */ + for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) { + ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1); + ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0); + start += tr_size; + } + + /* Enable OB translation */ + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val); +} + +/** + * ks_pcie_cfg_setup() - Set up configuration space address for a device + * + * @ks_pcie: ptr to keystone_pcie structure + * @bus: Bus number the device is residing on + * @devfn: device, function number info + * + * Forms and returns the address of configuration space mapped in PCIESS + * address space 0. Also configures CFG_SETUP for remote configuration space + * access. + * + * The address space has two regions to access configuration - local and remote. + * We access local region for bus 0 (as RC is attached on bus 0) and remote + * region for others with TYPE 1 access when bus > 1. 
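For example, a config access to bus 2, device 0, function 0 programs + * CFG_SETUP with (2 << 16) | BIT(24), i.e. a TYPE 1 request; see the + * regval computation in the function body. + *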
As for a device on bus 1, + we will do a TYPE 0 access as it will be on our secondary (logical) bus. + CFG_SETUP is needed only for remote configuration access. + */ +static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus, + unsigned int devfn) +{ + u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn); + struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + u32 regval; + + if (bus == 0) + return pci->dbi_base; + + regval = (bus << 16) | (device << 8) | function; + + /* + * Bus 1 is our virtual secondary bus, so it only ever needs TYPE 0 + * accesses; mark the access TYPE 1 for every bus beyond it. + */ + if (bus != 1) + regval |= BIT(24); + + ks_dw_app_writel(ks_pcie, CFG_SETUP, regval); + return pp->va_cfg0_base; +} + +int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + u8 bus_num = bus->number; + void __iomem *addr; + + addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); + + return dw_pcie_read(addr + where, size, val); +} + +int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + u8 bus_num = bus->number; + void __iomem *addr; + + addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); + + return dw_pcie_write(addr + where, size, val); +} + +/** + * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization + * + * This sets BAR0 to enable inbound access for the MSI_IRQ register + */ +void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + + /* Configure and set up BAR0 */ + ks_dw_pcie_set_dbi_mode(ks_pcie); + + /* Enable BAR0 */ + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1); + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1); + + ks_dw_pcie_clear_dbi_mode(ks_pcie); + + /* + * For BAR0, just setting bus address for inbound writes (MSI) should + * be sufficient. Use physical address to avoid any conflicts. + */ + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start); +} + +/** + * ks_dw_pcie_link_up() - Check if link up + */ +int ks_dw_pcie_link_up(struct dw_pcie *pci) +{ + u32 val; + + val = dw_pcie_readl_dbi(pci, DEBUG0); + return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0; +} + +void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie) +{ + u32 val; + + /* Disable Link training */ + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + val &= ~LTSSM_EN_VAL; + ks_dw_app_writel(ks_pcie, CMD_STATUS, val); + + /* Initiate Link Training */ + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); +} + +/** + * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware + * + * Ioremap the register resources, initialize legacy irq domain + * and call dw_pcie_v3_65_host_init() API to initialize the Keystone + * PCI host controller. + */ +int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, + struct device_node *msi_intc_np) +{ + struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = pci->dev; + struct platform_device *pdev = to_platform_device(dev); + struct resource *res; + + /* Index 0 is the config reg. 
space address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + /* + * Both are set to the same address and are used by the pcie + * rd/wr_other_conf functions + */ + pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET; + pp->va_cfg1_base = pp->va_cfg0_base; + + /* Index 1 is the application reg. space address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + ks_pcie->va_app_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ks_pcie->va_app_base)) + return PTR_ERR(ks_pcie->va_app_base); + + ks_pcie->app = *res; + + /* Create legacy IRQ domain */ + ks_pcie->legacy_irq_domain = + irq_domain_add_linear(ks_pcie->legacy_intc_np, + PCI_NUM_INTX, + &ks_dw_pcie_legacy_irq_domain_ops, + NULL); + if (!ks_pcie->legacy_irq_domain) { + dev_err(dev, "Failed to add irq domain for legacy irqs\n"); + return -EINVAL; + } + + return dw_pcie_host_init(pp); +} diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c new file mode 100644 index 000000000..765357b87 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -0,0 +1,463 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Texas Instruments Keystone SoCs + * + * Copyright (C) 2013-2014 Texas Instruments., Ltd. + * http://www.ti.com + * + * Author: Murali Karicheri <m-karicheri2@ti.com> + * Implementation based on pci-exynos.c and pcie-designware.c + */ + +#include <linux/irqchip/chained_irq.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/init.h> +#include <linux/msi.h> +#include <linux/of_irq.h> +#include <linux/of.h> +#include <linux/of_pci.h> +#include <linux/platform_device.h> +#include <linux/phy/phy.h> +#include <linux/resource.h> +#include <linux/signal.h> + +#include "pcie-designware.h" +#include "pci-keystone.h" + +#define DRIVER_NAME "keystone-pcie" + +/* DEV_STAT_CTRL */ +#define PCIE_CAP_BASE 0x70 + +/* PCIE controller device IDs */ +#define PCIE_RC_K2HK 0xb008 +#define PCIE_RC_K2E 0xb009 +#define PCIE_RC_K2L 0xb00a +#define PCIE_RC_K2G 0xb00b + +#define to_keystone_pcie(x) dev_get_drvdata((x)->dev) + +static void quirk_limit_mrrs(struct pci_dev *dev) +{ + struct pci_bus *bus = dev->bus; + struct pci_dev *bridge = bus->self; + static const struct pci_device_id rc_pci_devids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK), + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E), + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L), + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G), + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + { 0, }, + }; + + if (pci_is_root_bus(bus)) + return; + + /* look for the host bridge */ + while (!pci_is_root_bus(bus)) { + bridge = bus->self; + bus = bus->parent; + } + + if (bridge) { + /* + * The Keystone PCI controller has a h/w limitation of + * 256 bytes maximum read request size. It can't handle + * anything higher than this. So force this limit on + * all downstream devices. 
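+ * For example, an endpoint that negotiated a 512-byte MRRS at + * enumeration time is clamped back to 256 by the pcie_set_readrq() + * call below.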
+ */ + if (pci_match_id(rc_pci_devids, bridge)) { + if (pcie_get_readrq(dev) > 256) { + dev_info(&dev->dev, "limiting MRRS to 256\n"); + pcie_set_readrq(dev, 256); + } + } +} +DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs); + +static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) +{ + struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = pci->dev; + unsigned int retries; + + dw_pcie_setup_rc(pp); + + if (dw_pcie_link_up(pci)) { + dev_info(dev, "Link already up\n"); + return 0; + } + + /* check if the link is up or not */ + for (retries = 0; retries < 5; retries++) { + ks_dw_pcie_initiate_link_train(ks_pcie); + if (!dw_pcie_wait_for_link(pci)) + return 0; + } + + dev_err(dev, "phy link never came up\n"); + return -ETIMEDOUT; +} + +static void ks_pcie_msi_irq_handler(struct irq_desc *desc) +{ + unsigned int irq = irq_desc_get_irq(desc); + struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); + u32 offset = irq - ks_pcie->msi_host_irqs[0]; + struct dw_pcie *pci = ks_pcie->pci; + struct device *dev = pci->dev; + struct irq_chip *chip = irq_desc_get_chip(desc); + + dev_dbg(dev, "%s, irq %d\n", __func__, irq); + + /* + * The chained irq handler installation would have replaced the normal + * interrupt driver handler, so we need to take care of the mask/unmask + * and ack operations. + */ + chained_irq_enter(chip, desc); + ks_dw_pcie_handle_msi_irq(ks_pcie, offset); + chained_irq_exit(chip, desc); +} + +/** + * ks_pcie_legacy_irq_handler() - Handle legacy interrupt + * @irq: IRQ line for legacy interrupts + * @desc: Pointer to irq descriptor + * + * Traverse through pending legacy interrupts and invoke handler for each. Also + * takes care of interrupt controller level mask/ack operation. + */ +static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) +{ + unsigned int irq = irq_desc_get_irq(desc); + struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); + struct dw_pcie *pci = ks_pcie->pci; + struct device *dev = pci->dev; + u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0]; + struct irq_chip *chip = irq_desc_get_chip(desc); + + dev_dbg(dev, ": Handling legacy irq %d\n", irq); + + /* + * The chained irq handler installation would have replaced the normal + * interrupt driver handler, so we need to take care of the mask/unmask + * and ack operations. + */ + chained_irq_enter(chip, desc); + ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset); + chained_irq_exit(chip, desc); +} + +static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, + char *controller, int *num_irqs) +{ + int temp, max_host_irqs, legacy = 1, *host_irqs; + struct device *dev = ks_pcie->pci->dev; + struct device_node *np_pcie = dev->of_node, **np_temp; + + if (!strcmp(controller, "msi-interrupt-controller")) + legacy = 0; + + if (legacy) { + np_temp = &ks_pcie->legacy_intc_np; + max_host_irqs = PCI_NUM_INTX; + host_irqs = &ks_pcie->legacy_host_irqs[0]; + } else { + np_temp = &ks_pcie->msi_intc_np; + max_host_irqs = MAX_MSI_HOST_IRQS; + host_irqs = &ks_pcie->msi_host_irqs[0]; + } + + /* interrupt controller is in a child node */ + *np_temp = of_get_child_by_name(np_pcie, controller); + if (!(*np_temp)) { + dev_err(dev, "Node for %s is absent\n", controller); + return -EINVAL; + } + + temp = of_irq_count(*np_temp); + if (!temp) { + dev_err(dev, "No IRQ entries in %s\n", controller); + of_node_put(*np_temp); + return -EINVAL; + } + + if (temp > max_host_irqs) + dev_warn(dev, "Too many %s interrupts defined %u\n", + (legacy ? "legacy" : "MSI"), temp); + + /* + * Support up to max_host_irqs. In the DT these run from index 0 to 3 + * (legacy) or 0 to 7 (MSI). + */ + for (temp = 0; temp < max_host_irqs; temp++) { + host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp); + if (!host_irqs[temp]) + break; + } + + of_node_put(*np_temp); + + if (temp) { + *num_irqs = temp; + return 0; + } + + return -EINVAL; +} + +static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) +{ + int i; + + /* Legacy IRQ */ + for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) { + irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i], + ks_pcie_legacy_irq_handler, + ks_pcie); + } + ks_dw_pcie_enable_legacy_irqs(ks_pcie); + + /* MSI IRQ */ + if (IS_ENABLED(CONFIG_PCI_MSI)) { + for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) { + irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i], + ks_pcie_msi_irq_handler, + ks_pcie); + } + } + + if (ks_pcie->error_irq > 0) + ks_dw_pcie_enable_error_irq(ks_pcie); +} + +#ifdef CONFIG_ARM +/* + * When a PCI device does not exist during config cycles, keystone host gets a + * bus error instead of returning 0xffffffff. This handler always returns 0 + * for this kind of fault. + */ +static int keystone_pcie_fault(unsigned long addr, unsigned int fsr, + struct pt_regs *regs) +{ + unsigned long instr = *(unsigned long *) instruction_pointer(regs); + + if ((instr & 0x0e100090) == 0x00100090) { + int reg = (instr >> 12) & 15; + + regs->uregs[reg] = -1; + regs->ARM_pc += 4; + } + + return 0; +} +#endif + +static int __init ks_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + u32 val; + + ks_pcie_establish_link(ks_pcie); + ks_dw_pcie_setup_rc_app_regs(ks_pcie); + ks_pcie_setup_interrupts(ks_pcie); + writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8), + pci->dbi_base + PCI_IO_BASE); + + /* update the Device ID */ + writew(ks_pcie->device_id, pci->dbi_base + PCI_DEVICE_ID); + + /* update the DEV_STAT_CTRL to publish the right MRRS */ + val = readl(pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); + val &= ~PCI_EXP_DEVCTL_READRQ; + /* set the MRRS to 256 bytes */ + val |= BIT(12); + writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); + +#ifdef CONFIG_ARM + /* + * PCIe access errors that result in OCP errors are caught by ARM as + * "External aborts" + */ + hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0, + "Asynchronous external abort"); +#endif + + return 0; +} + +static const struct dw_pcie_host_ops keystone_pcie_host_ops = { + .rd_other_conf = ks_dw_pcie_rd_other_conf, + .wr_other_conf = ks_dw_pcie_wr_other_conf, + .host_init = ks_pcie_host_init, + .msi_set_irq = ks_dw_pcie_msi_set_irq, + .msi_clear_irq = ks_dw_pcie_msi_clear_irq, + .get_msi_addr = ks_dw_pcie_get_msi_addr, + .msi_host_init = ks_dw_pcie_msi_host_init, + .msi_irq_ack = ks_dw_pcie_msi_irq_ack, + .scan_bus = ks_dw_pcie_v3_65_scan_bus, +}; + +static irqreturn_t pcie_err_irq_handler(int irq, void *priv) +{ + struct keystone_pcie *ks_pcie = priv; + + return ks_dw_pcie_handle_error_irq(ks_pcie); +} + +static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + + ret = ks_pcie_get_irq_controller_info(ks_pcie, + "legacy-interrupt-controller", + &ks_pcie->num_legacy_host_irqs); + if (ret) + return ret; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + ret = 
ks_pcie_get_irq_controller_info(ks_pcie, + "msi-interrupt-controller", + &ks_pcie->num_msi_host_irqs); + if (ret) + return ret; + } + + /* + * Index 0 is the platform interrupt for error interrupt + * from RC. This is optional. + */ + ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0); + if (ks_pcie->error_irq <= 0) + dev_info(dev, "no error IRQ defined\n"); + else { + ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler, + IRQF_SHARED, "pcie-error-irq", ks_pcie); + if (ret < 0) { + dev_err(dev, "failed to request error IRQ %d\n", + ks_pcie->error_irq); + return ret; + } + } + + pp->ops = &keystone_pcie_host_ops; + ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static const struct of_device_id ks_pcie_of_match[] = { + { + .type = "pci", + .compatible = "ti,keystone-pcie", + }, + { }, +}; + +static const struct dw_pcie_ops dw_pcie_ops = { + .link_up = ks_dw_pcie_link_up, +}; + +static int __exit ks_pcie_remove(struct platform_device *pdev) +{ + struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev); + + clk_disable_unprepare(ks_pcie->clk); + + return 0; +} + +static int __init ks_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct keystone_pcie *ks_pcie; + struct resource *res; + void __iomem *reg_p; + struct phy *phy; + int ret; + + ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL); + if (!ks_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + ks_pcie->pci = pci; + + /* initialize SerDes Phy if present */ + phy = devm_phy_get(dev, "pcie-phy"); + if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER) + return PTR_ERR(phy); + + if (!IS_ERR_OR_NULL(phy)) { + ret = phy_init(phy); + if (ret < 0) + return ret; + } + + /* index 2 is to read PCI DEVICE_ID */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + reg_p = devm_ioremap_resource(dev, res); + if (IS_ERR(reg_p)) + return PTR_ERR(reg_p); + ks_pcie->device_id = readl(reg_p) >> 16; + devm_iounmap(dev, reg_p); + devm_release_mem_region(dev, res->start, resource_size(res)); + + ks_pcie->np = dev->of_node; + platform_set_drvdata(pdev, ks_pcie); + ks_pcie->clk = devm_clk_get(dev, "pcie"); + if (IS_ERR(ks_pcie->clk)) { + dev_err(dev, "Failed to get pcie rc clock\n"); + return PTR_ERR(ks_pcie->clk); + } + ret = clk_prepare_enable(ks_pcie->clk); + if (ret) + return ret; + + platform_set_drvdata(pdev, ks_pcie); + + ret = ks_add_pcie_port(ks_pcie, pdev); + if (ret < 0) + goto fail_clk; + + return 0; +fail_clk: + clk_disable_unprepare(ks_pcie->clk); + + return ret; +} + +static struct platform_driver ks_pcie_driver __refdata = { + .probe = ks_pcie_probe, + .remove = __exit_p(ks_pcie_remove), + .driver = { + .name = "keystone-pcie", + .of_match_table = of_match_ptr(ks_pcie_of_match), + }, +}; +builtin_platform_driver(ks_pcie_driver); diff --git a/drivers/pci/controller/dwc/pci-keystone.h b/drivers/pci/controller/dwc/pci-keystone.h new file mode 100644 index 000000000..8a13da391 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-keystone.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Keystone PCI Controller's common includes + * + * Copyright (C) 2013-2014 Texas Instruments., Ltd. 
+ * http://www.ti.com + * + * Author: Murali Karicheri <m-karicheri2@ti.com> + */ + +#define MAX_MSI_HOST_IRQS 8 + +struct keystone_pcie { + struct dw_pcie *pci; + struct clk *clk; + /* PCI Device ID */ + u32 device_id; + int num_legacy_host_irqs; + int legacy_host_irqs[PCI_NUM_INTX]; + struct device_node *legacy_intc_np; + + int num_msi_host_irqs; + int msi_host_irqs[MAX_MSI_HOST_IRQS]; + struct device_node *msi_intc_np; + struct irq_domain *legacy_irq_domain; + struct device_node *np; + + int error_irq; + + /* Application register space */ + void __iomem *va_app_base; /* DT 1st resource */ + struct resource app; +}; + +/* Keystone DW specific MSI controller APIs/definitions */ +void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset); +phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp); + +/* Keystone specific PCI controller APIs */ +void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); +void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset); +void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie); +irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie); +int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, + struct device_node *msi_intc_np); +int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 val); +int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 *val); +void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie); +void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie); +void ks_dw_pcie_msi_irq_ack(int i, struct pcie_port *pp); +void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq); +void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq); +void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp); +int ks_dw_pcie_msi_host_init(struct pcie_port *pp); +int ks_dw_pcie_link_up(struct dw_pcie *pci); diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c new file mode 100644 index 000000000..7aa9a82b7 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Freescale Layerscape SoCs + * + * Copyright (C) 2014 Freescale Semiconductor. 
+ * + * Author: Minghuan Lian <Minghuan.Lian@freescale.com> + */ + +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/of_pci.h> +#include <linux/of_platform.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/resource.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> + +#include "pcie-designware.h" + +/* PEX1/2 Misc Ports Status Register */ +#define SCFG_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4) +#define LTSSM_STATE_SHIFT 20 +#define LTSSM_STATE_MASK 0x3f +#define LTSSM_PCIE_L0 0x11 /* L0 state */ + +/* PEX Internal Configuration Registers */ +#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ +#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */ +#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */ + +#define PCIE_IATU_NUM 6 + +struct ls_pcie_drvdata { + u32 lut_offset; + u32 ltssm_shift; + u32 lut_dbg; + const struct dw_pcie_host_ops *ops; + const struct dw_pcie_ops *dw_pcie_ops; +}; + +struct ls_pcie { + struct dw_pcie *pci; + void __iomem *lut; + struct regmap *scfg; + const struct ls_pcie_drvdata *drvdata; + int index; +}; + +#define to_ls_pcie(x) dev_get_drvdata((x)->dev) + +static bool ls_pcie_is_bridge(struct ls_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + u32 header_type; + + header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE); + header_type &= 0x7f; + + return header_type == PCI_HEADER_TYPE_BRIDGE; +} + +/* Clear multi-function bit */ +static void ls_pcie_clear_multifunction(struct ls_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + + iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE); +} + +/* Drop MSG TLP except for Vendor MSG */ +static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) +{ + u32 val; + struct dw_pcie *pci = pcie->pci; + + val = ioread32(pci->dbi_base + PCIE_STRFMR1); + val &= 0xDFFFFFFF; + iowrite32(val, pci->dbi_base + PCIE_STRFMR1); +} + +static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie) +{ + int i; + + for (i = 0; i < PCIE_IATU_NUM; i++) + dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND); +} + +static int ls1021_pcie_link_up(struct dw_pcie *pci) +{ + u32 state; + struct ls_pcie *pcie = to_ls_pcie(pci); + + if (!pcie->scfg) + return 0; + + regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state); + state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; + + if (state < LTSSM_PCIE_L0) + return 0; + + return 1; +} + +static int ls_pcie_link_up(struct dw_pcie *pci) +{ + struct ls_pcie *pcie = to_ls_pcie(pci); + u32 state; + + state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >> + pcie->drvdata->ltssm_shift) & + LTSSM_STATE_MASK; + + if (state < LTSSM_PCIE_L0) + return 0; + + return 1; +} + +/* Forward error response of outbound non-posted requests */ +static void ls_pcie_fix_error_response(struct ls_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + + iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR); +} + +static int ls_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct ls_pcie *pcie = to_ls_pcie(pci); + + /* + * Disable outbound windows configured by the bootloader to avoid + * one transaction hitting multiple outbound windows. + * dw_pcie_setup_rc() will reconfigure the outbound windows. 
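+ * ls_pcie_disable_outbound_atus() below clears all PCIE_IATU_NUM (6) + * outbound iATU windows via dw_pcie_disable_atu().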
+ */ + ls_pcie_disable_outbound_atus(pcie); + ls_pcie_fix_error_response(pcie); + + dw_pcie_dbi_ro_wr_en(pci); + ls_pcie_clear_multifunction(pcie); + dw_pcie_dbi_ro_wr_dis(pci); + + ls_pcie_drop_msg_tlp(pcie); + + dw_pcie_setup_rc(pp); + + return 0; +} + +static int ls1021_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct ls_pcie *pcie = to_ls_pcie(pci); + struct device *dev = pci->dev; + u32 index[2]; + int ret; + + pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, + "fsl,pcie-scfg"); + if (IS_ERR(pcie->scfg)) { + ret = PTR_ERR(pcie->scfg); + dev_err(dev, "No syscfg phandle specified\n"); + pcie->scfg = NULL; + return ret; + } + + if (of_property_read_u32_array(dev->of_node, + "fsl,pcie-scfg", index, 2)) { + pcie->scfg = NULL; + return -EINVAL; + } + pcie->index = index[1]; + + return ls_pcie_host_init(pp); +} + +static int ls_pcie_msi_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; + struct device_node *msi_node; + + /* + * The MSI domain is set by the generic of_msi_configure(). This + * .msi_host_init() function keeps us from doing the default MSI + * domain setup in dw_pcie_host_init() and also enforces the + * requirement that "msi-parent" exists. + */ + msi_node = of_parse_phandle(np, "msi-parent", 0); + if (!msi_node) { + dev_err(dev, "failed to find msi-parent\n"); + return -EINVAL; + } + + return 0; +} + +static const struct dw_pcie_host_ops ls1021_pcie_host_ops = { + .host_init = ls1021_pcie_host_init, + .msi_host_init = ls_pcie_msi_host_init, +}; + +static const struct dw_pcie_host_ops ls_pcie_host_ops = { + .host_init = ls_pcie_host_init, + .msi_host_init = ls_pcie_msi_host_init, +}; + +static const struct dw_pcie_ops dw_ls1021_pcie_ops = { + .link_up = ls1021_pcie_link_up, +}; + +static const struct dw_pcie_ops dw_ls_pcie_ops = { + .link_up = ls_pcie_link_up, +}; + +static struct ls_pcie_drvdata ls1021_drvdata = { + .ops = &ls1021_pcie_host_ops, + .dw_pcie_ops = &dw_ls1021_pcie_ops, +}; + +static struct ls_pcie_drvdata ls1043_drvdata = { + .lut_offset = 0x10000, + .ltssm_shift = 24, + .lut_dbg = 0x7fc, + .ops = &ls_pcie_host_ops, + .dw_pcie_ops = &dw_ls_pcie_ops, +}; + +static struct ls_pcie_drvdata ls1046_drvdata = { + .lut_offset = 0x80000, + .ltssm_shift = 24, + .lut_dbg = 0x407fc, + .ops = &ls_pcie_host_ops, + .dw_pcie_ops = &dw_ls_pcie_ops, +}; + +static struct ls_pcie_drvdata ls2080_drvdata = { + .lut_offset = 0x80000, + .ltssm_shift = 0, + .lut_dbg = 0x7fc, + .ops = &ls_pcie_host_ops, + .dw_pcie_ops = &dw_ls_pcie_ops, +}; + +static struct ls_pcie_drvdata ls2088_drvdata = { + .lut_offset = 0x80000, + .ltssm_shift = 0, + .lut_dbg = 0x407fc, + .ops = &ls_pcie_host_ops, + .dw_pcie_ops = &dw_ls_pcie_ops, +}; + +static const struct of_device_id ls_pcie_of_match[] = { + { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata }, + { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, + { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, + { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata }, + { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, + { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, + { .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata }, + { .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata }, + { }, +}; + +static int __init ls_add_pcie_port(struct ls_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + struct pcie_port *pp = 
&pci->pp; + struct device *dev = pci->dev; + int ret; + + pp->ops = pcie->drvdata->ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static int __init ls_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct ls_pcie *pcie; + struct resource *dbi_base; + int ret; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pcie->drvdata = of_device_get_match_data(dev); + + pci->dev = dev; + pci->ops = pcie->drvdata->dw_pcie_ops; + + pcie->pci = pci; + + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset; + + if (!ls_pcie_is_bridge(pcie)) + return -ENODEV; + + platform_set_drvdata(pdev, pcie); + + ret = ls_add_pcie_port(pcie); + if (ret < 0) + return ret; + + return 0; +} + +static struct platform_driver ls_pcie_driver = { + .driver = { + .name = "layerscape-pcie", + .of_match_table = ls_pcie_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe); diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c new file mode 100644 index 000000000..0c389a30e --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Marvell Armada-8K SoCs + * + * Armada-8K PCIe Glue Layer Source Code + * + * Copyright (C) 2016 Marvell Technology Group Ltd. 
+ * + * Author: Yehuda Yitshak <yehuday@marvell.com> + * Author: Shadi Ammouri <shadi@marvell.com> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/of.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/resource.h> +#include <linux/of_pci.h> +#include <linux/of_irq.h> + +#include "pcie-designware.h" + +struct armada8k_pcie { + struct dw_pcie *pci; + struct clk *clk; + struct clk *clk_reg; +}; + +#define PCIE_VENDOR_REGS_OFFSET 0x8000 + +#define PCIE_GLOBAL_CONTROL_REG (PCIE_VENDOR_REGS_OFFSET + 0x0) +#define PCIE_APP_LTSSM_EN BIT(2) +#define PCIE_DEVICE_TYPE_SHIFT 4 +#define PCIE_DEVICE_TYPE_MASK 0xF +#define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */ + +#define PCIE_GLOBAL_STATUS_REG (PCIE_VENDOR_REGS_OFFSET + 0x8) +#define PCIE_GLB_STS_RDLH_LINK_UP BIT(1) +#define PCIE_GLB_STS_PHY_LINK_UP BIT(9) + +#define PCIE_GLOBAL_INT_CAUSE1_REG (PCIE_VENDOR_REGS_OFFSET + 0x1C) +#define PCIE_GLOBAL_INT_MASK1_REG (PCIE_VENDOR_REGS_OFFSET + 0x20) +#define PCIE_INT_A_ASSERT_MASK BIT(9) +#define PCIE_INT_B_ASSERT_MASK BIT(10) +#define PCIE_INT_C_ASSERT_MASK BIT(11) +#define PCIE_INT_D_ASSERT_MASK BIT(12) + +#define PCIE_ARCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x50) +#define PCIE_AWCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x54) +#define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C) +#define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60) +/* + * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write + * allocate + */ +#define ARCACHE_DEFAULT_VALUE 0x3511 +#define AWCACHE_DEFAULT_VALUE 0x5311 + +#define DOMAIN_OUTER_SHAREABLE 0x2 +#define AX_USER_DOMAIN_MASK 0x3 +#define AX_USER_DOMAIN_SHIFT 4 + +#define to_armada8k_pcie(x) dev_get_drvdata((x)->dev) + +static int armada8k_pcie_link_up(struct dw_pcie *pci) +{ + u32 reg; + u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP; + + reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG); + + if ((reg & mask) == mask) + return 1; + + dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg); + return 0; +} + +static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + u32 reg; + + if (!dw_pcie_link_up(pci)) { + /* Disable LTSSM state machine to enable configuration */ + reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); + reg &= ~(PCIE_APP_LTSSM_EN); + dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); + } + + /* Set the device to root complex mode */ + reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); + reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT); + reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT; + dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); + + /* Set the PCIe master AxCache attributes */ + dw_pcie_writel_dbi(pci, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE); + dw_pcie_writel_dbi(pci, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE); + + /* Set the PCIe master AxDomain attributes */ + reg = dw_pcie_readl_dbi(pci, PCIE_ARUSER_REG); + reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); + reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; + dw_pcie_writel_dbi(pci, PCIE_ARUSER_REG, reg); + + reg = dw_pcie_readl_dbi(pci, PCIE_AWUSER_REG); + reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); + reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; + dw_pcie_writel_dbi(pci, PCIE_AWUSER_REG, reg); + + /* Enable INT A-D interrupts */ + reg = dw_pcie_readl_dbi(pci, 
PCIE_GLOBAL_INT_MASK1_REG); + reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK | + PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK; + dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg); + + if (!dw_pcie_link_up(pci)) { + /* Configuration done. Start LTSSM */ + reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); + reg |= PCIE_APP_LTSSM_EN; + dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); + } + + /* Wait until the link becomes active again */ + if (dw_pcie_wait_for_link(pci)) + dev_err(pci->dev, "Link not up after reconfiguration\n"); +} + +static int armada8k_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct armada8k_pcie *pcie = to_armada8k_pcie(pci); + + dw_pcie_setup_rc(pp); + armada8k_pcie_establish_link(pcie); + + return 0; +} + +static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) +{ + struct armada8k_pcie *pcie = arg; + struct dw_pcie *pci = pcie->pci; + u32 val; + + /* + * Interrupts are directly handled by the device driver of the + * PCI device. However, they are also latched into the PCIe + * controller, so we simply discard them. + */ + val = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG); + dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG, val); + + return IRQ_HANDLED; +} + +static const struct dw_pcie_host_ops armada8k_pcie_host_ops = { + .host_init = armada8k_pcie_host_init, +}; + +static int armada8k_add_pcie_port(struct armada8k_pcie *pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + + pp->ops = &armada8k_pcie_host_ops; + + pp->irq = platform_get_irq(pdev, 0); + if (pp->irq < 0) { + dev_err(dev, "failed to get irq for port\n"); + return pp->irq; + } + + ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler, + IRQF_SHARED, "armada8k-pcie", pcie); + if (ret) { + dev_err(dev, "failed to request irq %d\n", pp->irq); + return ret; + } + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host: %d\n", ret); + return ret; + } + + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .link_up = armada8k_pcie_link_up, +}; + +static int armada8k_pcie_probe(struct platform_device *pdev) +{ + struct dw_pcie *pci; + struct armada8k_pcie *pcie; + struct device *dev = &pdev->dev; + struct resource *base; + int ret; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + pcie->pci = pci; + + pcie->clk = devm_clk_get(dev, NULL); + if (IS_ERR(pcie->clk)) + return PTR_ERR(pcie->clk); + + ret = clk_prepare_enable(pcie->clk); + if (ret) + return ret; + + pcie->clk_reg = devm_clk_get(dev, "reg"); + if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) { + ret = -EPROBE_DEFER; + goto fail; + } + if (!IS_ERR(pcie->clk_reg)) { + ret = clk_prepare_enable(pcie->clk_reg); + if (ret) + goto fail_clkreg; + } + + /* Get the dw-pcie unit configuration/control registers base. 
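This "ctrl" region also contains the vendor-specific registers at PCIE_VENDOR_REGS_OFFSET (0x8000), which are reached through this same mapping via dw_pcie_readl_dbi()/dw_pcie_writel_dbi(). 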
*/ + base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, base); + if (IS_ERR(pci->dbi_base)) { + dev_err(dev, "couldn't remap regs base %p\n", base); + ret = PTR_ERR(pci->dbi_base); + goto fail_clkreg; + } + + platform_set_drvdata(pdev, pcie); + + ret = armada8k_add_pcie_port(pcie, pdev); + if (ret) + goto fail_clkreg; + + return 0; + +fail_clkreg: + clk_disable_unprepare(pcie->clk_reg); +fail: + clk_disable_unprepare(pcie->clk); + + return ret; +} + +static const struct of_device_id armada8k_pcie_of_match[] = { + { .compatible = "marvell,armada8k-pcie", }, + {}, +}; + +static struct platform_driver armada8k_pcie_driver = { + .probe = armada8k_pcie_probe, + .driver = { + .name = "armada8k-pcie", + .of_match_table = of_match_ptr(armada8k_pcie_of_match), + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(armada8k_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c new file mode 100644 index 000000000..dba83abfe --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -0,0 +1,617 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Axis ARTPEC-6 SoC + * + * Author: Niklas Cassel <niklas.cassel@axis.com> + * + * Based on work done by Phil Edworthy <phil@edworthys.org> + */ + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/of_device.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/resource.h> +#include <linux/signal.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> + +#include "pcie-designware.h" + +#define to_artpec6_pcie(x) dev_get_drvdata((x)->dev) + +enum artpec_pcie_variants { + ARTPEC6, + ARTPEC7, +}; + +struct artpec6_pcie { + struct dw_pcie *pci; + struct regmap *regmap; /* DT axis,syscon-pcie */ + void __iomem *phy_base; /* DT phy */ + enum artpec_pcie_variants variant; + enum dw_pcie_device_mode mode; +}; + +struct artpec_pcie_of_data { + enum artpec_pcie_variants variant; + enum dw_pcie_device_mode mode; +}; + +static const struct of_device_id artpec6_pcie_of_match[]; + +/* PCIe Port Logic registers (memory-mapped) */ +#define PL_OFFSET 0x700 + +#define ACK_F_ASPM_CTRL_OFF (PL_OFFSET + 0xc) +#define ACK_N_FTS_MASK GENMASK(15, 8) +#define ACK_N_FTS(x) (((x) << 8) & ACK_N_FTS_MASK) + +#define FAST_TRAINING_SEQ_MASK GENMASK(7, 0) +#define FAST_TRAINING_SEQ(x) (((x) << 0) & FAST_TRAINING_SEQ_MASK) + +/* ARTPEC-6 specific registers */ +#define PCIECFG 0x18 +#define PCIECFG_DBG_OEN BIT(24) +#define PCIECFG_CORE_RESET_REQ BIT(21) +#define PCIECFG_LTSSM_ENABLE BIT(20) +#define PCIECFG_DEVICE_TYPE_MASK GENMASK(19, 16) +#define PCIECFG_CLKREQ_B BIT(11) +#define PCIECFG_REFCLK_ENABLE BIT(10) +#define PCIECFG_PLL_ENABLE BIT(9) +#define PCIECFG_PCLK_ENABLE BIT(8) +#define PCIECFG_RISRCREN BIT(4) +#define PCIECFG_MODE_TX_DRV_EN BIT(3) +#define PCIECFG_CISRREN BIT(2) +#define PCIECFG_MACRO_ENABLE BIT(0) +/* ARTPEC-7 specific fields */ +#define PCIECFG_REFCLKSEL BIT(23) +#define PCIECFG_NOC_RESET BIT(3) + +#define PCIESTAT 0x1c +/* ARTPEC-7 specific fields */ +#define PCIESTAT_EXTREFCLK BIT(3) + +#define NOCCFG 0x40 +#define NOCCFG_ENABLE_CLK_PCIE BIT(4) +#define NOCCFG_POWER_PCIE_IDLEACK BIT(3) +#define NOCCFG_POWER_PCIE_IDLE BIT(2) +#define NOCCFG_POWER_PCIE_IDLEREQ BIT(1) + +#define PHY_STATUS 0x118 +#define PHY_COSPLLLOCK BIT(0) + +#define PHY_TX_ASIC_OUT 0x4040 +#define 
PHY_TX_ASIC_OUT_TX_ACK BIT(0) + +#define PHY_RX_ASIC_OUT 0x405c +#define PHY_RX_ASIC_OUT_ACK BIT(0) + +static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset) +{ + u32 val; + + regmap_read(artpec6_pcie->regmap, offset, &val); + return val; +} + +static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val) +{ + regmap_write(artpec6_pcie->regmap, offset, val); +} + +static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) +{ + struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); + struct pcie_port *pp = &pci->pp; + struct dw_pcie_ep *ep = &pci->ep; + + switch (artpec6_pcie->mode) { + case DW_PCIE_RC_TYPE: + return pci_addr - pp->cfg0_base; + case DW_PCIE_EP_TYPE: + return pci_addr - ep->phys_base; + default: + dev_err(pci->dev, "UNKNOWN device type\n"); + } + return pci_addr; +} + +static int artpec6_pcie_establish_link(struct dw_pcie *pci) +{ + struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); + u32 val; + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + val |= PCIECFG_LTSSM_ENABLE; + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); + + return 0; +} + +static void artpec6_pcie_stop_link(struct dw_pcie *pci) +{ + struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); + u32 val; + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + val &= ~PCIECFG_LTSSM_ENABLE; + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup, + .start_link = artpec6_pcie_establish_link, + .stop_link = artpec6_pcie_stop_link, +}; + +static void artpec6_pcie_wait_for_phy_a6(struct artpec6_pcie *artpec6_pcie) +{ + struct dw_pcie *pci = artpec6_pcie->pci; + struct device *dev = pci->dev; + u32 val; + unsigned int retries; + + retries = 50; + do { + usleep_range(1000, 2000); + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); + retries--; + } while (retries && + (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE))); + if (!retries) + dev_err(dev, "PCIe clock manager did not leave idle state\n"); + + retries = 50; + do { + usleep_range(1000, 2000); + val = readl(artpec6_pcie->phy_base + PHY_STATUS); + retries--; + } while (retries && !(val & PHY_COSPLLLOCK)); + if (!retries) + dev_err(dev, "PHY PLL did not lock\n"); +} + +static void artpec6_pcie_wait_for_phy_a7(struct artpec6_pcie *artpec6_pcie) +{ + struct dw_pcie *pci = artpec6_pcie->pci; + struct device *dev = pci->dev; + u32 val; + u16 phy_status_tx, phy_status_rx; + unsigned int retries; + + retries = 50; + do { + usleep_range(1000, 2000); + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); + retries--; + } while (retries && + (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE))); + if (!retries) + dev_err(dev, "PCIe clock manager did not leave idle state\n"); + + retries = 50; + do { + usleep_range(1000, 2000); + phy_status_tx = readw(artpec6_pcie->phy_base + PHY_TX_ASIC_OUT); + phy_status_rx = readw(artpec6_pcie->phy_base + PHY_RX_ASIC_OUT); + retries--; + } while (retries && ((phy_status_tx & PHY_TX_ASIC_OUT_TX_ACK) || + (phy_status_rx & PHY_RX_ASIC_OUT_ACK))); + if (!retries) + dev_err(dev, "PHY did not enter Pn state\n"); +} + +static void artpec6_pcie_wait_for_phy(struct artpec6_pcie *artpec6_pcie) +{ + switch (artpec6_pcie->variant) { + case ARTPEC6: + artpec6_pcie_wait_for_phy_a6(artpec6_pcie); + break; + case ARTPEC7: + artpec6_pcie_wait_for_phy_a7(artpec6_pcie); + break; + } +} + +static void artpec6_pcie_init_phy_a6(struct artpec6_pcie *artpec6_pcie) +{ + u32 val; + 
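+ /* + * Bring-up order, as implemented below: terminations and the PHY + * macro first, then the NoC PCIe clock, then PCLK and the PLL, and + * finally the release of the power idle request; the usleep_range() + * delays in between are presumably settle times for each stage. + */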
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */ + PCIECFG_MODE_TX_DRV_EN | + PCIECFG_CISRREN | /* Reference clock term. 100 Ohm */ + PCIECFG_MACRO_ENABLE; + val |= PCIECFG_REFCLK_ENABLE; + val &= ~PCIECFG_DBG_OEN; + val &= ~PCIECFG_CLKREQ_B; + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); + usleep_range(5000, 6000); + + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); + val |= NOCCFG_ENABLE_CLK_PCIE; + artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); + usleep_range(20, 30); + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE; + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); + usleep_range(6000, 7000); + + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); + val &= ~NOCCFG_POWER_PCIE_IDLEREQ; + artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); +} + +static void artpec6_pcie_init_phy_a7(struct artpec6_pcie *artpec6_pcie) +{ + struct dw_pcie *pci = artpec6_pcie->pci; + u32 val; + bool extrefclk; + + /* Check if external reference clock is connected */ + val = artpec6_pcie_readl(artpec6_pcie, PCIESTAT); + extrefclk = !!(val & PCIESTAT_EXTREFCLK); + dev_dbg(pci->dev, "Using reference clock: %s\n", + extrefclk ? "external" : "internal"); + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */ + PCIECFG_PCLK_ENABLE; + if (extrefclk) + val |= PCIECFG_REFCLKSEL; + else + val &= ~PCIECFG_REFCLKSEL; + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); + usleep_range(10, 20); + + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); + val |= NOCCFG_ENABLE_CLK_PCIE; + artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); + usleep_range(20, 30); + + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); + val &= ~NOCCFG_POWER_PCIE_IDLEREQ; + artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); +} + +static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie) +{ + switch (artpec6_pcie->variant) { + case ARTPEC6: + artpec6_pcie_init_phy_a6(artpec6_pcie); + break; + case ARTPEC7: + artpec6_pcie_init_phy_a7(artpec6_pcie); + break; + } +} + +static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie) +{ + struct dw_pcie *pci = artpec6_pcie->pci; + u32 val; + + if (artpec6_pcie->variant != ARTPEC7) + return; + + /* + * Increase the N_FTS (Number of Fast Training Sequences) + * to be transmitted when transitioning from L0s to L0. + */ + val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF); + val &= ~ACK_N_FTS_MASK; + val |= ACK_N_FTS(180); + dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val); + + /* + * Set the Number of Fast Training Sequences that the core + * advertises as its N_FTS during Gen2 or Gen3 link training. 
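+ * The same value (180 FTS) is used here as in the ACK_N_FTS field
+ * programmed above.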
+ */ + val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); + val &= ~FAST_TRAINING_SEQ_MASK; + val |= FAST_TRAINING_SEQ(180); + dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); +} + +static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie) +{ + u32 val; + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + switch (artpec6_pcie->variant) { + case ARTPEC6: + val |= PCIECFG_CORE_RESET_REQ; + break; + case ARTPEC7: + val &= ~PCIECFG_NOC_RESET; + break; + } + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); +} + +static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie) +{ + u32 val; + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + switch (artpec6_pcie->variant) { + case ARTPEC6: + val &= ~PCIECFG_CORE_RESET_REQ; + break; + case ARTPEC7: + val |= PCIECFG_NOC_RESET; + break; + } + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); + usleep_range(100, 200); +} + +static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie) +{ + struct dw_pcie *pci = artpec6_pcie->pci; + struct pcie_port *pp = &pci->pp; + + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); +} + +static int artpec6_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); + + artpec6_pcie_assert_core_reset(artpec6_pcie); + artpec6_pcie_init_phy(artpec6_pcie); + artpec6_pcie_deassert_core_reset(artpec6_pcie); + artpec6_pcie_wait_for_phy(artpec6_pcie); + artpec6_pcie_set_nfts(artpec6_pcie); + dw_pcie_setup_rc(pp); + artpec6_pcie_establish_link(pci); + dw_pcie_wait_for_link(pci); + artpec6_pcie_enable_interrupts(artpec6_pcie); + + return 0; +} + +static const struct dw_pcie_host_ops artpec6_pcie_host_ops = { + .host_init = artpec6_pcie_host_init, +}; + +static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = artpec6_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = pci->dev; + int ret; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq_byname(pdev, "msi"); + if (pp->msi_irq < 0) { + dev_err(dev, "failed to get MSI irq\n"); + return pp->msi_irq; + } + } + + pp->ops = &artpec6_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); + enum pci_barno bar; + + artpec6_pcie_assert_core_reset(artpec6_pcie); + artpec6_pcie_init_phy(artpec6_pcie); + artpec6_pcie_deassert_core_reset(artpec6_pcie); + artpec6_pcie_wait_for_phy(artpec6_pcie); + artpec6_pcie_set_nfts(artpec6_pcie); + + for (bar = BAR_0; bar <= BAR_5; bar++) + dw_pcie_ep_reset_bar(pci, bar); +} + +static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, + enum pci_epc_irq_type type, u16 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + switch (type) { + case PCI_EPC_IRQ_LEGACY: + dev_err(pci->dev, "EP cannot trigger legacy IRQs\n"); + return -EINVAL; + case PCI_EPC_IRQ_MSI: + return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); + default: + dev_err(pci->dev, "UNKNOWN IRQ type\n"); + } + + return 0; +} + +static struct dw_pcie_ep_ops pcie_ep_ops = { + .ep_init = artpec6_pcie_ep_init, + .raise_irq = artpec6_pcie_raise_irq, +}; + +static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie, + struct 
platform_device *pdev) +{ + int ret; + struct dw_pcie_ep *ep; + struct resource *res; + struct device *dev = &pdev->dev; + struct dw_pcie *pci = artpec6_pcie->pci; + + ep = &pci->ep; + ep->ops = &pcie_ep_ops; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); + pci->dbi_base2 = devm_ioremap_resource(dev, res); + if (IS_ERR(pci->dbi_base2)) + return PTR_ERR(pci->dbi_base2); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); + if (!res) + return -EINVAL; + + ep->phys_base = res->start; + ep->addr_size = resource_size(res); + + ret = dw_pcie_ep_init(ep); + if (ret) { + dev_err(dev, "failed to initialize endpoint\n"); + return ret; + } + + return 0; +} + +static int artpec6_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct artpec6_pcie *artpec6_pcie; + struct resource *dbi_base; + struct resource *phy_base; + int ret; + const struct of_device_id *match; + const struct artpec_pcie_of_data *data; + enum artpec_pcie_variants variant; + enum dw_pcie_device_mode mode; + + match = of_match_device(artpec6_pcie_of_match, dev); + if (!match) + return -EINVAL; + + data = (struct artpec_pcie_of_data *)match->data; + variant = (enum artpec_pcie_variants)data->variant; + mode = (enum dw_pcie_device_mode)data->mode; + + artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL); + if (!artpec6_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + artpec6_pcie->pci = pci; + artpec6_pcie->variant = variant; + artpec6_pcie->mode = mode; + + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + pci->dbi_base = devm_ioremap_resource(dev, dbi_base); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); + artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base); + if (IS_ERR(artpec6_pcie->phy_base)) + return PTR_ERR(artpec6_pcie->phy_base); + + artpec6_pcie->regmap = + syscon_regmap_lookup_by_phandle(dev->of_node, + "axis,syscon-pcie"); + if (IS_ERR(artpec6_pcie->regmap)) + return PTR_ERR(artpec6_pcie->regmap); + + platform_set_drvdata(pdev, artpec6_pcie); + + switch (artpec6_pcie->mode) { + case DW_PCIE_RC_TYPE: + if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST)) + return -ENODEV; + + ret = artpec6_add_pcie_port(artpec6_pcie, pdev); + if (ret < 0) + return ret; + break; + case DW_PCIE_EP_TYPE: { + u32 val; + + if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP)) + return -ENODEV; + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + val &= ~PCIECFG_DEVICE_TYPE_MASK; + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); + ret = artpec6_add_pcie_ep(artpec6_pcie, pdev); + if (ret < 0) + return ret; + break; + } + default: + dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode); + } + + return 0; +} + +static const struct artpec_pcie_of_data artpec6_pcie_rc_of_data = { + .variant = ARTPEC6, + .mode = DW_PCIE_RC_TYPE, +}; + +static const struct artpec_pcie_of_data artpec6_pcie_ep_of_data = { + .variant = ARTPEC6, + .mode = DW_PCIE_EP_TYPE, +}; + +static const struct artpec_pcie_of_data artpec7_pcie_rc_of_data = { + .variant = ARTPEC7, + .mode = DW_PCIE_RC_TYPE, +}; + +static const struct artpec_pcie_of_data artpec7_pcie_ep_of_data = { + .variant = ARTPEC7, + .mode = DW_PCIE_EP_TYPE, +}; + +static const struct of_device_id artpec6_pcie_of_match[] = { + { + .compatible = "axis,artpec6-pcie", + .data = 
&artpec6_pcie_rc_of_data, + }, + { + .compatible = "axis,artpec6-pcie-ep", + .data = &artpec6_pcie_ep_of_data, + }, + { + .compatible = "axis,artpec7-pcie", + .data = &artpec7_pcie_rc_of_data, + }, + { + .compatible = "axis,artpec7-pcie-ep", + .data = &artpec7_pcie_ep_of_data, + }, + {}, +}; + +static struct platform_driver artpec6_pcie_driver = { + .probe = artpec6_pcie_probe, + .driver = { + .name = "artpec6-pcie", + .of_match_table = artpec6_pcie_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(artpec6_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c new file mode 100644 index 000000000..a3d07d9c5 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -0,0 +1,586 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare PCIe Endpoint controller driver + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I <kishon@ti.com> + */ + +#include <linux/of.h> + +#include "pcie-designware.h" +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> + +void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) +{ + struct pci_epc *epc = ep->epc; + + pci_epc_linkup(epc); +} + +static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar, + int flags) +{ + u32 reg; + + reg = PCI_BASE_ADDRESS_0 + (4 * bar); + dw_pcie_dbi_ro_wr_en(pci); + dw_pcie_writel_dbi2(pci, reg, 0x0); + dw_pcie_writel_dbi(pci, reg, 0x0); + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { + dw_pcie_writel_dbi2(pci, reg + 4, 0x0); + dw_pcie_writel_dbi(pci, reg + 4, 0x0); + } + dw_pcie_dbi_ro_wr_dis(pci); +} + +void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) +{ + __dw_pcie_ep_reset_bar(pci, bar, 0); +} + +static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr, + u8 cap) +{ + u8 cap_id, next_cap_ptr; + u16 reg; + + if (!cap_ptr) + return 0; + + reg = dw_pcie_readw_dbi(pci, cap_ptr); + cap_id = (reg & 0x00ff); + + if (cap_id > PCI_CAP_ID_MAX) + return 0; + + if (cap_id == cap) + return cap_ptr; + + next_cap_ptr = (reg & 0xff00) >> 8; + return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); +} + +static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap) +{ + u8 next_cap_ptr; + u16 reg; + + reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); + next_cap_ptr = (reg & 0x00ff); + + return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); +} + +static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, + struct pci_epf_header *hdr) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + dw_pcie_dbi_ro_wr_en(pci); + dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid); + dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid); + dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid); + dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code); + dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, + hdr->subclass_code | hdr->baseclass_code << 8); + dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE, + hdr->cache_line_size); + dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID, + hdr->subsys_vendor_id); + dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id); + dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN, + hdr->interrupt_pin); + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} + +static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar, + dma_addr_t cpu_addr, + enum dw_pcie_as_type as_type) +{ + int ret; + u32 free_win; + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + free_win = 
find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows); + if (free_win >= ep->num_ib_windows) { + dev_err(pci->dev, "No free inbound window\n"); + return -EINVAL; + } + + ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr, + as_type); + if (ret < 0) { + dev_err(pci->dev, "Failed to program IB window\n"); + return ret; + } + + ep->bar_to_atu[bar] = free_win; + set_bit(free_win, ep->ib_window_map); + + return 0; +} + +static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr, + u64 pci_addr, size_t size) +{ + u32 free_win; + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows); + if (free_win >= ep->num_ob_windows) { + dev_err(pci->dev, "No free outbound window\n"); + return -EINVAL; + } + + dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM, + phys_addr, pci_addr, size); + + set_bit(free_win, ep->ob_window_map); + ep->outbound_addr[free_win] = phys_addr; + + return 0; +} + +static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, + struct pci_epf_bar *epf_bar) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + enum pci_barno bar = epf_bar->barno; + u32 atu_index = ep->bar_to_atu[bar]; + + __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags); + + dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND); + clear_bit(atu_index, ep->ib_window_map); +} + +static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, + struct pci_epf_bar *epf_bar) +{ + int ret; + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + enum pci_barno bar = epf_bar->barno; + size_t size = epf_bar->size; + int flags = epf_bar->flags; + enum dw_pcie_as_type as_type; + u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); + + if (!(flags & PCI_BASE_ADDRESS_SPACE)) + as_type = DW_PCIE_AS_MEM; + else + as_type = DW_PCIE_AS_IO; + + ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type); + if (ret) + return ret; + + dw_pcie_dbi_ro_wr_en(pci); + + dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1)); + dw_pcie_writel_dbi(pci, reg, flags); + + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { + dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1)); + dw_pcie_writel_dbi(pci, reg + 4, 0); + } + + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} + +static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, + u32 *atu_index) +{ + u32 index; + + for (index = 0; index < ep->num_ob_windows; index++) { + if (ep->outbound_addr[index] != addr) + continue; + *atu_index = index; + return 0; + } + + return -EINVAL; +} + +static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, + phys_addr_t addr) +{ + int ret; + u32 atu_index; + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + ret = dw_pcie_find_index(ep, addr, &atu_index); + if (ret < 0) + return; + + dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND); + clear_bit(atu_index, ep->ob_window_map); +} + +static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, + phys_addr_t addr, + u64 pci_addr, size_t size) +{ + int ret; + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size); + if (ret) { + dev_err(pci->dev, "Failed to enable address\n"); + return ret; + } + + return 0; +} + +static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct 
dw_pcie *pci = to_dw_pcie_from_ep(ep); + u32 val, reg; + + if (!ep->msi_cap) + return -EINVAL; + + reg = ep->msi_cap + PCI_MSI_FLAGS; + val = dw_pcie_readw_dbi(pci, reg); + if (!(val & PCI_MSI_FLAGS_ENABLE)) + return -EINVAL; + + val = (val & PCI_MSI_FLAGS_QSIZE) >> 4; + + return val; +} + +static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + u32 val, reg; + + if (!ep->msi_cap) + return -EINVAL; + + reg = ep->msi_cap + PCI_MSI_FLAGS; + val = dw_pcie_readw_dbi(pci, reg); + val &= ~PCI_MSI_FLAGS_QMASK; + val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK; + dw_pcie_dbi_ro_wr_en(pci); + dw_pcie_writew_dbi(pci, reg, val); + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} + +static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + u32 val, reg; + + if (!ep->msix_cap) + return -EINVAL; + + reg = ep->msix_cap + PCI_MSIX_FLAGS; + val = dw_pcie_readw_dbi(pci, reg); + if (!(val & PCI_MSIX_FLAGS_ENABLE)) + return -EINVAL; + + val &= PCI_MSIX_FLAGS_QSIZE; + + return val; +} + +static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + u32 val, reg; + + if (!ep->msix_cap) + return -EINVAL; + + reg = ep->msix_cap + PCI_MSIX_FLAGS; + val = dw_pcie_readw_dbi(pci, reg); + val &= ~PCI_MSIX_FLAGS_QSIZE; + val |= interrupts; + dw_pcie_dbi_ro_wr_en(pci); + dw_pcie_writew_dbi(pci, reg, val); + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} + +static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, + enum pci_epc_irq_type type, u16 interrupt_num) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + + if (!ep->ops->raise_irq) + return -EINVAL; + + return ep->ops->raise_irq(ep, func_no, type, interrupt_num); +} + +static void dw_pcie_ep_stop(struct pci_epc *epc) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + if (!pci->ops->stop_link) + return; + + pci->ops->stop_link(pci); +} + +static int dw_pcie_ep_start(struct pci_epc *epc) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + if (!pci->ops->start_link) + return -EINVAL; + + return pci->ops->start_link(pci); +} + +static const struct pci_epc_ops epc_ops = { + .write_header = dw_pcie_ep_write_header, + .set_bar = dw_pcie_ep_set_bar, + .clear_bar = dw_pcie_ep_clear_bar, + .map_addr = dw_pcie_ep_map_addr, + .unmap_addr = dw_pcie_ep_unmap_addr, + .set_msi = dw_pcie_ep_set_msi, + .get_msi = dw_pcie_ep_get_msi, + .set_msix = dw_pcie_ep_set_msix, + .get_msix = dw_pcie_ep_get_msix, + .raise_irq = dw_pcie_ep_raise_irq, + .start = dw_pcie_ep_start, + .stop = dw_pcie_ep_stop, +}; + +int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct device *dev = pci->dev; + + dev_err(dev, "EP cannot trigger legacy IRQs\n"); + + return -EINVAL; +} + +int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, + u8 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct pci_epc *epc = ep->epc; + unsigned int aligned_offset; + u16 msg_ctrl, msg_data; + u32 msg_addr_lower, msg_addr_upper, reg; + u64 msg_addr; + bool has_upper; + int ret; + + if (!ep->msi_cap) + return -EINVAL; + + /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 
6.8.1. */ + reg = ep->msi_cap + PCI_MSI_FLAGS; + msg_ctrl = dw_pcie_readw_dbi(pci, reg); + has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT); + reg = ep->msi_cap + PCI_MSI_ADDRESS_LO; + msg_addr_lower = dw_pcie_readl_dbi(pci, reg); + if (has_upper) { + reg = ep->msi_cap + PCI_MSI_ADDRESS_HI; + msg_addr_upper = dw_pcie_readl_dbi(pci, reg); + reg = ep->msi_cap + PCI_MSI_DATA_64; + msg_data = dw_pcie_readw_dbi(pci, reg); + } else { + msg_addr_upper = 0; + reg = ep->msi_cap + PCI_MSI_DATA_32; + msg_data = dw_pcie_readw_dbi(pci, reg); + } + aligned_offset = msg_addr_lower & (epc->mem->page_size - 1); + msg_addr = ((u64)msg_addr_upper) << 32 | + (msg_addr_lower & ~aligned_offset); + ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, + epc->mem->page_size); + if (ret) + return ret; + + writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset); + + dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); + + return 0; +} + +int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, + u16 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct pci_epc *epc = ep->epc; + u16 tbl_offset, bir; + u32 bar_addr_upper, bar_addr_lower; + u32 msg_addr_upper, msg_addr_lower; + u32 reg, msg_data, vec_ctrl; + u64 tbl_addr, msg_addr, reg_u64; + void __iomem *msix_tbl; + int ret; + + reg = ep->msix_cap + PCI_MSIX_TABLE; + tbl_offset = dw_pcie_readl_dbi(pci, reg); + bir = (tbl_offset & PCI_MSIX_TABLE_BIR); + tbl_offset &= PCI_MSIX_TABLE_OFFSET; + + reg = PCI_BASE_ADDRESS_0 + (4 * bir); + bar_addr_upper = 0; + bar_addr_lower = dw_pcie_readl_dbi(pci, reg); + reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK); + if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64) + bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4); + + tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower; + tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE)); + tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK; + + msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr, + PCI_MSIX_ENTRY_SIZE); + if (!msix_tbl) + return -EINVAL; + + msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR); + msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR); + msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; + msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA); + vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL); + + iounmap(msix_tbl); + + if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) + return -EPERM; + + ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, + epc->mem->page_size); + if (ret) + return ret; + + writel(msg_data, ep->msi_mem); + + dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); + + return 0; +} + +void dw_pcie_ep_exit(struct dw_pcie_ep *ep) +{ + struct pci_epc *epc = ep->epc; + + pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, + epc->mem->page_size); + + pci_epc_mem_exit(epc); +} + +int dw_pcie_ep_init(struct dw_pcie_ep *ep) +{ + int ret; + void *addr; + struct pci_epc *epc; + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; + + if (!pci->dbi_base || !pci->dbi_base2) { + dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); + return -EINVAL; + } + + ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); + if (ret < 0) { + dev_err(dev, "Unable to read *num-ib-windows* property\n"); + return ret; + } + if (ep->num_ib_windows > MAX_IATU_IN) { + dev_err(dev, "Invalid *num-ib-windows*\n"); + return -EINVAL; + } + + ret = of_property_read_u32(np, 
"num-ob-windows", &ep->num_ob_windows); + if (ret < 0) { + dev_err(dev, "Unable to read *num-ob-windows* property\n"); + return ret; + } + if (ep->num_ob_windows > MAX_IATU_OUT) { + dev_err(dev, "Invalid *num-ob-windows*\n"); + return -EINVAL; + } + + ep->ib_window_map = devm_kcalloc(dev, + BITS_TO_LONGS(ep->num_ib_windows), + sizeof(long), + GFP_KERNEL); + if (!ep->ib_window_map) + return -ENOMEM; + + ep->ob_window_map = devm_kcalloc(dev, + BITS_TO_LONGS(ep->num_ob_windows), + sizeof(long), + GFP_KERNEL); + if (!ep->ob_window_map) + return -ENOMEM; + + addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t), + GFP_KERNEL); + if (!addr) + return -ENOMEM; + ep->outbound_addr = addr; + + epc = devm_pci_epc_create(dev, &epc_ops); + if (IS_ERR(epc)) { + dev_err(dev, "Failed to create epc device\n"); + return PTR_ERR(epc); + } + + ep->epc = epc; + epc_set_drvdata(epc, ep); + + if (ep->ops->ep_init) + ep->ops->ep_init(ep); + + ret = of_property_read_u8(np, "max-functions", &epc->max_functions); + if (ret < 0) + epc->max_functions = 1; + + ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, + ep->page_size); + if (ret < 0) { + dev_err(dev, "Failed to initialize address space\n"); + return ret; + } + + ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys, + epc->mem->page_size); + if (!ep->msi_mem) { + dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n"); + return -ENOMEM; + } + ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI); + + ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX); + + dw_pcie_setup(pci); + + return 0; +} diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c new file mode 100644 index 000000000..be62f654c --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -0,0 +1,739 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare PCIe host controller driver + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com + * + * Author: Jingoo Han <jg1.han@samsung.com> + */ + +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> +#include <linux/pci_regs.h> +#include <linux/platform_device.h> + +#include "../../pci.h" +#include "pcie-designware.h" + +static struct pci_ops dw_pcie_ops; + +static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, + u32 *val) +{ + struct dw_pcie *pci; + + if (pp->ops->rd_own_conf) + return pp->ops->rd_own_conf(pp, where, size, val); + + pci = to_dw_pcie_from_pp(pp); + return dw_pcie_read(pci->dbi_base + where, size, val); +} + +static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, + u32 val) +{ + struct dw_pcie *pci; + + if (pp->ops->wr_own_conf) + return pp->ops->wr_own_conf(pp, where, size, val); + + pci = to_dw_pcie_from_pp(pp); + return dw_pcie_write(pci->dbi_base + where, size, val); +} + +static void dw_msi_ack_irq(struct irq_data *d) +{ + irq_chip_ack_parent(d); +} + +static void dw_msi_mask_irq(struct irq_data *d) +{ + pci_msi_mask_irq(d); + irq_chip_mask_parent(d); +} + +static void dw_msi_unmask_irq(struct irq_data *d) +{ + pci_msi_unmask_irq(d); + irq_chip_unmask_parent(d); +} + +static struct irq_chip dw_pcie_msi_irq_chip = { + .name = "PCI-MSI", + .irq_ack = dw_msi_ack_irq, + .irq_mask = dw_msi_mask_irq, + .irq_unmask = dw_msi_unmask_irq, +}; + +static struct msi_domain_info dw_pcie_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), + .chip = &dw_pcie_msi_irq_chip, +}; + +/* MSI int handler */ +irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) +{ + int i, pos, irq; + unsigned long val; + u32 status, num_ctrls; + irqreturn_t ret = IRQ_NONE; + + num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; + + for (i = 0; i < num_ctrls; i++) { + dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + + (i * MSI_REG_CTRL_BLOCK_SIZE), + 4, &status); + if (!status) + continue; + + ret = IRQ_HANDLED; + val = status; + pos = 0; + while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, + pos)) != MAX_MSI_IRQS_PER_CTRL) { + irq = irq_find_mapping(pp->irq_domain, + (i * MAX_MSI_IRQS_PER_CTRL) + + pos); + generic_handle_irq(irq); + pos++; + } + } + + return ret; +} + +/* Chained MSI interrupt service routine */ +static void dw_chained_msi_isr(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct pcie_port *pp; + + chained_irq_enter(chip, desc); + + pp = irq_desc_get_handler_data(desc); + dw_handle_msi_irq(pp); + + chained_irq_exit(chip, desc); +} + +static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(data); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + u64 msi_target; + + if (pp->ops->get_msi_addr) + msi_target = pp->ops->get_msi_addr(pp); + else + msi_target = (u64)pp->msi_data; + + msg->address_lo = lower_32_bits(msi_target); + msg->address_hi = upper_32_bits(msi_target); + + if (pp->ops->get_msi_data) + msg->data = pp->ops->get_msi_data(pp, data->hwirq); + else + msg->data = data->hwirq; + + dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", + (int)data->hwirq, msg->address_hi, msg->address_lo); +} + +static int dw_pci_msi_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) +{ + return -EINVAL; +} + +static void dw_pci_bottom_mask(struct irq_data *data) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(data); + 
unsigned int res, bit, ctrl; + unsigned long flags; + + raw_spin_lock_irqsave(&pp->lock, flags); + + if (pp->ops->msi_clear_irq) { + pp->ops->msi_clear_irq(pp, data->hwirq); + } else { + ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; + + pp->irq_status[ctrl] &= ~(1 << bit); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, + ~pp->irq_status[ctrl]); + } + + raw_spin_unlock_irqrestore(&pp->lock, flags); +} + +static void dw_pci_bottom_unmask(struct irq_data *data) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(data); + unsigned int res, bit, ctrl; + unsigned long flags; + + raw_spin_lock_irqsave(&pp->lock, flags); + + if (pp->ops->msi_set_irq) { + pp->ops->msi_set_irq(pp, data->hwirq); + } else { + ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; + + pp->irq_status[ctrl] |= 1 << bit; + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, + ~pp->irq_status[ctrl]); + } + + raw_spin_unlock_irqrestore(&pp->lock, flags); +} + +static void dw_pci_bottom_ack(struct irq_data *d) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + unsigned int res, bit, ctrl; + unsigned long flags; + + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; + + raw_spin_lock_irqsave(&pp->lock, flags); + + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit); + + if (pp->ops->msi_irq_ack) + pp->ops->msi_irq_ack(d->hwirq, pp); + + raw_spin_unlock_irqrestore(&pp->lock, flags); +} + +static struct irq_chip dw_pci_msi_bottom_irq_chip = { + .name = "DWPCI-MSI", + .irq_ack = dw_pci_bottom_ack, + .irq_compose_msi_msg = dw_pci_setup_msi_msg, + .irq_set_affinity = dw_pci_msi_set_affinity, + .irq_mask = dw_pci_bottom_mask, + .irq_unmask = dw_pci_bottom_unmask, +}; + +static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + struct pcie_port *pp = domain->host_data; + unsigned long flags; + u32 i; + int bit; + + raw_spin_lock_irqsave(&pp->lock, flags); + + bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors, + order_base_2(nr_irqs)); + + raw_spin_unlock_irqrestore(&pp->lock, flags); + + if (bit < 0) + return -ENOSPC; + + for (i = 0; i < nr_irqs; i++) + irq_domain_set_info(domain, virq + i, bit + i, + &dw_pci_msi_bottom_irq_chip, + pp, handle_edge_irq, + NULL, NULL); + + return 0; +} + +static void dw_pcie_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *data = irq_domain_get_irq_data(domain, virq); + struct pcie_port *pp = irq_data_get_irq_chip_data(data); + unsigned long flags; + + raw_spin_lock_irqsave(&pp->lock, flags); + + bitmap_release_region(pp->msi_irq_in_use, data->hwirq, + order_base_2(nr_irqs)); + + raw_spin_unlock_irqrestore(&pp->lock, flags); +} + +static const struct irq_domain_ops dw_pcie_msi_domain_ops = { + .alloc = dw_pcie_irq_domain_alloc, + .free = dw_pcie_irq_domain_free, +}; + +int dw_pcie_allocate_domains(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node); + + pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors, + &dw_pcie_msi_domain_ops, pp); + if (!pp->irq_domain) { + dev_err(pci->dev, "Failed to create IRQ domain\n"); + return -ENOMEM; + } + + irq_domain_update_bus_token(pp->irq_domain, 
DOMAIN_BUS_NEXUS); + + pp->msi_domain = pci_msi_create_irq_domain(fwnode, + &dw_pcie_msi_domain_info, + pp->irq_domain); + if (!pp->msi_domain) { + dev_err(pci->dev, "Failed to create MSI domain\n"); + irq_domain_remove(pp->irq_domain); + return -ENOMEM; + } + + return 0; +} + +void dw_pcie_free_msi(struct pcie_port *pp) +{ + irq_set_chained_handler(pp->msi_irq, NULL); + irq_set_handler_data(pp->msi_irq, NULL); + + irq_domain_remove(pp->msi_domain); + irq_domain_remove(pp->irq_domain); + + if (pp->msi_page) + __free_page(pp->msi_page); +} + +void dw_pcie_msi_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct device *dev = pci->dev; + u64 msi_target; + + pp->msi_page = alloc_page(GFP_KERNEL); + pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, pp->msi_data)) { + dev_err(dev, "Failed to map MSI data\n"); + __free_page(pp->msi_page); + pp->msi_page = NULL; + return; + } + msi_target = (u64)pp->msi_data; + + /* Program the msi_data */ + dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, + lower_32_bits(msi_target)); + dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, + upper_32_bits(msi_target)); +} + +int dw_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; + struct platform_device *pdev = to_platform_device(dev); + struct resource_entry *win, *tmp; + struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; + struct resource *cfg_res; + int ret; + + raw_spin_lock_init(&pci->pp.lock); + + cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); + if (cfg_res) { + pp->cfg0_size = resource_size(cfg_res) >> 1; + pp->cfg1_size = resource_size(cfg_res) >> 1; + pp->cfg0_base = cfg_res->start; + pp->cfg1_base = cfg_res->start + pp->cfg0_size; + } else if (!pp->va_cfg0_base) { + dev_err(dev, "Missing *config* reg space\n"); + } + + bridge = devm_pci_alloc_host_bridge(dev, 0); + if (!bridge) + return -ENOMEM; + + ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, + &bridge->windows, &pp->io_base); + if (ret) + return ret; + + ret = devm_request_pci_bus_resources(dev, &bridge->windows); + if (ret) + return ret; + + /* Get the I/O and memory ranges from DT */ + resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { + switch (resource_type(win->res)) { + case IORESOURCE_IO: + ret = devm_pci_remap_iospace(dev, win->res, + pp->io_base); + if (ret) { + dev_warn(dev, "Error %d: failed to map resource %pR\n", + ret, win->res); + resource_list_destroy_entry(win); + } else { + pp->io = win->res; + pp->io->name = "I/O"; + pp->io_size = resource_size(pp->io); + pp->io_bus_addr = pp->io->start - win->offset; + } + break; + case IORESOURCE_MEM: + pp->mem = win->res; + pp->mem->name = "MEM"; + pp->mem_size = resource_size(pp->mem); + pp->mem_bus_addr = pp->mem->start - win->offset; + break; + case 0: + pp->cfg = win->res; + pp->cfg0_size = resource_size(pp->cfg) >> 1; + pp->cfg1_size = resource_size(pp->cfg) >> 1; + pp->cfg0_base = pp->cfg->start; + pp->cfg1_base = pp->cfg->start + pp->cfg0_size; + break; + case IORESOURCE_BUS: + pp->busn = win->res; + break; + } + } + + if (!pci->dbi_base) { + pci->dbi_base = devm_pci_remap_cfgspace(dev, + pp->cfg->start, + resource_size(pp->cfg)); + if (!pci->dbi_base) { + dev_err(dev, "Error with ioremap\n"); + return -ENOMEM; + } + } + + pp->mem_base = pp->mem->start; + + if (!pp->va_cfg0_base) { + pp->va_cfg0_base = devm_pci_remap_cfgspace(dev, + 
pp->cfg0_base, pp->cfg0_size); + if (!pp->va_cfg0_base) { + dev_err(dev, "Error with ioremap in function\n"); + return -ENOMEM; + } + } + + if (!pp->va_cfg1_base) { + pp->va_cfg1_base = devm_pci_remap_cfgspace(dev, + pp->cfg1_base, + pp->cfg1_size); + if (!pp->va_cfg1_base) { + dev_err(dev, "Error with ioremap\n"); + return -ENOMEM; + } + } + + ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport); + if (ret) + pci->num_viewport = 2; + + if (pci_msi_enabled()) { + /* + * If a specific SoC driver needs to change the + * default number of vectors, it needs to implement + * the set_num_vectors callback. + */ + if (!pp->ops->set_num_vectors) { + pp->num_vectors = MSI_DEF_NUM_VECTORS; + } else { + pp->ops->set_num_vectors(pp); + + if (pp->num_vectors > MAX_MSI_IRQS || + pp->num_vectors == 0) { + dev_err(dev, + "Invalid number of vectors\n"); + return -EINVAL; + } + } + + if (!pp->ops->msi_host_init) { + ret = dw_pcie_allocate_domains(pp); + if (ret) + return ret; + + if (pp->msi_irq) + irq_set_chained_handler_and_data(pp->msi_irq, + dw_chained_msi_isr, + pp); + } else { + ret = pp->ops->msi_host_init(pp); + if (ret < 0) + return ret; + } + } + + if (pp->ops->host_init) { + ret = pp->ops->host_init(pp); + if (ret) + goto err_free_msi; + } + + pp->root_bus_nr = pp->busn->start; + + bridge->dev.parent = dev; + bridge->sysdata = pp; + bridge->busnr = pp->root_bus_nr; + bridge->ops = &dw_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret) + goto err_free_msi; + + bus = bridge->bus; + + if (pp->ops->scan_bus) + pp->ops->scan_bus(pp); + + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(bus); + return 0; + +err_free_msi: + if (pci_msi_enabled() && !pp->ops->msi_host_init) + dw_pcie_free_msi(pp); + return ret; +} + +static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + u32 devfn, int where, int size, u32 *val) +{ + int ret, type; + u32 busdev, cfg_size; + u64 cpu_addr; + void __iomem *va_cfg_base; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + if (pp->ops->rd_other_conf) + return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val); + + busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | + PCIE_ATU_FUNC(PCI_FUNC(devfn)); + + if (bus->parent->number == pp->root_bus_nr) { + type = PCIE_ATU_TYPE_CFG0; + cpu_addr = pp->cfg0_base; + cfg_size = pp->cfg0_size; + va_cfg_base = pp->va_cfg0_base; + } else { + type = PCIE_ATU_TYPE_CFG1; + cpu_addr = pp->cfg1_base; + cfg_size = pp->cfg1_size; + va_cfg_base = pp->va_cfg1_base; + } + + dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, + type, cpu_addr, + busdev, cfg_size); + ret = dw_pcie_read(va_cfg_base + where, size, val); + if (pci->num_viewport <= 2) + dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, + PCIE_ATU_TYPE_IO, pp->io_base, + pp->io_bus_addr, pp->io_size); + + return ret; +} + +static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, + u32 devfn, int where, int size, u32 val) +{ + int ret, type; + u32 busdev, cfg_size; + u64 cpu_addr; + void __iomem *va_cfg_base; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + if (pp->ops->wr_other_conf) + return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val); + + busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | + PCIE_ATU_FUNC(PCI_FUNC(devfn)); + + if 
(bus->parent->number == pp->root_bus_nr) { + type = PCIE_ATU_TYPE_CFG0; + cpu_addr = pp->cfg0_base; + cfg_size = pp->cfg0_size; + va_cfg_base = pp->va_cfg0_base; + } else { + type = PCIE_ATU_TYPE_CFG1; + cpu_addr = pp->cfg1_base; + cfg_size = pp->cfg1_size; + va_cfg_base = pp->va_cfg1_base; + } + + dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, + type, cpu_addr, + busdev, cfg_size); + ret = dw_pcie_write(va_cfg_base + where, size, val); + if (pci->num_viewport <= 2) + dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, + PCIE_ATU_TYPE_IO, pp->io_base, + pp->io_bus_addr, pp->io_size); + + return ret; +} + +static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus, + int dev) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + /* If there is no link, then there is no device */ + if (bus->number != pp->root_bus_nr) { + if (!dw_pcie_link_up(pci)) + return 0; + } + + /* Access only one slot on each root port */ + if (bus->number == pp->root_bus_nr && dev > 0) + return 0; + + return 1; +} + +static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, + int size, u32 *val) +{ + struct pcie_port *pp = bus->sysdata; + + if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) { + *val = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (bus->number == pp->root_bus_nr) + return dw_pcie_rd_own_conf(pp, where, size, val); + + return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val); +} + +static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + struct pcie_port *pp = bus->sysdata; + + if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (bus->number == pp->root_bus_nr) + return dw_pcie_wr_own_conf(pp, where, size, val); + + return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val); +} + +static struct pci_ops dw_pcie_ops = { + .read = dw_pcie_rd_conf, + .write = dw_pcie_wr_conf, +}; + +static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) +{ + u32 val; + + val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); + if (val == 0xffffffff) + return 1; + + return 0; +} + +void dw_pcie_setup_rc(struct pcie_port *pp) +{ + u32 val, ctrl, num_ctrls; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + dw_pcie_setup(pci); + + num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; + + /* Initialize IRQ Status array */ + for (ctrl = 0; ctrl < num_ctrls; ctrl++) { + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + 4, ~0); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + 4, ~0); + pp->irq_status[ctrl] = 0; + } + + /* Setup RC BARs */ + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004); + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000); + + /* Setup interrupt pins */ + dw_pcie_dbi_ro_wr_en(pci); + val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE); + val &= 0xffff00ff; + val |= 0x00000100; + dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val); + dw_pcie_dbi_ro_wr_dis(pci); + + /* Setup bus numbers */ + val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); + val &= 0xff000000; + val |= 0x00ff0100; + dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); + + /* Setup command register */ + val = dw_pcie_readl_dbi(pci, PCI_COMMAND); + val &= 0xffff0000; + val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER | PCI_COMMAND_SERR; + dw_pcie_writel_dbi(pci, PCI_COMMAND, val); + + /* + * If the platform provides ->rd_other_conf, it means the platform + * uses its own address translation component rather than ATU, so + 
* we should not program the ATU here. + */ + if (!pp->ops->rd_other_conf) { + /* Get iATU unroll support */ + pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci); + dev_dbg(pci->dev, "iATU unroll: %s\n", + pci->iatu_unroll_enabled ? "enabled" : "disabled"); + + dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_MEM, pp->mem_base, + pp->mem_bus_addr, pp->mem_size); + if (pci->num_viewport > 2) + dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2, + PCIE_ATU_TYPE_IO, pp->io_base, + pp->io_bus_addr, pp->io_size); + } + + dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); + + /* Enable write permission for the DBI read-only register */ + dw_pcie_dbi_ro_wr_en(pci); + /* Program correct class for RC */ + dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); + /* Better disable write permission right after the update */ + dw_pcie_dbi_ro_wr_dis(pci); + + dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); + val |= PORT_LOGIC_SPEED_CHANGE; + dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); +} diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c new file mode 100644 index 000000000..c12bf794d --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe RC driver for Synopsys DesignWare Core + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto <Joao.Pinto@synopsys.com> + */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/resource.h> +#include <linux/signal.h> +#include <linux/types.h> +#include <linux/regmap.h> + +#include "pcie-designware.h" + +struct dw_plat_pcie { + struct dw_pcie *pci; + struct regmap *regmap; + enum dw_pcie_device_mode mode; +}; + +struct dw_plat_pcie_of_data { + enum dw_pcie_device_mode mode; +}; + +static const struct of_device_id dw_plat_pcie_of_match[]; + +static int dw_plat_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + dw_pcie_setup_rc(pp); + dw_pcie_wait_for_link(pci); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); + + return 0; +} + +static void dw_plat_set_num_vectors(struct pcie_port *pp) +{ + pp->num_vectors = MAX_MSI_IRQS; +} + +static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = { + .host_init = dw_plat_pcie_host_init, + .set_num_vectors = dw_plat_set_num_vectors, +}; + +static int dw_plat_pcie_establish_link(struct dw_pcie *pci) +{ + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .start_link = dw_plat_pcie_establish_link, +}; + +static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct pci_epc *epc = ep->epc; + enum pci_barno bar; + + for (bar = BAR_0; bar <= BAR_5; bar++) + dw_pcie_ep_reset_bar(pci, bar); + + epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER; + epc->features |= EPC_FEATURE_MSIX_AVAILABLE; +} + +static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, + enum pci_epc_irq_type type, + u16 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + switch (type) { + case PCI_EPC_IRQ_LEGACY: + return dw_pcie_ep_raise_legacy_irq(ep, func_no); + case PCI_EPC_IRQ_MSI: + return 
dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); + case PCI_EPC_IRQ_MSIX: + return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); + default: + dev_err(pci->dev, "UNKNOWN IRQ type\n"); + } + + return 0; +} + +static struct dw_pcie_ep_ops pcie_ep_ops = { + .ep_init = dw_plat_pcie_ep_init, + .raise_irq = dw_plat_pcie_ep_raise_irq, +}; + +static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = dw_plat_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + + pp->irq = platform_get_irq(pdev, 1); + if (pp->irq < 0) + return pp->irq; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq(pdev, 0); + if (pp->msi_irq < 0) + return pp->msi_irq; + } + + pp->ops = &dw_plat_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "Failed to initialize host\n"); + return ret; + } + + return 0; +} + +static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie, + struct platform_device *pdev) +{ + int ret; + struct dw_pcie_ep *ep; + struct resource *res; + struct device *dev = &pdev->dev; + struct dw_pcie *pci = dw_plat_pcie->pci; + + ep = &pci->ep; + ep->ops = &pcie_ep_ops; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); + pci->dbi_base2 = devm_ioremap_resource(dev, res); + if (IS_ERR(pci->dbi_base2)) + return PTR_ERR(pci->dbi_base2); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); + if (!res) + return -EINVAL; + + ep->phys_base = res->start; + ep->addr_size = resource_size(res); + + ret = dw_pcie_ep_init(ep); + if (ret) { + dev_err(dev, "Failed to initialize endpoint\n"); + return ret; + } + return 0; +} + +static int dw_plat_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_plat_pcie *dw_plat_pcie; + struct dw_pcie *pci; + struct resource *res; /* Resource from DT */ + int ret; + const struct of_device_id *match; + const struct dw_plat_pcie_of_data *data; + enum dw_pcie_device_mode mode; + + match = of_match_device(dw_plat_pcie_of_match, dev); + if (!match) + return -EINVAL; + + data = (struct dw_plat_pcie_of_data *)match->data; + mode = (enum dw_pcie_device_mode)data->mode; + + dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL); + if (!dw_plat_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + dw_plat_pcie->pci = pci; + dw_plat_pcie->mode = mode; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + pci->dbi_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + platform_set_drvdata(pdev, dw_plat_pcie); + + switch (dw_plat_pcie->mode) { + case DW_PCIE_RC_TYPE: + if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST)) + return -ENODEV; + + ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev); + if (ret < 0) + return ret; + break; + case DW_PCIE_EP_TYPE: + if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP)) + return -ENODEV; + + ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev); + if (ret < 0) + return ret; + break; + default: + dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode); + } + + return 0; +} + +static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = { + .mode = DW_PCIE_RC_TYPE, +}; + +static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = { + .mode = DW_PCIE_EP_TYPE, +}; + +static const 
struct of_device_id dw_plat_pcie_of_match[] = { + { + .compatible = "snps,dw-pcie", + .data = &dw_plat_pcie_rc_of_data, + }, + { + .compatible = "snps,dw-pcie-ep", + .data = &dw_plat_pcie_ep_of_data, + }, + {}, +}; + +static struct platform_driver dw_plat_pcie_driver = { + .driver = { + .name = "dw-pcie", + .of_match_table = dw_plat_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = dw_plat_pcie_probe, +}; +builtin_platform_driver(dw_plat_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c new file mode 100644 index 000000000..2153956a0 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -0,0 +1,394 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare PCIe host controller driver + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Jingoo Han <jg1.han@samsung.com> + */ + +#include <linux/delay.h> +#include <linux/of.h> +#include <linux/types.h> + +#include "pcie-designware.h" + +/* PCIe Port Logic registers */ +#define PLR_OFFSET 0x700 +#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c) +#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4) +#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29) + +int dw_pcie_read(void __iomem *addr, int size, u32 *val) +{ + if ((uintptr_t)addr & (size - 1)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (size == 4) { + *val = readl(addr); + } else if (size == 2) { + *val = readw(addr); + } else if (size == 1) { + *val = readb(addr); + } else { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + return PCIBIOS_SUCCESSFUL; +} + +int dw_pcie_write(void __iomem *addr, int size, u32 val) +{ + if ((uintptr_t)addr & (size - 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (size == 4) + writel(val, addr); + else if (size == 2) + writew(val, addr); + else if (size == 1) + writeb(val, addr); + else + return PCIBIOS_BAD_REGISTER_NUMBER; + + return PCIBIOS_SUCCESSFUL; +} + +u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size) +{ + int ret; + u32 val; + + if (pci->ops->read_dbi) + return pci->ops->read_dbi(pci, base, reg, size); + + ret = dw_pcie_read(base + reg, size, &val); + if (ret) + dev_err(pci->dev, "Read DBI address failed\n"); + + return val; +} + +void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size, u32 val) +{ + int ret; + + if (pci->ops->write_dbi) { + pci->ops->write_dbi(pci, base, reg, size, val); + return; + } + + ret = dw_pcie_write(base + reg, size, val); + if (ret) + dev_err(pci->dev, "Write DBI address failed\n"); +} + +static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) +{ + u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); + + return dw_pcie_readl_dbi(pci, offset + reg); +} + +static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg, + u32 val) +{ + u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); + + dw_pcie_writel_dbi(pci, offset + reg, val); +} + +static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, + int type, u64 cpu_addr, + u64 pci_addr, u32 size) +{ + u32 retries, val; + + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE, + lower_32_bits(cpu_addr)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE, + upper_32_bits(cpu_addr)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT, + lower_32_bits(cpu_addr + size - 1)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, + 
lower_32_bits(pci_addr)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, + upper_32_bits(pci_addr)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, + type); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, + PCIE_ATU_ENABLE); + + /* + * Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. + */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = dw_pcie_readl_ob_unroll(pci, index, + PCIE_ATU_UNR_REGION_CTRL2); + if (val & PCIE_ATU_ENABLE) + return; + + mdelay(LINK_WAIT_IATU); + } + dev_err(pci->dev, "Outbound iATU is not being enabled\n"); +} + +void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, + u64 cpu_addr, u64 pci_addr, u32 size) +{ + u32 retries, val; + + if (pci->ops->cpu_addr_fixup) + cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); + + if (pci->iatu_unroll_enabled) { + dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr, + pci_addr, size); + return; + } + + dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, + PCIE_ATU_REGION_OUTBOUND | index); + dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE, + lower_32_bits(cpu_addr)); + dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE, + upper_32_bits(cpu_addr)); + dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT, + lower_32_bits(cpu_addr + size - 1)); + dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, + lower_32_bits(pci_addr)); + dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, + upper_32_bits(pci_addr)); + dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type); + dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE); + + /* + * Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. + */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); + if (val & PCIE_ATU_ENABLE) + return; + + mdelay(LINK_WAIT_IATU); + } + dev_err(pci->dev, "Outbound iATU is not being enabled\n"); +} + +static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg) +{ + u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); + + return dw_pcie_readl_dbi(pci, offset + reg); +} + +static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg, + u32 val) +{ + u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); + + dw_pcie_writel_dbi(pci, offset + reg, val); +} + +static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, + int bar, u64 cpu_addr, + enum dw_pcie_as_type as_type) +{ + int type; + u32 retries, val; + + dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, + lower_32_bits(cpu_addr)); + dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, + upper_32_bits(cpu_addr)); + + switch (as_type) { + case DW_PCIE_AS_MEM: + type = PCIE_ATU_TYPE_MEM; + break; + case DW_PCIE_AS_IO: + type = PCIE_ATU_TYPE_IO; + break; + default: + return -EINVAL; + } + + dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type); + dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, + PCIE_ATU_ENABLE | + PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); + + /* + * Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. 
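+ * The loop below polls PCIE_ATU_UNR_REGION_CTRL2 for PCIE_ATU_ENABLE,
+ * waiting LINK_WAIT_IATU milliseconds between reads, for at most
+ * LINK_WAIT_MAX_IATU_RETRIES attempts.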
+ */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = dw_pcie_readl_ib_unroll(pci, index, + PCIE_ATU_UNR_REGION_CTRL2); + if (val & PCIE_ATU_ENABLE) + return 0; + + mdelay(LINK_WAIT_IATU); + } + dev_err(pci->dev, "Inbound iATU is not being enabled\n"); + + return -EBUSY; +} + +int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, + u64 cpu_addr, enum dw_pcie_as_type as_type) +{ + int type; + u32 retries, val; + + if (pci->iatu_unroll_enabled) + return dw_pcie_prog_inbound_atu_unroll(pci, index, bar, + cpu_addr, as_type); + + dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | + index); + dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr)); + dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr)); + + switch (as_type) { + case DW_PCIE_AS_MEM: + type = PCIE_ATU_TYPE_MEM; + break; + case DW_PCIE_AS_IO: + type = PCIE_ATU_TYPE_IO; + break; + default: + return -EINVAL; + } + + dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type); + dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE + | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); + + /* + * Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. + */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); + if (val & PCIE_ATU_ENABLE) + return 0; + + mdelay(LINK_WAIT_IATU); + } + dev_err(pci->dev, "Inbound iATU is not being enabled\n"); + + return -EBUSY; +} + +void dw_pcie_disable_atu(struct dw_pcie *pci, int index, + enum dw_pcie_region_type type) +{ + int region; + + switch (type) { + case DW_PCIE_REGION_INBOUND: + region = PCIE_ATU_REGION_INBOUND; + break; + case DW_PCIE_REGION_OUTBOUND: + region = PCIE_ATU_REGION_OUTBOUND; + break; + default: + return; + } + + dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); + dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE); +} + +int dw_pcie_wait_for_link(struct dw_pcie *pci) +{ + int retries; + + /* Check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + if (dw_pcie_link_up(pci)) { + dev_info(pci->dev, "Link up\n"); + return 0; + } + usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); + } + + dev_err(pci->dev, "Phy link never came up\n"); + + return -ETIMEDOUT; +} + +int dw_pcie_link_up(struct dw_pcie *pci) +{ + u32 val; + + if (pci->ops->link_up) + return pci->ops->link_up(pci); + + val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1); + return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) && + (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING))); +} + +void dw_pcie_setup(struct dw_pcie *pci) +{ + int ret; + u32 val; + u32 lanes; + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; + + ret = of_property_read_u32(np, "num-lanes", &lanes); + if (ret) + lanes = 0; + + /* Set the number of lanes */ + val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); + val &= ~PORT_LINK_MODE_MASK; + switch (lanes) { + case 1: + val |= PORT_LINK_MODE_1_LANES; + break; + case 2: + val |= PORT_LINK_MODE_2_LANES; + break; + case 4: + val |= PORT_LINK_MODE_4_LANES; + break; + case 8: + val |= PORT_LINK_MODE_8_LANES; + break; + default: + dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes); + return; + } + dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); + + /* Set link width speed control register */ + val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); + val &= ~PORT_LOGIC_LINK_WIDTH_MASK; + switch (lanes) { + case 1: + val |= PORT_LOGIC_LINK_WIDTH_1_LANES; + 
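+ /* unlike the bitmask-style PORT_LINK_MODE_* values programmed above, these NUM_OF_LANES values are the raw lane count: 0x1, 0x2, 0x4, 0x8 */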
break; + case 2: + val |= PORT_LOGIC_LINK_WIDTH_2_LANES; + break; + case 4: + val |= PORT_LOGIC_LINK_WIDTH_4_LANES; + break; + case 8: + val |= PORT_LOGIC_LINK_WIDTH_8_LANES; + break; + } + dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); +} diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h new file mode 100644 index 000000000..14dcf6646 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -0,0 +1,392 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Synopsys DesignWare PCIe host controller driver + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Jingoo Han <jg1.han@samsung.com> + */ + +#ifndef _PCIE_DESIGNWARE_H +#define _PCIE_DESIGNWARE_H + +#include <linux/dma-mapping.h> +#include <linux/irq.h> +#include <linux/msi.h> +#include <linux/pci.h> + +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> + +/* Parameters for the waiting for link up routine */ +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_USLEEP_MIN 90000 +#define LINK_WAIT_USLEEP_MAX 100000 + +/* Parameters for the waiting for iATU enabled routine */ +#define LINK_WAIT_MAX_IATU_RETRIES 5 +#define LINK_WAIT_IATU 9 + +/* Synopsys-specific PCIe configuration registers */ +#define PCIE_PORT_LINK_CONTROL 0x710 +#define PORT_LINK_MODE_MASK (0x3f << 16) +#define PORT_LINK_MODE_1_LANES (0x1 << 16) +#define PORT_LINK_MODE_2_LANES (0x3 << 16) +#define PORT_LINK_MODE_4_LANES (0x7 << 16) +#define PORT_LINK_MODE_8_LANES (0xf << 16) + +#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C +#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) +#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8) +#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) +#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) +#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) +#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) + +#define PCIE_MSI_ADDR_LO 0x820 +#define PCIE_MSI_ADDR_HI 0x824 +#define PCIE_MSI_INTR0_ENABLE 0x828 +#define PCIE_MSI_INTR0_MASK 0x82C +#define PCIE_MSI_INTR0_STATUS 0x830 + +#define PCIE_ATU_VIEWPORT 0x900 +#define PCIE_ATU_REGION_INBOUND (0x1 << 31) +#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) +#define PCIE_ATU_REGION_INDEX2 (0x2 << 0) +#define PCIE_ATU_REGION_INDEX1 (0x1 << 0) +#define PCIE_ATU_REGION_INDEX0 (0x0 << 0) +#define PCIE_ATU_CR1 0x904 +#define PCIE_ATU_TYPE_MEM (0x0 << 0) +#define PCIE_ATU_TYPE_IO (0x2 << 0) +#define PCIE_ATU_TYPE_CFG0 (0x4 << 0) +#define PCIE_ATU_TYPE_CFG1 (0x5 << 0) +#define PCIE_ATU_CR2 0x908 +#define PCIE_ATU_ENABLE (0x1 << 31) +#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) +#define PCIE_ATU_LOWER_BASE 0x90C +#define PCIE_ATU_UPPER_BASE 0x910 +#define PCIE_ATU_LIMIT 0x914 +#define PCIE_ATU_LOWER_TARGET 0x918 +#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) +#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) +#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) +#define PCIE_ATU_UPPER_TARGET 0x91C + +#define PCIE_MISC_CONTROL_1_OFF 0x8BC +#define PCIE_DBI_RO_WR_EN (0x1 << 0) + +/* + * iATU Unroll-specific register definitions + * From 4.80 core version the address translation will be made by unroll + */ +#define PCIE_ATU_UNR_REGION_CTRL1 0x00 +#define PCIE_ATU_UNR_REGION_CTRL2 0x04 +#define PCIE_ATU_UNR_LOWER_BASE 0x08 +#define PCIE_ATU_UNR_UPPER_BASE 0x0C +#define PCIE_ATU_UNR_LIMIT 0x10 +#define PCIE_ATU_UNR_LOWER_TARGET 0x14 +#define PCIE_ATU_UNR_UPPER_TARGET 0x18 + +/* Register address builder */ +#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \ + ((0x3 << 20) | ((region) << 9)) + +#define 
PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ + ((0x3 << 20) | ((region) << 9) | (0x1 << 8)) + +#define MAX_MSI_IRQS 256 +#define MAX_MSI_IRQS_PER_CTRL 32 +#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL) +#define MSI_REG_CTRL_BLOCK_SIZE 12 +#define MSI_DEF_NUM_VECTORS 32 + +/* Maximum number of inbound/outbound iATUs */ +#define MAX_IATU_IN 256 +#define MAX_IATU_OUT 256 + +struct pcie_port; +struct dw_pcie; +struct dw_pcie_ep; + +enum dw_pcie_region_type { + DW_PCIE_REGION_UNKNOWN, + DW_PCIE_REGION_INBOUND, + DW_PCIE_REGION_OUTBOUND, +}; + +enum dw_pcie_device_mode { + DW_PCIE_UNKNOWN_TYPE, + DW_PCIE_EP_TYPE, + DW_PCIE_LEG_EP_TYPE, + DW_PCIE_RC_TYPE, +}; + +struct dw_pcie_host_ops { + int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val); + int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val); + int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 *val); + int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 val); + int (*host_init)(struct pcie_port *pp); + void (*msi_set_irq)(struct pcie_port *pp, int irq); + void (*msi_clear_irq)(struct pcie_port *pp, int irq); + phys_addr_t (*get_msi_addr)(struct pcie_port *pp); + u32 (*get_msi_data)(struct pcie_port *pp, int pos); + void (*scan_bus)(struct pcie_port *pp); + void (*set_num_vectors)(struct pcie_port *pp); + int (*msi_host_init)(struct pcie_port *pp); + void (*msi_irq_ack)(int irq, struct pcie_port *pp); +}; + +struct pcie_port { + u8 root_bus_nr; + u64 cfg0_base; + void __iomem *va_cfg0_base; + u32 cfg0_size; + u64 cfg1_base; + void __iomem *va_cfg1_base; + u32 cfg1_size; + resource_size_t io_base; + phys_addr_t io_bus_addr; + u32 io_size; + u64 mem_base; + phys_addr_t mem_bus_addr; + u32 mem_size; + struct resource *cfg; + struct resource *io; + struct resource *mem; + struct resource *busn; + int irq; + const struct dw_pcie_host_ops *ops; + int msi_irq; + struct irq_domain *irq_domain; + struct irq_domain *msi_domain; + dma_addr_t msi_data; + struct page *msi_page; + u32 num_vectors; + u32 irq_status[MAX_MSI_CTRLS]; + raw_spinlock_t lock; + DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); +}; + +enum dw_pcie_as_type { + DW_PCIE_AS_UNKNOWN, + DW_PCIE_AS_MEM, + DW_PCIE_AS_IO, +}; + +struct dw_pcie_ep_ops { + void (*ep_init)(struct dw_pcie_ep *ep); + int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no, + enum pci_epc_irq_type type, u16 interrupt_num); +}; + +struct dw_pcie_ep { + struct pci_epc *epc; + struct dw_pcie_ep_ops *ops; + phys_addr_t phys_base; + size_t addr_size; + size_t page_size; + u8 bar_to_atu[6]; + phys_addr_t *outbound_addr; + unsigned long *ib_window_map; + unsigned long *ob_window_map; + u32 num_ib_windows; + u32 num_ob_windows; + void __iomem *msi_mem; + phys_addr_t msi_mem_phys; + u8 msi_cap; /* MSI capability offset */ + u8 msix_cap; /* MSI-X capability offset */ +}; + +struct dw_pcie_ops { + u64 (*cpu_addr_fixup)(struct dw_pcie *pcie, u64 cpu_addr); + u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, + size_t size); + void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, + size_t size, u32 val); + int (*link_up)(struct dw_pcie *pcie); + int (*start_link)(struct dw_pcie *pcie); + void (*stop_link)(struct dw_pcie *pcie); +}; + +struct dw_pcie { + struct device *dev; + void __iomem *dbi_base; + void __iomem *dbi_base2; + u32 num_viewport; + u8 iatu_unroll_enabled; + struct pcie_port pp; + struct dw_pcie_ep ep; + const struct 
dw_pcie_ops *ops; +}; + +#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) + +#define to_dw_pcie_from_ep(endpoint) \ + container_of((endpoint), struct dw_pcie, ep) + +int dw_pcie_read(void __iomem *addr, int size, u32 *val); +int dw_pcie_write(void __iomem *addr, int size, u32 val); + +u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size); +void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size, u32 val); +int dw_pcie_link_up(struct dw_pcie *pci); +int dw_pcie_wait_for_link(struct dw_pcie *pci); +void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, + int type, u64 cpu_addr, u64 pci_addr, + u32 size); +int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, + u64 cpu_addr, enum dw_pcie_as_type as_type); +void dw_pcie_disable_atu(struct dw_pcie *pci, int index, + enum dw_pcie_region_type type); +void dw_pcie_setup(struct dw_pcie *pci); + +static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) +{ + __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val); +} + +static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg) +{ + return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4); +} + +static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val) +{ + __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val); +} + +static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg) +{ + return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2); +} + +static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val) +{ + __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val); +} + +static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg) +{ + return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1); +} + +static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val) +{ + __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val); +} + +static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) +{ + return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4); +} + +static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci) +{ + u32 reg; + u32 val; + + reg = PCIE_MISC_CONTROL_1_OFF; + val = dw_pcie_readl_dbi(pci, reg); + val |= PCIE_DBI_RO_WR_EN; + dw_pcie_writel_dbi(pci, reg, val); +} + +static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci) +{ + u32 reg; + u32 val; + + reg = PCIE_MISC_CONTROL_1_OFF; + val = dw_pcie_readl_dbi(pci, reg); + val &= ~PCIE_DBI_RO_WR_EN; + dw_pcie_writel_dbi(pci, reg, val); +} + +#ifdef CONFIG_PCIE_DW_HOST +irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); +void dw_pcie_msi_init(struct pcie_port *pp); +void dw_pcie_free_msi(struct pcie_port *pp); +void dw_pcie_setup_rc(struct pcie_port *pp); +int dw_pcie_host_init(struct pcie_port *pp); +int dw_pcie_allocate_domains(struct pcie_port *pp); +#else +static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) +{ + return IRQ_NONE; +} + +static inline void dw_pcie_msi_init(struct pcie_port *pp) +{ +} + +static inline void dw_pcie_free_msi(struct pcie_port *pp) +{ +} + +static inline void dw_pcie_setup_rc(struct pcie_port *pp) +{ +} + +static inline int dw_pcie_host_init(struct pcie_port *pp) +{ + return 0; +} + +static inline int dw_pcie_allocate_domains(struct pcie_port *pp) +{ + return 0; +} +#endif + +#ifdef CONFIG_PCIE_DW_EP +void dw_pcie_ep_linkup(struct dw_pcie_ep *ep); +int dw_pcie_ep_init(struct dw_pcie_ep *ep); +void dw_pcie_ep_exit(struct dw_pcie_ep *ep); +int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, 
u8 func_no); +int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, + u8 interrupt_num); +int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, + u16 interrupt_num); +void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar); +#else +static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) +{ +} + +static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep) +{ + return 0; +} + +static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep) +{ +} + +static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no) +{ + return 0; +} + +static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, + u8 interrupt_num) +{ + return 0; +} + +static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, + u16 interrupt_num) +{ + return 0; +} + +static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) +{ +} +#endif +#endif /* _PCIE_DESIGNWARE_H */ diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c new file mode 100644 index 000000000..6d9e1b2b8 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-hisi.c @@ -0,0 +1,398 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for HiSilicon SoCs + * + * Copyright (C) 2015 HiSilicon Co., Ltd. http://www.hisilicon.com + * + * Authors: Zhou Wang <wangzhou1@hisilicon.com> + * Dacai Zhu <zhudacai@hisilicon.com> + * Gabriele Paoloni <gabriele.paoloni@huawei.com> + */ +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/mfd/syscon.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> +#include <linux/platform_device.h> +#include <linux/of_device.h> +#include <linux/pci.h> +#include <linux/pci-acpi.h> +#include <linux/pci-ecam.h> +#include <linux/regmap.h> +#include "../../pci.h" + +#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) + +static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, + int size, u32 *val) +{ + struct pci_config_window *cfg = bus->sysdata; + int dev = PCI_SLOT(devfn); + + if (bus->number == cfg->busr.start) { + /* access only one slot on each root port */ + if (dev > 0) + return PCIBIOS_DEVICE_NOT_FOUND; + else + return pci_generic_config_read32(bus, devfn, where, + size, val); + } + + return pci_generic_config_read(bus, devfn, where, size, val); +} + +static int hisi_pcie_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + struct pci_config_window *cfg = bus->sysdata; + int dev = PCI_SLOT(devfn); + + if (bus->number == cfg->busr.start) { + /* access only one slot on each root port */ + if (dev > 0) + return PCIBIOS_DEVICE_NOT_FOUND; + else + return pci_generic_config_write32(bus, devfn, where, + size, val); + } + + return pci_generic_config_write(bus, devfn, where, size, val); +} + +static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) +{ + struct pci_config_window *cfg = bus->sysdata; + void __iomem *reg_base = cfg->priv; + + if (bus->number == cfg->busr.start) + return reg_base + where; + else + return pci_ecam_map_bus(bus, devfn, where); +} + +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) + +static int hisi_pcie_init(struct pci_config_window *cfg) +{ + struct device *dev = cfg->parent; + struct acpi_device *adev = to_acpi_device(dev); + struct acpi_pci_root *root = acpi_driver_data(adev); + struct resource *res; + void __iomem *reg_base; + int ret; + + /* + * Retrieve RC base and size from a HISI0081 device with _UID + * matching 
our segment. + */ + res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; + + ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res); + if (ret) { + dev_err(dev, "can't get rc base address\n"); + return -ENOMEM; + } + + reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); + if (!reg_base) + return -ENOMEM; + + cfg->priv = reg_base; + return 0; +} + +struct pci_ecam_ops hisi_pcie_ops = { + .bus_shift = 20, + .init = hisi_pcie_init, + .pci_ops = { + .map_bus = hisi_pcie_map_bus, + .read = hisi_pcie_rd_conf, + .write = hisi_pcie_wr_conf, + } +}; + +#endif + +#ifdef CONFIG_PCI_HISI + +#include "pcie-designware.h" + +#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818 +#define PCIE_HIP06_CTRL_OFF 0x1000 +#define PCIE_SYS_STATE4 (PCIE_HIP06_CTRL_OFF + 0x31c) +#define PCIE_LTSSM_LINKUP_STATE 0x11 +#define PCIE_LTSSM_STATE_MASK 0x3F + +#define to_hisi_pcie(x) dev_get_drvdata((x)->dev) + +struct hisi_pcie; + +struct pcie_soc_ops { + int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie); +}; + +struct hisi_pcie { + struct dw_pcie *pci; + struct regmap *subctrl; + u32 port_id; + const struct pcie_soc_ops *soc_ops; +}; + +/* HipXX PCIe host only supports 32-bit config access */ +static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size, + u32 *val) +{ + u32 reg; + u32 reg_val; + void *walker = ®_val; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + walker += (where & 0x3); + reg = where & ~0x3; + reg_val = dw_pcie_readl_dbi(pci, reg); + + if (size == 1) + *val = *(u8 __force *) walker; + else if (size == 2) + *val = *(u16 __force *) walker; + else if (size == 4) + *val = reg_val; + else + return PCIBIOS_BAD_REGISTER_NUMBER; + + return PCIBIOS_SUCCESSFUL; +} + +/* HipXX PCIe host only supports 32-bit config access */ +static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size, + u32 val) +{ + u32 reg_val; + u32 reg; + void *walker = ®_val; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + walker += (where & 0x3); + reg = where & ~0x3; + if (size == 4) + dw_pcie_writel_dbi(pci, reg, val); + else if (size == 2) { + reg_val = dw_pcie_readl_dbi(pci, reg); + *(u16 __force *) walker = val; + dw_pcie_writel_dbi(pci, reg, reg_val); + } else if (size == 1) { + reg_val = dw_pcie_readl_dbi(pci, reg); + *(u8 __force *) walker = val; + dw_pcie_writel_dbi(pci, reg, reg_val); + } else + return PCIBIOS_BAD_REGISTER_NUMBER; + + return PCIBIOS_SUCCESSFUL; +} + +static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie) +{ + u32 val; + + regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG + + 0x100 * hisi_pcie->port_id, &val); + + return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); +} + +static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie) +{ + struct dw_pcie *pci = hisi_pcie->pci; + u32 val; + + val = dw_pcie_readl_dbi(pci, PCIE_SYS_STATE4); + + return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); +} + +static int hisi_pcie_link_up(struct dw_pcie *pci) +{ + struct hisi_pcie *hisi_pcie = to_hisi_pcie(pci); + + return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie); +} + +static const struct dw_pcie_host_ops hisi_pcie_host_ops = { + .rd_own_conf = hisi_pcie_cfg_read, + .wr_own_conf = hisi_pcie_cfg_write, +}; + +static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = hisi_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + u32 port_id; + + if (of_property_read_u32(dev->of_node, "port-id", 
&port_id)) {
+ dev_err(dev, "failed to read port-id\n");
+ return -EINVAL;
+ }
+ if (port_id > 3) {
+ dev_err(dev, "Invalid port-id: %u\n", port_id);
+ return -EINVAL;
+ }
+ hisi_pcie->port_id = port_id;
+
+ pp->ops = &hisi_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = hisi_pcie_link_up,
+};
+
+static int hisi_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct hisi_pcie *hisi_pcie;
+ struct resource *reg;
+ int ret;
+
+ hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL);
+ if (!hisi_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ hisi_pcie->pci = pci;
+
+ hisi_pcie->soc_ops = of_device_get_match_data(dev);
+
+ hisi_pcie->subctrl =
+ syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
+ if (IS_ERR(hisi_pcie->subctrl)) {
+ dev_err(dev, "cannot get subctrl base\n");
+ return PTR_ERR(hisi_pcie->subctrl);
+ }
+
+ reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+ platform_set_drvdata(pdev, hisi_pcie);
+
+ ret = hisi_add_pcie_port(hisi_pcie, pdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct pcie_soc_ops hip05_ops = {
+ .hisi_pcie_link_up = hisi_pcie_link_up_hip05,
+};
+
+static struct pcie_soc_ops hip06_ops = {
+ .hisi_pcie_link_up = hisi_pcie_link_up_hip06,
+};
+
+static const struct of_device_id hisi_pcie_of_match[] = {
+ {
+ .compatible = "hisilicon,hip05-pcie",
+ .data = (void *) &hip05_ops,
+ },
+ {
+ .compatible = "hisilicon,hip06-pcie",
+ .data = (void *) &hip06_ops,
+ },
+ {},
+};
+
+static struct platform_driver hisi_pcie_driver = {
+ .probe = hisi_pcie_probe,
+ .driver = {
+ .name = "hisi-pcie",
+ .of_match_table = hisi_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(hisi_pcie_driver);
+
+static int hisi_pcie_almost_ecam_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_ecam_ops *ops;
+
+ ops = (struct pci_ecam_ops *)of_device_get_match_data(dev);
+ return pci_host_common_probe(pdev, ops);
+}
+
+static int hisi_pcie_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+ void __iomem *reg_base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(dev, "missing \"reg[1]\" property\n");
+ return -EINVAL;
+ }
+
+ reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+ if (!reg_base)
+ return -ENOMEM;
+
+ cfg->priv = reg_base;
+ return 0;
+}
+
+struct pci_ecam_ops hisi_pcie_platform_ops = {
+ .bus_shift = 20,
+ .init = hisi_pcie_platform_init,
+ .pci_ops = {
+ .map_bus = hisi_pcie_map_bus,
+ .read = hisi_pcie_rd_conf,
+ .write = hisi_pcie_wr_conf,
+ }
+};
+
+static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
+ {
+ .compatible = "hisilicon,hip06-pcie-ecam",
+ .data = (void *) &hisi_pcie_platform_ops,
+ },
+ {
+ .compatible = "hisilicon,hip07-pcie-ecam",
+ .data = (void *) &hisi_pcie_platform_ops,
+ },
+ {},
+};
+
+static struct platform_driver hisi_pcie_almost_ecam_driver = {
+ .probe = hisi_pcie_almost_ecam_probe,
+ .driver = {
+ .name = "hisi-pcie-almost-ecam",
+ .of_match_table =
hisi_pcie_almost_ecam_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(hisi_pcie_almost_ecam_driver); + +#endif +#endif diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c new file mode 100644 index 000000000..a3489839a --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for HiSilicon STB SoCs + * + * Copyright (C) 2016-2017 HiSilicon Co., Ltd. http://www.hisilicon.com + * + * Authors: Ruqiang Ju <juruqiang@hisilicon.com> + * Jianguo Sun <sunjianguo1@huawei.com> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/resource.h> +#include <linux/reset.h> + +#include "pcie-designware.h" + +#define to_histb_pcie(x) dev_get_drvdata((x)->dev) + +#define PCIE_SYS_CTRL0 0x0000 +#define PCIE_SYS_CTRL1 0x0004 +#define PCIE_SYS_CTRL7 0x001C +#define PCIE_SYS_CTRL13 0x0034 +#define PCIE_SYS_CTRL15 0x003C +#define PCIE_SYS_CTRL16 0x0040 +#define PCIE_SYS_CTRL17 0x0044 + +#define PCIE_SYS_STAT0 0x0100 +#define PCIE_SYS_STAT4 0x0110 + +#define PCIE_RDLH_LINK_UP BIT(5) +#define PCIE_XMLH_LINK_UP BIT(15) +#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21) +#define PCIE_APP_LTSSM_ENABLE BIT(11) + +#define PCIE_DEVICE_TYPE_MASK GENMASK(31, 28) +#define PCIE_WM_EP 0 +#define PCIE_WM_LEGACY BIT(1) +#define PCIE_WM_RC BIT(30) + +#define PCIE_LTSSM_STATE_MASK GENMASK(5, 0) +#define PCIE_LTSSM_STATE_ACTIVE 0x11 + +struct histb_pcie { + struct dw_pcie *pci; + struct clk *aux_clk; + struct clk *pipe_clk; + struct clk *sys_clk; + struct clk *bus_clk; + struct phy *phy; + struct reset_control *soft_reset; + struct reset_control *sys_reset; + struct reset_control *bus_reset; + void __iomem *ctrl; + int reset_gpio; + struct regulator *vpcie; +}; + +static u32 histb_pcie_readl(struct histb_pcie *histb_pcie, u32 reg) +{ + return readl(histb_pcie->ctrl + reg); +} + +static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val) +{ + writel(val, histb_pcie->ctrl + reg); +} + +static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct histb_pcie *hipcie = to_histb_pcie(pci); + u32 val; + + val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0); + if (enable) + val |= PCIE_ELBI_SLV_DBI_ENABLE; + else + val &= ~PCIE_ELBI_SLV_DBI_ENABLE; + histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val); +} + +static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct histb_pcie *hipcie = to_histb_pcie(pci); + u32 val; + + val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL1); + if (enable) + val |= PCIE_ELBI_SLV_DBI_ENABLE; + else + val &= ~PCIE_ELBI_SLV_DBI_ENABLE; + histb_pcie_writel(hipcie, PCIE_SYS_CTRL1, val); +} + +static u32 histb_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size) +{ + u32 val; + + histb_pcie_dbi_r_mode(&pci->pp, true); + dw_pcie_read(base + reg, size, &val); + histb_pcie_dbi_r_mode(&pci->pp, false); + + return val; +} + +static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size, u32 val) +{ + histb_pcie_dbi_w_mode(&pci->pp, true); + dw_pcie_write(base + reg, size, val); + histb_pcie_dbi_w_mode(&pci->pp, false); +} + 
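+/*
+ * A minimal usage sketch (illustrative only, using the dbi accessors
+ * declared in pcie-designware.h): once the two callbacks above are wired
+ * into the dw_pcie_ops structure further down, a dw_pcie_readl_dbi(pci, reg)
+ * issued by the DesignWare core dispatches to histb_pcie_read_dbi() and is
+ * therefore bracketed by the ELBI sideband window:
+ *
+ *	histb_pcie_dbi_r_mode(&pci->pp, true);
+ *	dw_pcie_read(pci->dbi_base + reg, 0x4, &val);
+ *	histb_pcie_dbi_r_mode(&pci->pp, false);
+ */
+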
+static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where,
+ int size, u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
+
+ histb_pcie_dbi_r_mode(pp, true);
+ ret = dw_pcie_read(pci->dbi_base + where, size, val);
+ histb_pcie_dbi_r_mode(pp, false);
+
+ return ret;
+}
+
+static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where,
+ int size, u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
+
+ histb_pcie_dbi_w_mode(pp, true);
+ ret = dw_pcie_write(pci->dbi_base + where, size, val);
+ histb_pcie_dbi_w_mode(pp, false);
+
+ return ret;
+}
+
+static int histb_pcie_link_up(struct dw_pcie *pci)
+{
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+ u32 status;
+
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0);
+ status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4);
+ status &= PCIE_LTSSM_STATE_MASK;
+ if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
+ (status == PCIE_LTSSM_STATE_ACTIVE))
+ return 1;
+
+ return 0;
+}
+
+static int histb_pcie_establish_link(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+
+ if (dw_pcie_link_up(pci)) {
+ dev_info(pci->dev, "Link already up\n");
+ return 0;
+ }
+
+ /* PCIe RC work mode */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ regval &= ~PCIE_DEVICE_TYPE_MASK;
+ regval |= PCIE_WM_RC;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
+
+ /* setup root complex */
+ dw_pcie_setup_rc(pp);
+
+ /* assert LTSSM enable */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7);
+ regval |= PCIE_APP_LTSSM_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval);
+
+ return dw_pcie_wait_for_link(pci);
+}
+
+static int histb_pcie_host_init(struct pcie_port *pp)
+{
+ histb_pcie_establish_link(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+
+ return 0;
+}
+
+static struct dw_pcie_host_ops histb_pcie_host_ops = {
+ .rd_own_conf = histb_pcie_rd_own_conf,
+ .wr_own_conf = histb_pcie_wr_own_conf,
+ .host_init = histb_pcie_host_init,
+};
+
+static void histb_pcie_host_disable(struct histb_pcie *hipcie)
+{
+ reset_control_assert(hipcie->soft_reset);
+ reset_control_assert(hipcie->sys_reset);
+ reset_control_assert(hipcie->bus_reset);
+
+ clk_disable_unprepare(hipcie->aux_clk);
+ clk_disable_unprepare(hipcie->pipe_clk);
+ clk_disable_unprepare(hipcie->sys_clk);
+ clk_disable_unprepare(hipcie->bus_clk);
+
+ if (gpio_is_valid(hipcie->reset_gpio))
+ gpio_set_value_cansleep(hipcie->reset_gpio, 0);
+
+ if (hipcie->vpcie)
+ regulator_disable(hipcie->vpcie);
+}
+
+static int histb_pcie_host_enable(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ struct device *dev = pci->dev;
+ int ret;
+
+ /* power on the PCIe device, if a regulator is present */
+ if (hipcie->vpcie) {
+ ret = regulator_enable(hipcie->vpcie);
+ if (ret) {
+ dev_err(dev, "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (gpio_is_valid(hipcie->reset_gpio))
+ gpio_set_value_cansleep(hipcie->reset_gpio, 1);
+
+ ret = clk_prepare_enable(hipcie->bus_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable bus clk\n");
+ goto err_bus_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->sys_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable sys clk\n");
+ goto err_sys_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clk\n");
+ goto err_pipe_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clk\n");
+ goto err_aux_clk;
+ }
+
+ reset_control_assert(hipcie->soft_reset);
+ reset_control_deassert(hipcie->soft_reset);
+
+ reset_control_assert(hipcie->sys_reset);
+ reset_control_deassert(hipcie->sys_reset);
+
+ reset_control_assert(hipcie->bus_reset);
+ reset_control_deassert(hipcie->bus_reset);
+
+ return 0;
+
+err_aux_clk:
+ clk_disable_unprepare(hipcie->pipe_clk);
+err_pipe_clk:
+ clk_disable_unprepare(hipcie->sys_clk);
+err_sys_clk:
+ clk_disable_unprepare(hipcie->bus_clk);
+err_bus_clk:
+ if (hipcie->vpcie)
+ regulator_disable(hipcie->vpcie);
+
+ return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .read_dbi = histb_pcie_read_dbi,
+ .write_dbi = histb_pcie_write_dbi,
+ .link_up = histb_pcie_link_up,
+};
+
+static int histb_pcie_probe(struct platform_device *pdev)
+{
+ struct histb_pcie *hipcie;
+ struct dw_pcie *pci;
+ struct pcie_port *pp;
+ struct resource *res;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ enum of_gpio_flags of_flags;
+ unsigned long flag = GPIOF_DIR_OUT;
+ int ret;
+
+ hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL);
+ if (!hipcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ hipcie->pci = pci;
+ pp = &pci->pp;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
+ hipcie->ctrl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hipcie->ctrl)) {
+ dev_err(dev, "cannot get control reg base\n");
+ return PTR_ERR(hipcie->ctrl);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi");
+ pci->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base)) {
+ dev_err(dev, "cannot get rc-dbi base\n");
+ return PTR_ERR(pci->dbi_base);
+ }
+
+ hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
+ if (IS_ERR(hipcie->vpcie)) {
+ if (PTR_ERR(hipcie->vpcie) != -ENODEV)
+ return PTR_ERR(hipcie->vpcie);
+ hipcie->vpcie = NULL;
+ }
+
+ hipcie->reset_gpio = of_get_named_gpio_flags(np,
+ "reset-gpios", 0, &of_flags);
+ if (of_flags & OF_GPIO_ACTIVE_LOW)
+ flag |= GPIOF_ACTIVE_LOW;
+ if (gpio_is_valid(hipcie->reset_gpio)) {
+ ret = devm_gpio_request_one(dev, hipcie->reset_gpio,
+ flag, "PCIe device power control");
+ if (ret) {
+ dev_err(dev, "unable to request gpio\n");
+ return ret;
+ }
+ }
+
+ hipcie->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(hipcie->aux_clk)) {
+ dev_err(dev, "Failed to get PCIe aux clk\n");
+ return PTR_ERR(hipcie->aux_clk);
+ }
+
+ hipcie->pipe_clk = devm_clk_get(dev, "pipe");
+ if (IS_ERR(hipcie->pipe_clk)) {
+ dev_err(dev, "Failed to get PCIe pipe clk\n");
+ return PTR_ERR(hipcie->pipe_clk);
+ }
+
+ hipcie->sys_clk = devm_clk_get(dev, "sys");
+ if (IS_ERR(hipcie->sys_clk)) {
+ dev_err(dev, "Failed to get PCIe sys clk\n");
+ return PTR_ERR(hipcie->sys_clk);
+ }
+
+ hipcie->bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(hipcie->bus_clk)) {
+ dev_err(dev, "Failed to get PCIe bus clk\n");
+ return PTR_ERR(hipcie->bus_clk);
+ }
+
+ hipcie->soft_reset = devm_reset_control_get(dev, "soft");
+ if (IS_ERR(hipcie->soft_reset)) {
+ dev_err(dev, "couldn't get soft reset\n");
+ return PTR_ERR(hipcie->soft_reset);
+ }
+
+ hipcie->sys_reset = devm_reset_control_get(dev, "sys");
+ if (IS_ERR(hipcie->sys_reset)) {
+ dev_err(dev, "couldn't get sys reset\n");
+ return PTR_ERR(hipcie->sys_reset);
+ }
+
+ hipcie->bus_reset = devm_reset_control_get(dev, "bus");
+ if (IS_ERR(hipcie->bus_reset)) {
+ dev_err(dev, "couldn't get bus reset\n");
+ return PTR_ERR(hipcie->bus_reset);
+ }
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+ if (pp->msi_irq < 0) {
+ dev_err(dev, "Failed to get MSI IRQ\n");
+ return pp->msi_irq;
+ }
+ }
+
+ hipcie->phy = devm_phy_get(dev, "phy");
+ if (IS_ERR(hipcie->phy)) {
+ dev_info(dev, "no pcie-phy found\n");
+ hipcie->phy = NULL;
+ /* carry on without a PHY handle: when no pcie-phy is
+ * described, the PHY is assumed to have been initialized
+ * by the bootloader.
+ */
+ } else {
+ phy_init(hipcie->phy);
+ }
+
+ pp->ops = &histb_pcie_host_ops;
+
+ platform_set_drvdata(pdev, hipcie);
+
+ ret = histb_pcie_host_enable(pp);
+ if (ret) {
+ dev_err(dev, "failed to enable host\n");
+ return ret;
+ }
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int histb_pcie_remove(struct platform_device *pdev)
+{
+ struct histb_pcie *hipcie = platform_get_drvdata(pdev);
+
+ histb_pcie_host_disable(hipcie);
+
+ if (hipcie->phy)
+ phy_exit(hipcie->phy);
+
+ return 0;
+}
+
+static const struct of_device_id histb_pcie_of_match[] = {
+ { .compatible = "hisilicon,hi3798cv200-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, histb_pcie_of_match);
+
+static struct platform_driver histb_pcie_platform_driver = {
+ .probe = histb_pcie_probe,
+ .remove = histb_pcie_remove,
+ .driver = {
+ .name = "histb-pcie",
+ .of_match_table = histb_pcie_of_match,
+ },
+};
+module_platform_driver(histb_pcie_platform_driver);
+
+MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
new file mode 100644
index 000000000..9b5992962
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Kirin Phone SoCs
+ *
+ * Copyright (C) 2017 HiSilicon Electronics Co., Ltd.
+ * http://www.huawei.com + * + * Author: Xiaowei Song <songxiaowei@huawei.com> + */ + +#include <linux/compiler.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/mfd/syscon.h> +#include <linux/of_address.h> +#include <linux/of_gpio.h> +#include <linux/of_pci.h> +#include <linux/pci.h> +#include <linux/pci_regs.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/resource.h> +#include <linux/types.h> +#include "pcie-designware.h" + +#define to_kirin_pcie(x) dev_get_drvdata((x)->dev) + +#define REF_CLK_FREQ 100000000 + +/* PCIe ELBI registers */ +#define SOC_PCIECTRL_CTRL0_ADDR 0x000 +#define SOC_PCIECTRL_CTRL1_ADDR 0x004 +#define SOC_PCIEPHY_CTRL2_ADDR 0x008 +#define SOC_PCIEPHY_CTRL3_ADDR 0x00c +#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21) + +/* info located in APB */ +#define PCIE_APP_LTSSM_ENABLE 0x01c +#define PCIE_APB_PHY_CTRL0 0x0 +#define PCIE_APB_PHY_CTRL1 0x4 +#define PCIE_APB_PHY_STATUS0 0x400 +#define PCIE_LINKUP_ENABLE (0x8020) +#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11) +#define PIPE_CLK_STABLE (0x1 << 19) +#define PHY_REF_PAD_BIT (0x1 << 8) +#define PHY_PWR_DOWN_BIT (0x1 << 22) +#define PHY_RST_ACK_BIT (0x1 << 16) + +/* info located in sysctrl */ +#define SCTRL_PCIE_CMOS_OFFSET 0x60 +#define SCTRL_PCIE_CMOS_BIT 0x10 +#define SCTRL_PCIE_ISO_OFFSET 0x44 +#define SCTRL_PCIE_ISO_BIT 0x30 +#define SCTRL_PCIE_HPCLK_OFFSET 0x190 +#define SCTRL_PCIE_HPCLK_BIT 0x184000 +#define SCTRL_PCIE_OE_OFFSET 0x14a +#define PCIE_DEBOUNCE_PARAM 0xF0F400 +#define PCIE_OE_BYPASS (0x3 << 28) + +/* peri_crg ctrl */ +#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88 +#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000 + +/* Time for delay */ +#define REF_2_PERST_MIN 20000 +#define REF_2_PERST_MAX 25000 +#define PERST_2_ACCESS_MIN 10000 +#define PERST_2_ACCESS_MAX 12000 +#define LINK_WAIT_MIN 900 +#define LINK_WAIT_MAX 1000 +#define PIPE_CLK_WAIT_MIN 550 +#define PIPE_CLK_WAIT_MAX 600 +#define TIME_CMOS_MIN 100 +#define TIME_CMOS_MAX 105 +#define TIME_PHY_PD_MIN 10 +#define TIME_PHY_PD_MAX 11 + +struct kirin_pcie { + struct dw_pcie *pci; + void __iomem *apb_base; + void __iomem *phy_base; + struct regmap *crgctrl; + struct regmap *sysctrl; + struct clk *apb_sys_clk; + struct clk *apb_phy_clk; + struct clk *phy_ref_clk; + struct clk *pcie_aclk; + struct clk *pcie_aux_clk; + int gpio_id_reset; +}; + +/* Registers in PCIeCTRL */ +static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie, + u32 val, u32 reg) +{ + writel(val, kirin_pcie->apb_base + reg); +} + +static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg) +{ + return readl(kirin_pcie->apb_base + reg); +} + +/* Registers in PCIePHY */ +static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie, + u32 val, u32 reg) +{ + writel(val, kirin_pcie->phy_base + reg); +} + +static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg) +{ + return readl(kirin_pcie->phy_base + reg); +} + +static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref"); + if (IS_ERR(kirin_pcie->phy_ref_clk)) + return PTR_ERR(kirin_pcie->phy_ref_clk); + + kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux"); + if (IS_ERR(kirin_pcie->pcie_aux_clk)) + return PTR_ERR(kirin_pcie->pcie_aux_clk); + + kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy"); + if 
(IS_ERR(kirin_pcie->apb_phy_clk)) + return PTR_ERR(kirin_pcie->apb_phy_clk); + + kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys"); + if (IS_ERR(kirin_pcie->apb_sys_clk)) + return PTR_ERR(kirin_pcie->apb_sys_clk); + + kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk"); + if (IS_ERR(kirin_pcie->pcie_aclk)) + return PTR_ERR(kirin_pcie->pcie_aclk); + + return 0; +} + +static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *apb; + struct resource *phy; + struct resource *dbi; + + apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb"); + kirin_pcie->apb_base = devm_ioremap_resource(dev, apb); + if (IS_ERR(kirin_pcie->apb_base)) + return PTR_ERR(kirin_pcie->apb_base); + + phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); + kirin_pcie->phy_base = devm_ioremap_resource(dev, phy); + if (IS_ERR(kirin_pcie->phy_base)) + return PTR_ERR(kirin_pcie->phy_base); + + dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi); + if (IS_ERR(kirin_pcie->pci->dbi_base)) + return PTR_ERR(kirin_pcie->pci->dbi_base); + + kirin_pcie->crgctrl = + syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl"); + if (IS_ERR(kirin_pcie->crgctrl)) + return PTR_ERR(kirin_pcie->crgctrl); + + kirin_pcie->sysctrl = + syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl"); + if (IS_ERR(kirin_pcie->sysctrl)) + return PTR_ERR(kirin_pcie->sysctrl); + + return 0; +} + +static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie) +{ + struct device *dev = kirin_pcie->pci->dev; + u32 reg_val; + + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1); + reg_val &= ~PHY_REF_PAD_BIT; + kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1); + + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0); + reg_val &= ~PHY_PWR_DOWN_BIT; + kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0); + usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX); + + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1); + reg_val &= ~PHY_RST_ACK_BIT; + kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1); + + usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX); + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0); + if (reg_val & PIPE_CLK_STABLE) { + dev_err(dev, "PIPE clk is not stable\n"); + return -EINVAL; + } + + return 0; +} + +static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie) +{ + u32 val; + + regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val); + val |= PCIE_DEBOUNCE_PARAM; + val &= ~PCIE_OE_BYPASS; + regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val); +} + +static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable) +{ + int ret = 0; + + if (!enable) + goto close_clk; + + ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ); + if (ret) + return ret; + + ret = clk_prepare_enable(kirin_pcie->phy_ref_clk); + if (ret) + return ret; + + ret = clk_prepare_enable(kirin_pcie->apb_sys_clk); + if (ret) + goto apb_sys_fail; + + ret = clk_prepare_enable(kirin_pcie->apb_phy_clk); + if (ret) + goto apb_phy_fail; + + ret = clk_prepare_enable(kirin_pcie->pcie_aclk); + if (ret) + goto aclk_fail; + + ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk); + if (ret) + goto aux_clk_fail; + + return 0; + +close_clk: + clk_disable_unprepare(kirin_pcie->pcie_aux_clk); +aux_clk_fail: + clk_disable_unprepare(kirin_pcie->pcie_aclk); +aclk_fail: + 
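+ /* unwind in reverse order: each label disables only the clocks enabled before its failure point */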
clk_disable_unprepare(kirin_pcie->apb_phy_clk); +apb_phy_fail: + clk_disable_unprepare(kirin_pcie->apb_sys_clk); +apb_sys_fail: + clk_disable_unprepare(kirin_pcie->phy_ref_clk); + + return ret; +} + +static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie) +{ + int ret; + + /* Power supply for Host */ + regmap_write(kirin_pcie->sysctrl, + SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT); + usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX); + kirin_pcie_oe_enable(kirin_pcie); + + ret = kirin_pcie_clk_ctrl(kirin_pcie, true); + if (ret) + return ret; + + /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */ + regmap_write(kirin_pcie->sysctrl, + SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT); + regmap_write(kirin_pcie->crgctrl, + CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT); + regmap_write(kirin_pcie->sysctrl, + SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT); + + ret = kirin_pcie_phy_init(kirin_pcie); + if (ret) + goto close_clk; + + /* perst assert Endpoint */ + if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) { + usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX); + ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1); + if (ret) + goto close_clk; + usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX); + + return 0; + } + +close_clk: + kirin_pcie_clk_ctrl(kirin_pcie, false); + return ret; +} + +static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie, + bool on) +{ + u32 val; + + val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR); + if (on) + val = val | PCIE_ELBI_SLV_DBI_ENABLE; + else + val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; + + kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR); +} + +static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie, + bool on) +{ + u32 val; + + val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR); + if (on) + val = val | PCIE_ELBI_SLV_DBI_ENABLE; + else + val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; + + kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR); +} + +static int kirin_pcie_rd_own_conf(struct pcie_port *pp, + int where, int size, u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + int ret; + + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); + ret = dw_pcie_read(pci->dbi_base + where, size, val); + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); + + return ret; +} + +static int kirin_pcie_wr_own_conf(struct pcie_port *pp, + int where, int size, u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + int ret; + + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); + ret = dw_pcie_write(pci->dbi_base + where, size, val); + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); + + return ret; +} + +static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size) +{ + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + u32 ret; + + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); + dw_pcie_read(base + reg, size, &ret); + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); + + return ret; +} + +static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size, u32 val) +{ + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); + dw_pcie_write(base + reg, size, val); + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); +} + +static int kirin_pcie_link_up(struct dw_pcie *pci) +{ + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + u32 
val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0); + + if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE) + return 1; + + return 0; +} + +static int kirin_pcie_establish_link(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + struct device *dev = kirin_pcie->pci->dev; + int count = 0; + + if (kirin_pcie_link_up(pci)) + return 0; + + dw_pcie_setup_rc(pp); + + /* assert LTSSM enable */ + kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT, + PCIE_APP_LTSSM_ENABLE); + + /* check if the link is up or not */ + while (!kirin_pcie_link_up(pci)) { + usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); + count++; + if (count == 1000) { + dev_err(dev, "Link Fail\n"); + return -EINVAL; + } + } + + return 0; +} + +static int kirin_pcie_host_init(struct pcie_port *pp) +{ + kirin_pcie_establish_link(pp); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); + + return 0; +} + +static struct dw_pcie_ops kirin_dw_pcie_ops = { + .read_dbi = kirin_pcie_read_dbi, + .write_dbi = kirin_pcie_write_dbi, + .link_up = kirin_pcie_link_up, +}; + +static const struct dw_pcie_host_ops kirin_pcie_host_ops = { + .rd_own_conf = kirin_pcie_rd_own_conf, + .wr_own_conf = kirin_pcie_wr_own_conf, + .host_init = kirin_pcie_host_init, +}; + +static int kirin_pcie_add_msi(struct dw_pcie *pci, + struct platform_device *pdev) +{ + int irq; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, + "failed to get MSI IRQ (%d)\n", irq); + return irq; + } + + pci->pp.msi_irq = irq; + } + + return 0; +} + +static int kirin_add_pcie_port(struct dw_pcie *pci, + struct platform_device *pdev) +{ + int ret; + + ret = kirin_pcie_add_msi(pci, pdev); + if (ret) + return ret; + + pci->pp.ops = &kirin_pcie_host_ops; + + return dw_pcie_host_init(&pci->pp); +} + +static int kirin_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct kirin_pcie *kirin_pcie; + struct dw_pcie *pci; + int ret; + + if (!dev->of_node) { + dev_err(dev, "NULL node\n"); + return -EINVAL; + } + + kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL); + if (!kirin_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &kirin_dw_pcie_ops; + kirin_pcie->pci = pci; + + ret = kirin_pcie_get_clk(kirin_pcie, pdev); + if (ret) + return ret; + + ret = kirin_pcie_get_resource(kirin_pcie, pdev); + if (ret) + return ret; + + kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, + "reset-gpios", 0); + if (kirin_pcie->gpio_id_reset < 0) + return -ENODEV; + + ret = kirin_pcie_power_on(kirin_pcie); + if (ret) + return ret; + + platform_set_drvdata(pdev, kirin_pcie); + + return kirin_add_pcie_port(pci, pdev); +} + +static const struct of_device_id kirin_pcie_match[] = { + { .compatible = "hisilicon,kirin960-pcie" }, + {}, +}; + +static struct platform_driver kirin_pcie_driver = { + .probe = kirin_pcie_probe, + .driver = { + .name = "kirin-pcie", + .of_match_table = kirin_pcie_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(kirin_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c new file mode 100644 index 000000000..133fad284 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -0,0 +1,1374 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Qualcomm PCIe root complex driver + * + * Copyright (c) 2014-2015, The Linux 
Foundation. All rights reserved. + * Copyright 2015 Linaro Limited. + * + * Author: Stanimir Varbanov <svarbanov@mm-sol.com> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <linux/platform_device.h> +#include <linux/phy/phy.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> +#include <linux/slab.h> +#include <linux/types.h> + +#include "pcie-designware.h" + +#define PCIE20_PARF_SYS_CTRL 0x00 +#define MST_WAKEUP_EN BIT(13) +#define SLV_WAKEUP_EN BIT(12) +#define MSTR_ACLK_CGC_DIS BIT(10) +#define SLV_ACLK_CGC_DIS BIT(9) +#define CORE_CLK_CGC_DIS BIT(6) +#define AUX_PWR_DET BIT(4) +#define L23_CLK_RMV_DIS BIT(2) +#define L1_CLK_RMV_DIS BIT(1) + +#define PCIE20_COMMAND_STATUS 0x04 +#define CMD_BME_VAL 0x4 +#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98 +#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 + +#define PCIE20_PARF_PHY_CTRL 0x40 +#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16) +#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16) + +#define PCIE20_PARF_PHY_REFCLK 0x4C +#define PHY_REFCLK_SSP_EN BIT(16) +#define PHY_REFCLK_USE_PAD BIT(12) + +#define PCIE20_PARF_DBI_BASE_ADDR 0x168 +#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C +#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 +#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178 +#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8 +#define PCIE20_PARF_LTSSM 0x1B0 +#define PCIE20_PARF_SID_OFFSET 0x234 +#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C + +#define PCIE20_ELBI_SYS_CTRL 0x04 +#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) + +#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818 +#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4 +#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5 +#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c +#define CFG_BRIDGE_SB_INIT BIT(0) + +#define PCIE20_CAP 0x70 +#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC) +#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11)) +#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14) +#define PCIE_CAP_LINK1_VAL 0x2FD7F + +#define PCIE20_PARF_Q2A_FLUSH 0x1AC + +#define PCIE20_MISC_CONTROL_1_REG 0x8BC +#define DBI_RO_WR_EN 1 + +#define PERST_DELAY_US 1000 +/* PARF registers */ +#define PCIE20_PARF_PCS_DEEMPH 0x34 +#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0) + +#define PCIE20_PARF_PCS_SWING 0x38 +#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8) +#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0) + +#define PCIE20_PARF_CONFIG_BITS 0x50 +#define PHY_RX0_EQ(x) ((x) << 24) + +#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 +#define SLV_ADDR_SPACE_SZ 0x10000000 + +#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3 +struct qcom_pcie_resources_2_1_0 { + struct clk *iface_clk; + struct clk *core_clk; + struct clk *phy_clk; + struct reset_control *pci_reset; + struct reset_control *axi_reset; + struct reset_control *ahb_reset; + struct reset_control *por_reset; + struct reset_control *phy_reset; + struct reset_control *ext_reset; + struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY]; +}; + +struct qcom_pcie_resources_1_0_0 { + struct clk *iface; + struct clk *aux; + struct clk *master_bus; + struct clk *slave_bus; + struct reset_control *core; + struct regulator *vdda; +}; + +#define 
QCOM_PCIE_2_3_2_MAX_SUPPLY 2 +struct qcom_pcie_resources_2_3_2 { + struct clk *aux_clk; + struct clk *master_clk; + struct clk *slave_clk; + struct clk *cfg_clk; + struct clk *pipe_clk; + struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY]; +}; + +struct qcom_pcie_resources_2_4_0 { + struct clk *aux_clk; + struct clk *master_clk; + struct clk *slave_clk; + struct reset_control *axi_m_reset; + struct reset_control *axi_s_reset; + struct reset_control *pipe_reset; + struct reset_control *axi_m_vmid_reset; + struct reset_control *axi_s_xpu_reset; + struct reset_control *parf_reset; + struct reset_control *phy_reset; + struct reset_control *axi_m_sticky_reset; + struct reset_control *pipe_sticky_reset; + struct reset_control *pwr_reset; + struct reset_control *ahb_reset; + struct reset_control *phy_ahb_reset; +}; + +struct qcom_pcie_resources_2_3_3 { + struct clk *iface; + struct clk *axi_m_clk; + struct clk *axi_s_clk; + struct clk *ahb_clk; + struct clk *aux_clk; + struct reset_control *rst[7]; +}; + +union qcom_pcie_resources { + struct qcom_pcie_resources_1_0_0 v1_0_0; + struct qcom_pcie_resources_2_1_0 v2_1_0; + struct qcom_pcie_resources_2_3_2 v2_3_2; + struct qcom_pcie_resources_2_3_3 v2_3_3; + struct qcom_pcie_resources_2_4_0 v2_4_0; +}; + +struct qcom_pcie; + +struct qcom_pcie_ops { + int (*get_resources)(struct qcom_pcie *pcie); + int (*init)(struct qcom_pcie *pcie); + int (*post_init)(struct qcom_pcie *pcie); + void (*deinit)(struct qcom_pcie *pcie); + void (*post_deinit)(struct qcom_pcie *pcie); + void (*ltssm_enable)(struct qcom_pcie *pcie); +}; + +struct qcom_pcie { + struct dw_pcie *pci; + void __iomem *parf; /* DT parf */ + void __iomem *elbi; /* DT elbi */ + union qcom_pcie_resources res; + struct phy *phy; + struct gpio_desc *reset; + const struct qcom_pcie_ops *ops; +}; + +#define to_qcom_pcie(x) dev_get_drvdata((x)->dev) + +static void qcom_ep_reset_assert(struct qcom_pcie *pcie) +{ + gpiod_set_value_cansleep(pcie->reset, 1); + usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); +} + +static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) +{ + /* Ensure that PERST has been asserted for at least 100 ms */ + msleep(100); + gpiod_set_value_cansleep(pcie->reset, 0); + usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); +} + +static int qcom_pcie_establish_link(struct qcom_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + + if (dw_pcie_link_up(pci)) + return 0; + + /* Enable Link Training state machine */ + if (pcie->ops->ltssm_enable) + pcie->ops->ltssm_enable(pcie); + + return dw_pcie_wait_for_link(pci); +} + +static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) +{ + u32 val; + + /* enable link training */ + val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); + val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; + writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); +} + +static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + int ret; + + res->supplies[0].supply = "vdda"; + res->supplies[1].supply = "vdda_phy"; + res->supplies[2].supply = "vdda_refclk"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), + res->supplies); + if (ret) + return ret; + + res->iface_clk = devm_clk_get(dev, "iface"); + if (IS_ERR(res->iface_clk)) + return PTR_ERR(res->iface_clk); + + res->core_clk = devm_clk_get(dev, "core"); + if (IS_ERR(res->core_clk)) + return PTR_ERR(res->core_clk); + + res->phy_clk = devm_clk_get(dev, "phy"); + if 
(IS_ERR(res->phy_clk))
+		return PTR_ERR(res->phy_clk);
+
+	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+	if (IS_ERR(res->pci_reset))
+		return PTR_ERR(res->pci_reset);
+
+	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
+	if (IS_ERR(res->axi_reset))
+		return PTR_ERR(res->axi_reset);
+
+	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
+	if (IS_ERR(res->ahb_reset))
+		return PTR_ERR(res->ahb_reset);
+
+	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
+	if (IS_ERR(res->por_reset))
+		return PTR_ERR(res->por_reset);
+
+	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
+	if (IS_ERR(res->ext_reset))
+		return PTR_ERR(res->ext_reset);
+
+	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+	return PTR_ERR_OR_ZERO(res->phy_reset);
+}
+
+static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+
+	reset_control_assert(res->pci_reset);
+	reset_control_assert(res->axi_reset);
+	reset_control_assert(res->ahb_reset);
+	reset_control_assert(res->por_reset);
+	reset_control_assert(res->ext_reset);
+	reset_control_assert(res->phy_reset);
+	clk_disable_unprepare(res->iface_clk);
+	clk_disable_unprepare(res->core_clk);
+	clk_disable_unprepare(res->phy_clk);
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	struct device_node *node = dev->of_node;
+	u32 val;
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+	if (ret < 0) {
+		dev_err(dev, "cannot enable regulators\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert ahb reset\n");
+		goto err_assert_ahb;
+	}
+
+	ret = clk_prepare_enable(res->iface_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable iface clock\n");
+		goto err_assert_ahb;
+	}
+
+	ret = clk_prepare_enable(res->phy_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable phy clock\n");
+		goto err_clk_phy;
+	}
+
+	ret = clk_prepare_enable(res->core_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable core clock\n");
+		goto err_clk_core;
+	}
+
+	ret = reset_control_deassert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert ahb reset\n");
+		goto err_deassert_ahb;
+	}
+
+	ret = reset_control_deassert(res->ext_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert ext reset\n");
+		goto err_deassert_ahb;
+	}
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
+			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
+			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
+		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
+		writel(PCS_SWING_TX_SWING_FULL(120) |
+		       PCS_SWING_TX_SWING_LOW(120),
+		       pcie->parf + PCIE20_PARF_PCS_SWING);
+		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
+
+		/* set TX termination offset */
+		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
+		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
+		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+	}
+
+	/* enable external reference clock */
+	val = readl(pcie->parf +
PCIE20_PARF_PHY_REFCLK); + /* USE_PAD is required only for ipq806x */ + if (!of_device_is_compatible(node, "qcom,pcie-apq8064")) + val &= ~PHY_REFCLK_USE_PAD; + val |= PHY_REFCLK_SSP_EN; + writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); + + ret = reset_control_deassert(res->phy_reset); + if (ret) { + dev_err(dev, "cannot deassert phy reset\n"); + return ret; + } + + ret = reset_control_deassert(res->pci_reset); + if (ret) { + dev_err(dev, "cannot deassert pci reset\n"); + return ret; + } + + ret = reset_control_deassert(res->por_reset); + if (ret) { + dev_err(dev, "cannot deassert por reset\n"); + return ret; + } + + ret = reset_control_deassert(res->axi_reset); + if (ret) { + dev_err(dev, "cannot deassert axi reset\n"); + return ret; + } + + /* wait for clock acquisition */ + usleep_range(1000, 1500); + + + /* Set the Max TLP size to 2K, instead of using default of 4K */ + writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, + pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); + writel(CFG_BRIDGE_SB_INIT, + pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); + + return 0; + +err_deassert_ahb: + clk_disable_unprepare(res->core_clk); +err_clk_core: + clk_disable_unprepare(res->phy_clk); +err_clk_phy: + clk_disable_unprepare(res->iface_clk); +err_assert_ahb: + regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); + + return ret; +} + +static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + + res->vdda = devm_regulator_get(dev, "vdda"); + if (IS_ERR(res->vdda)) + return PTR_ERR(res->vdda); + + res->iface = devm_clk_get(dev, "iface"); + if (IS_ERR(res->iface)) + return PTR_ERR(res->iface); + + res->aux = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux)) + return PTR_ERR(res->aux); + + res->master_bus = devm_clk_get(dev, "master_bus"); + if (IS_ERR(res->master_bus)) + return PTR_ERR(res->master_bus); + + res->slave_bus = devm_clk_get(dev, "slave_bus"); + if (IS_ERR(res->slave_bus)) + return PTR_ERR(res->slave_bus); + + res->core = devm_reset_control_get_exclusive(dev, "core"); + return PTR_ERR_OR_ZERO(res->core); +} + +static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; + + reset_control_assert(res->core); + clk_disable_unprepare(res->slave_bus); + clk_disable_unprepare(res->master_bus); + clk_disable_unprepare(res->iface); + clk_disable_unprepare(res->aux); + regulator_disable(res->vdda); +} + +static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + int ret; + + ret = reset_control_deassert(res->core); + if (ret) { + dev_err(dev, "cannot deassert core reset\n"); + return ret; + } + + ret = clk_prepare_enable(res->aux); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clock\n"); + goto err_res; + } + + ret = clk_prepare_enable(res->iface); + if (ret) { + dev_err(dev, "cannot prepare/enable iface clock\n"); + goto err_aux; + } + + ret = clk_prepare_enable(res->master_bus); + if (ret) { + dev_err(dev, "cannot prepare/enable master_bus clock\n"); + goto err_iface; + } + + ret = clk_prepare_enable(res->slave_bus); + if (ret) { + dev_err(dev, "cannot prepare/enable slave_bus clock\n"); + goto err_master; + } + + ret = regulator_enable(res->vdda); + if (ret) { + dev_err(dev, "cannot enable vdda regulator\n"); + goto err_slave; + } + + /* change DBI base 
address */ + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); + + val |= BIT(31); + writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); + } + + return 0; +err_slave: + clk_disable_unprepare(res->slave_bus); +err_master: + clk_disable_unprepare(res->master_bus); +err_iface: + clk_disable_unprepare(res->iface); +err_aux: + clk_disable_unprepare(res->aux); +err_res: + reset_control_assert(res->core); + + return ret; +} + +static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie) +{ + u32 val; + + /* enable link training */ + val = readl(pcie->parf + PCIE20_PARF_LTSSM); + val |= BIT(8); + writel(val, pcie->parf + PCIE20_PARF_LTSSM); +} + +static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + int ret; + + res->supplies[0].supply = "vdda"; + res->supplies[1].supply = "vddpe-3v3"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), + res->supplies); + if (ret) + return ret; + + res->aux_clk = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux_clk)) + return PTR_ERR(res->aux_clk); + + res->cfg_clk = devm_clk_get(dev, "cfg"); + if (IS_ERR(res->cfg_clk)) + return PTR_ERR(res->cfg_clk); + + res->master_clk = devm_clk_get(dev, "bus_master"); + if (IS_ERR(res->master_clk)) + return PTR_ERR(res->master_clk); + + res->slave_clk = devm_clk_get(dev, "bus_slave"); + if (IS_ERR(res->slave_clk)) + return PTR_ERR(res->slave_clk); + + res->pipe_clk = devm_clk_get(dev, "pipe"); + return PTR_ERR_OR_ZERO(res->pipe_clk); +} + +static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; + + clk_disable_unprepare(res->slave_clk); + clk_disable_unprepare(res->master_clk); + clk_disable_unprepare(res->cfg_clk); + clk_disable_unprepare(res->aux_clk); + + regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); +} + +static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; + + clk_disable_unprepare(res->pipe_clk); +} + +static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + u32 val; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); + if (ret < 0) { + dev_err(dev, "cannot enable regulators\n"); + return ret; + } + + ret = clk_prepare_enable(res->aux_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clock\n"); + goto err_aux_clk; + } + + ret = clk_prepare_enable(res->cfg_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable cfg clock\n"); + goto err_cfg_clk; + } + + ret = clk_prepare_enable(res->master_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable master clock\n"); + goto err_master_clk; + } + + ret = clk_prepare_enable(res->slave_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable slave clock\n"); + goto err_slave_clk; + } + + /* enable PCIe clocks and resets */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~BIT(0); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + + /* change DBI base address */ + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + /* MAC PHY_POWERDOWN MUX DISABLE */ + val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); + val &= ~BIT(29); + writel(val, pcie->parf + 
PCIE20_PARF_SYS_CTRL); + + val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); + val |= BIT(4); + writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); + + val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); + val |= BIT(31); + writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); + + return 0; + +err_slave_clk: + clk_disable_unprepare(res->master_clk); +err_master_clk: + clk_disable_unprepare(res->cfg_clk); +err_cfg_clk: + clk_disable_unprepare(res->aux_clk); + +err_aux_clk: + regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); + + return ret; +} + +static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + int ret; + + ret = clk_prepare_enable(res->pipe_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable pipe clock\n"); + return ret; + } + + return 0; +} + +static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + + res->aux_clk = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux_clk)) + return PTR_ERR(res->aux_clk); + + res->master_clk = devm_clk_get(dev, "master_bus"); + if (IS_ERR(res->master_clk)) + return PTR_ERR(res->master_clk); + + res->slave_clk = devm_clk_get(dev, "slave_bus"); + if (IS_ERR(res->slave_clk)) + return PTR_ERR(res->slave_clk); + + res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m"); + if (IS_ERR(res->axi_m_reset)) + return PTR_ERR(res->axi_m_reset); + + res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s"); + if (IS_ERR(res->axi_s_reset)) + return PTR_ERR(res->axi_s_reset); + + res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe"); + if (IS_ERR(res->pipe_reset)) + return PTR_ERR(res->pipe_reset); + + res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev, + "axi_m_vmid"); + if (IS_ERR(res->axi_m_vmid_reset)) + return PTR_ERR(res->axi_m_vmid_reset); + + res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev, + "axi_s_xpu"); + if (IS_ERR(res->axi_s_xpu_reset)) + return PTR_ERR(res->axi_s_xpu_reset); + + res->parf_reset = devm_reset_control_get_exclusive(dev, "parf"); + if (IS_ERR(res->parf_reset)) + return PTR_ERR(res->parf_reset); + + res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); + if (IS_ERR(res->phy_reset)) + return PTR_ERR(res->phy_reset); + + res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev, + "axi_m_sticky"); + if (IS_ERR(res->axi_m_sticky_reset)) + return PTR_ERR(res->axi_m_sticky_reset); + + res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev, + "pipe_sticky"); + if (IS_ERR(res->pipe_sticky_reset)) + return PTR_ERR(res->pipe_sticky_reset); + + res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr"); + if (IS_ERR(res->pwr_reset)) + return PTR_ERR(res->pwr_reset); + + res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); + if (IS_ERR(res->ahb_reset)) + return PTR_ERR(res->ahb_reset); + + res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb"); + if (IS_ERR(res->phy_ahb_reset)) + return PTR_ERR(res->phy_ahb_reset); + + return 0; +} + +static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; + + reset_control_assert(res->axi_m_reset); + reset_control_assert(res->axi_s_reset); + reset_control_assert(res->pipe_reset); + 
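+	/* assert the remaining resets, then disable the clocks enabled in init */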
reset_control_assert(res->pipe_sticky_reset);
+	reset_control_assert(res->phy_reset);
+	reset_control_assert(res->phy_ahb_reset);
+	reset_control_assert(res->axi_m_sticky_reset);
+	reset_control_assert(res->pwr_reset);
+	reset_control_assert(res->ahb_reset);
+	clk_disable_unprepare(res->aux_clk);
+	clk_disable_unprepare(res->master_clk);
+	clk_disable_unprepare(res->slave_clk);
+}
+
+static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	u32 val;
+	int ret;
+
+	ret = reset_control_assert(res->axi_m_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert axi master reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->axi_s_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert axi slave reset\n");
+		return ret;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_assert(res->pipe_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert pipe reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->pipe_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert pipe sticky reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->phy_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert phy reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->phy_ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert phy ahb reset\n");
+		return ret;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_assert(res->axi_m_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert axi master sticky reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->pwr_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert power reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert ahb reset\n");
+		return ret;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_deassert(res->phy_ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert phy ahb reset\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(res->phy_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert phy reset\n");
+		goto err_rst_phy;
+	}
+
+	ret = reset_control_deassert(res->pipe_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert pipe reset\n");
+		goto err_rst_pipe;
+	}
+
+	ret = reset_control_deassert(res->pipe_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert pipe sticky reset\n");
+		goto err_rst_pipe_sticky;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_deassert(res->axi_m_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi master reset\n");
+		goto err_rst_axi_m;
+	}
+
+	ret = reset_control_deassert(res->axi_m_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi master sticky reset\n");
+		goto err_rst_axi_m_sticky;
+	}
+
+	ret = reset_control_deassert(res->axi_s_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi slave reset\n");
+		goto err_rst_axi_s;
+	}
+
+	ret = reset_control_deassert(res->pwr_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert power reset\n");
+		goto err_rst_pwr;
+	}
+
+	ret = reset_control_deassert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert ahb reset\n");
+		goto err_rst_ahb;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = clk_prepare_enable(res->aux_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clock\n");
+		goto err_clk_aux;
+	}
+
+	ret = clk_prepare_enable(res->master_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable master clock\n");
+		goto err_clk_axi_m;
+	}
+
+	ret = clk_prepare_enable(res->slave_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable slave clock\n");
+		goto err_clk_axi_s;
+	}
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	/* change DBI base address */
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	/* MAC PHY_POWERDOWN MUX DISABLE */
+	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+	val &= ~BIT(29);
+	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+	val |= BIT(4);
+	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+	val |= BIT(31);
+	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+	return 0;
+
+err_clk_axi_s:
+	clk_disable_unprepare(res->master_clk);
+err_clk_axi_m:
+	clk_disable_unprepare(res->aux_clk);
+err_clk_aux:
+	reset_control_assert(res->ahb_reset);
+err_rst_ahb:
+	reset_control_assert(res->pwr_reset);
+err_rst_pwr:
+	reset_control_assert(res->axi_s_reset);
+err_rst_axi_s:
+	reset_control_assert(res->axi_m_sticky_reset);
+err_rst_axi_m_sticky:
+	reset_control_assert(res->axi_m_reset);
+err_rst_axi_m:
+	reset_control_assert(res->pipe_sticky_reset);
+err_rst_pipe_sticky:
+	reset_control_assert(res->pipe_reset);
+err_rst_pipe:
+	reset_control_assert(res->phy_reset);
+err_rst_phy:
+	reset_control_assert(res->phy_ahb_reset);
+	return ret;
+}
+
+static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int i;
+	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
+				    "axi_m_sticky", "sticky",
+				    "ahb", "sleep", };
+
+	res->iface = devm_clk_get(dev, "iface");
+	if (IS_ERR(res->iface))
+		return PTR_ERR(res->iface);
+
+	res->axi_m_clk = devm_clk_get(dev, "axi_m");
+	if (IS_ERR(res->axi_m_clk))
+		return PTR_ERR(res->axi_m_clk);
+
+	res->axi_s_clk = devm_clk_get(dev, "axi_s");
+	if (IS_ERR(res->axi_s_clk))
+		return PTR_ERR(res->axi_s_clk);
+
+	res->ahb_clk = devm_clk_get(dev, "ahb");
+	if (IS_ERR(res->ahb_clk))
+		return PTR_ERR(res->ahb_clk);
+
+	res->aux_clk = devm_clk_get(dev, "aux");
+	if (IS_ERR(res->aux_clk))
+		return PTR_ERR(res->aux_clk);
+
+	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
+		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
+		if (IS_ERR(res->rst[i]))
+			return PTR_ERR(res->rst[i]);
+	}
+
+	return 0;
+}
+
+static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+
+	clk_disable_unprepare(res->iface);
+	clk_disable_unprepare(res->axi_m_clk);
+	clk_disable_unprepare(res->axi_s_clk);
+	clk_disable_unprepare(res->ahb_clk);
+	clk_disable_unprepare(res->aux_clk);
+}
+
+static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int i, ret;
+	u32 val;
+
+	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+		ret = reset_control_assert(res->rst[i]);
+		if (ret) {
+			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
+			return ret;
+		}
+	}
+
+	usleep_range(2000, 2500);
+
+	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+		ret = reset_control_deassert(res->rst[i]);
+		if (ret) {
+			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
+				ret);
+			return ret;
+		}
+	}
+
+	/*
+	 * Don't have a way to see if the reset has completed.
+	 * Wait for some time.
+	 */
+	usleep_range(2000, 2500);
+
+	ret = clk_prepare_enable(res->iface);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable iface clock\n");
+		goto err_clk_iface;
+	}
+
+	ret = clk_prepare_enable(res->axi_m_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable axi master clock\n");
+		goto err_clk_axi_m;
+	}
+
+	ret = clk_prepare_enable(res->axi_s_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable axi slave clock\n");
+		goto err_clk_axi_s;
+	}
+
+	ret = clk_prepare_enable(res->ahb_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable ahb clock\n");
+		goto err_clk_ahb;
+	}
+
+	ret = clk_prepare_enable(res->aux_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clock\n");
+		goto err_clk_aux;
+	}
+
+	writel(SLV_ADDR_SPACE_SZ,
+	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
+
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
+		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+		pcie->parf + PCIE20_PARF_SYS_CTRL);
+	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
+
+	writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
+	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
+	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
+
+	val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+	val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
+	writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+
+	writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
+		PCIE20_DEVICE_CONTROL2_STATUS2);
+
+	return 0;
+
+err_clk_aux:
+	clk_disable_unprepare(res->ahb_clk);
+err_clk_ahb:
+	clk_disable_unprepare(res->axi_s_clk);
+err_clk_axi_s:
+	clk_disable_unprepare(res->axi_m_clk);
+err_clk_axi_m:
+	clk_disable_unprepare(res->iface);
+err_clk_iface:
+	/*
+	 * Not checking for failure; we will return the original
+	 * failure in 'ret' anyway.
+ */ + for (i = 0; i < ARRAY_SIZE(res->rst); i++) + reset_control_assert(res->rst[i]); + + return ret; +} + +static int qcom_pcie_link_up(struct dw_pcie *pci) +{ + u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); + + return !!(val & PCI_EXP_LNKSTA_DLLLA); +} + +static int qcom_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct qcom_pcie *pcie = to_qcom_pcie(pci); + int ret; + + qcom_ep_reset_assert(pcie); + + ret = pcie->ops->init(pcie); + if (ret) + return ret; + + ret = phy_power_on(pcie->phy); + if (ret) + goto err_deinit; + + if (pcie->ops->post_init) { + ret = pcie->ops->post_init(pcie); + if (ret) + goto err_disable_phy; + } + + dw_pcie_setup_rc(pp); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); + + qcom_ep_reset_deassert(pcie); + + ret = qcom_pcie_establish_link(pcie); + if (ret) + goto err; + + return 0; +err: + qcom_ep_reset_assert(pcie); + if (pcie->ops->post_deinit) + pcie->ops->post_deinit(pcie); +err_disable_phy: + phy_power_off(pcie->phy); +err_deinit: + pcie->ops->deinit(pcie); + + return ret; +} + +static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, + u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + /* the device class is not reported correctly from the register */ + if (where == PCI_CLASS_REVISION && size == 4) { + *val = readl(pci->dbi_base + PCI_CLASS_REVISION); + *val &= 0xff; /* keep revision id */ + *val |= PCI_CLASS_BRIDGE_PCI << 16; + return PCIBIOS_SUCCESSFUL; + } + + return dw_pcie_read(pci->dbi_base + where, size, val); +} + +static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { + .host_init = qcom_pcie_host_init, + .rd_own_conf = qcom_pcie_rd_own_conf, +}; + +/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ +static const struct qcom_pcie_ops ops_2_1_0 = { + .get_resources = qcom_pcie_get_resources_2_1_0, + .init = qcom_pcie_init_2_1_0, + .deinit = qcom_pcie_deinit_2_1_0, + .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, +}; + +/* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */ +static const struct qcom_pcie_ops ops_1_0_0 = { + .get_resources = qcom_pcie_get_resources_1_0_0, + .init = qcom_pcie_init_1_0_0, + .deinit = qcom_pcie_deinit_1_0_0, + .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, +}; + +/* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */ +static const struct qcom_pcie_ops ops_2_3_2 = { + .get_resources = qcom_pcie_get_resources_2_3_2, + .init = qcom_pcie_init_2_3_2, + .post_init = qcom_pcie_post_init_2_3_2, + .deinit = qcom_pcie_deinit_2_3_2, + .post_deinit = qcom_pcie_post_deinit_2_3_2, + .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, +}; + +/* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */ +static const struct qcom_pcie_ops ops_2_4_0 = { + .get_resources = qcom_pcie_get_resources_2_4_0, + .init = qcom_pcie_init_2_4_0, + .deinit = qcom_pcie_deinit_2_4_0, + .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, +}; + +/* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */ +static const struct qcom_pcie_ops ops_2_3_3 = { + .get_resources = qcom_pcie_get_resources_2_3_3, + .init = qcom_pcie_init_2_3_3, + .deinit = qcom_pcie_deinit_2_3_3, + .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, +}; + +static const struct dw_pcie_ops dw_pcie_ops = { + .link_up = qcom_pcie_link_up, +}; + +static int qcom_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct pcie_port *pp; + struct dw_pcie *pci; + struct qcom_pcie *pcie; + int ret; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return 
-ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_disable(dev); + return ret; + } + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + pp = &pci->pp; + + pcie->pci = pci; + + pcie->ops = of_device_get_match_data(dev); + + pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); + if (IS_ERR(pcie->reset)) { + ret = PTR_ERR(pcie->reset); + goto err_pm_runtime_put; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf"); + pcie->parf = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->parf)) { + ret = PTR_ERR(pcie->parf); + goto err_pm_runtime_put; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pci->dbi_base)) { + ret = PTR_ERR(pci->dbi_base); + goto err_pm_runtime_put; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); + pcie->elbi = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->elbi)) { + ret = PTR_ERR(pcie->elbi); + goto err_pm_runtime_put; + } + + pcie->phy = devm_phy_optional_get(dev, "pciephy"); + if (IS_ERR(pcie->phy)) { + ret = PTR_ERR(pcie->phy); + goto err_pm_runtime_put; + } + + ret = pcie->ops->get_resources(pcie); + if (ret) + goto err_pm_runtime_put; + + pp->ops = &qcom_pcie_dw_ops; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq_byname(pdev, "msi"); + if (pp->msi_irq < 0) { + ret = pp->msi_irq; + goto err_pm_runtime_put; + } + } + + ret = phy_init(pcie->phy); + if (ret) + goto err_pm_runtime_put; + + platform_set_drvdata(pdev, pcie); + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "cannot initialize host\n"); + goto err_phy_exit; + } + + return 0; + +err_phy_exit: + phy_exit(pcie->phy); +err_pm_runtime_put: + pm_runtime_put(dev); + pm_runtime_disable(dev); + + return ret; +} + +static const struct of_device_id qcom_pcie_match[] = { + { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 }, + { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 }, + { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 }, + { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 }, + { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 }, + { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 }, + { } +}; + +static struct platform_driver qcom_pcie_driver = { + .probe = qcom_pcie_probe, + .driver = { + .name = "qcom-pcie", + .suppress_bind_attrs = true, + .of_match_table = qcom_pcie_match, + }, +}; +builtin_platform_driver(qcom_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c new file mode 100644 index 000000000..7d0cdfd81 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs + * + * SPEAr13xx PCIe Glue Layer Source Code + * + * Copyright (C) 2010-2014 ST Microelectronics + * Pratyush Anand <pratyush.anand@gmail.com> + * Mohit Kumar <mohit.kumar.dhaka@gmail.com> + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/of.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/resource.h> + +#include "pcie-designware.h" + +struct spear13xx_pcie { + struct dw_pcie *pci; + void __iomem *app_base; + struct phy *phy; + struct clk *clk; + bool 
is_gen1;
+};
+
+struct pcie_app_reg {
+	u32	app_ctrl_0;		/* cr0 */
+	u32	app_ctrl_1;		/* cr1 */
+	u32	app_status_0;		/* cr2 */
+	u32	app_status_1;		/* cr3 */
+	u32	msg_status;		/* cr4 */
+	u32	msg_payload;		/* cr5 */
+	u32	int_sts;		/* cr6 */
+	u32	int_clr;		/* cr7 */
+	u32	int_mask;		/* cr8 */
+	u32	mst_bmisc;		/* cr9 */
+	u32	phy_ctrl;		/* cr10 */
+	u32	phy_status;		/* cr11 */
+	u32	cxpl_debug_info_0;	/* cr12 */
+	u32	cxpl_debug_info_1;	/* cr13 */
+	u32	ven_msg_ctrl_0;		/* cr14 */
+	u32	ven_msg_ctrl_1;		/* cr15 */
+	u32	ven_msg_data_0;		/* cr16 */
+	u32	ven_msg_data_1;		/* cr17 */
+	u32	ven_msi_0;		/* cr18 */
+	u32	ven_msi_1;		/* cr19 */
+	u32	mst_rmisc;		/* cr20 */
+};
+
+/* CR0 ID */
+#define APP_LTSSM_ENABLE_ID			3
+#define DEVICE_TYPE_RC				(4 << 25)
+#define MISCTRL_EN_ID				30
+#define REG_TRANSLATION_ENABLE			31
+
+/* CR3 ID */
+#define XMLH_LINK_UP				(1 << 6)
+
+/* CR6 */
+#define MSI_CTRL_INT				(1 << 26)
+
+#define EXP_CAP_ID_OFFSET			0x70
+
+#define to_spear13xx_pcie(x)	dev_get_drvdata((x)->dev)
+
+static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
+{
+	struct dw_pcie *pci = spear13xx_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+	u32 val;
+	u32 exp_cap_off = EXP_CAP_ID_OFFSET;
+
+	if (dw_pcie_link_up(pci)) {
+		dev_err(pci->dev, "link already up\n");
+		return 0;
+	}
+
+	dw_pcie_setup_rc(pp);
+
+	/*
+	 * This controller supports only a 128-byte read size, but its
+	 * default value in the capability register is 512 bytes, so
+	 * force it down to 128 here.
+	 */
+	dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val);
+	val &= ~PCI_EXP_DEVCTL_READRQ;
+	dw_pcie_write(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val);
+
+	dw_pcie_write(pci->dbi_base + PCI_VENDOR_ID, 2, 0x104A);
+	dw_pcie_write(pci->dbi_base + PCI_DEVICE_ID, 2, 0xCD80);
+
+	/*
+	 * If is_gen1 is set, limit the link to Gen1 speed so that
+	 * buggy cards also work.
+	 */
+	if (spear13xx_pcie->is_gen1) {
+		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
+			     4, &val);
+		if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+			val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+			val |= PCI_EXP_LNKCAP_SLS_2_5GB;
+			dw_pcie_write(pci->dbi_base + exp_cap_off +
+				      PCI_EXP_LNKCAP, 4, val);
+		}
+
+		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
+			     2, &val);
+		if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+			val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+			val |= PCI_EXP_LNKCAP_SLS_2_5GB;
+			dw_pcie_write(pci->dbi_base + exp_cap_off +
+				      PCI_EXP_LNKCTL2, 2, val);
+		}
+	}
+
+	/* enable LTSSM */
+	writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
+			| (1 << APP_LTSSM_ENABLE_ID)
+			| ((u32)1 << REG_TRANSLATION_ENABLE),
+			&app_reg->app_ctrl_0);
+
+	return dw_pcie_wait_for_link(pci);
+}
+
+static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
+{
+	struct spear13xx_pcie *spear13xx_pcie = arg;
+	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+	struct dw_pcie *pci = spear13xx_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	unsigned int status;
+
+	status = readl(&app_reg->int_sts);
+
+	if (status & MSI_CTRL_INT) {
+		BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI));
+		dw_handle_msi_irq(pp);
+	}
+
+	writel(status, &app_reg->int_clr);
+
+	return IRQ_HANDLED;
+}
+
+static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
+{
+	struct dw_pcie *pci = spear13xx_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+
+	/* Enable MSI interrupt */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		dw_pcie_msi_init(pp);
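+		/*
+		 * Setting MSI_CTRL_INT in the application interrupt mask
+		 * lets the MSI controller interrupt through, so that
+		 * spear13xx_pcie_irq_handler() can forward it to
+		 * dw_handle_msi_irq().
+		 */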
writel(readl(&app_reg->int_mask) | + MSI_CTRL_INT, &app_reg->int_mask); + } +} + +static int spear13xx_pcie_link_up(struct dw_pcie *pci) +{ + struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); + struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; + + if (readl(&app_reg->app_status_1) & XMLH_LINK_UP) + return 1; + + return 0; +} + +static int spear13xx_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); + + spear13xx_pcie_establish_link(spear13xx_pcie); + spear13xx_pcie_enable_interrupts(spear13xx_pcie); + + return 0; +} + +static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = { + .host_init = spear13xx_pcie_host_init, +}; + +static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = spear13xx_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + + pp->irq = platform_get_irq(pdev, 0); + if (pp->irq < 0) { + dev_err(dev, "failed to get irq\n"); + return pp->irq; + } + ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler, + IRQF_SHARED | IRQF_NO_THREAD, + "spear1340-pcie", spear13xx_pcie); + if (ret) { + dev_err(dev, "failed to request irq %d\n", pp->irq); + return ret; + } + + pp->ops = &spear13xx_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .link_up = spear13xx_pcie_link_up, +}; + +static int spear13xx_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct spear13xx_pcie *spear13xx_pcie; + struct device_node *np = dev->of_node; + struct resource *dbi_base; + int ret; + + spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL); + if (!spear13xx_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + spear13xx_pcie->pci = pci; + + spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy"); + if (IS_ERR(spear13xx_pcie->phy)) { + ret = PTR_ERR(spear13xx_pcie->phy); + if (ret == -EPROBE_DEFER) + dev_info(dev, "probe deferred\n"); + else + dev_err(dev, "couldn't get pcie-phy\n"); + return ret; + } + + phy_init(spear13xx_pcie->phy); + + spear13xx_pcie->clk = devm_clk_get(dev, NULL); + if (IS_ERR(spear13xx_pcie->clk)) { + dev_err(dev, "couldn't get clk for pcie\n"); + return PTR_ERR(spear13xx_pcie->clk); + } + ret = clk_prepare_enable(spear13xx_pcie->clk); + if (ret) { + dev_err(dev, "couldn't enable clk for pcie\n"); + return ret; + } + + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); + if (IS_ERR(pci->dbi_base)) { + dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); + ret = PTR_ERR(pci->dbi_base); + goto fail_clk; + } + spear13xx_pcie->app_base = pci->dbi_base + 0x2000; + + if (of_property_read_bool(np, "st,pcie-is-gen1")) + spear13xx_pcie->is_gen1 = true; + + platform_set_drvdata(pdev, spear13xx_pcie); + + ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev); + if (ret < 0) + goto fail_clk; + + return 0; + +fail_clk: + clk_disable_unprepare(spear13xx_pcie->clk); + + return ret; +} + +static const struct of_device_id spear13xx_pcie_of_match[] = { + { .compatible = "st,spear1340-pcie", }, + {}, +}; + +static struct platform_driver 
spear13xx_pcie_driver = {
+	.probe		= spear13xx_pcie_probe,
+	.driver = {
+		.name	= "spear-pcie",
+		.of_match_table = of_match_ptr(spear13xx_pcie_of_match),
+		.suppress_bind_attrs = true,
+	},
+};
+
+builtin_platform_driver(spear13xx_pcie_driver);
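For reference, a minimal sketch of the device tree node this driver binds against. This is illustrative only and not part of the patch: the unit address, register size, interrupt specifier, and phandle names below are placeholders, while the compatible string, the "dbi" reg name, the unnamed clock, the "pcie-phy" PHY name, and the optional "st,pcie-is-gen1" property are exactly what spear13xx_pcie_probe() looks up:

pcie@b1000000 {
	compatible = "st,spear1340-pcie";
	reg = <0xb1000000 0x4000>;	/* mapped as "dbi"; app registers sit at dbi + 0x2000 */
	reg-names = "dbi";
	interrupts = <0 68 4>;		/* first (index 0) interrupt; placeholder specifier */
	clocks = <&pcie_clk>;		/* matches devm_clk_get(dev, NULL); placeholder phandle */
	phys = <&pcie_phy>;		/* placeholder phandle */
	phy-names = "pcie-phy";
	st,pcie-is-gen1;		/* optional: clamp the link to 2.5 GT/s */
};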